add openGauss 3.1.0 feature code

yanghao 2022-09-03 16:22:35 +08:00
parent 801d945a3d
commit b919f404e8
2759 changed files with 521358 additions and 366321 deletions

View File

@ -52,7 +52,6 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR}/${openGauss} CACHE INTERNAL "")
set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "")
set(3RD_PATH $ENV{THIRD_BIN_PATH})
execute_process(COMMAND sh ${PROJECT_SRC_DIR}/get_PlatForm_str.sh OUTPUT_VARIABLE PLAT_FORM_NAME OUTPUT_STRIP_TRAILING_WHITESPACE)
set(prefix_home $ENV{PREFIX_HOME})
set(CMAKE_INSTALL_PREFIX ${prefix_home} CACHE INTERNAL "")
if("$ENV{GCC_VERSION}" STREQUAL "")

View File

@ -153,16 +153,6 @@ redochecksmall: all
redischeck: all
dfsredischeck: all
orccheckxian: all
orccheckusa: all
orcchecksmall: all
dfscheck: all
obscheck: all
obsorccheck: all
@ -171,7 +161,7 @@ securitycheck: all
parquetchecksmall: all
check fastcheck fastcheck_inplace fastcheck_parallel_initdb qunitcheck redischeck redocheck redochecksmall orccheckxian orccheckusa orcchecksmall parquetchecksmall dfscheck obscheck obsorccheck securitycheck dfsredischeck installcheck installcheck-parallel 2pccheck:
check fastcheck fastcheck_inplace fastcheck_parallel_initdb qunitcheck redischeck redocheck redochecksmall orccheckxian orccheckusa orcchecksmall parquetchecksmall obscheck obsorccheck securitycheck installcheck installcheck-parallel 2pccheck:
$(MAKE) -C $(root_builddir)/distribute/test/regress $@
#llt include all low level test

View File

@ -82,23 +82,13 @@ redochecksmall: all
redischeck: all
dfsredischeck: all
orccheckxian: all
orccheckusa: all
orcchecksmall: all
dfscheck: all
obscheck: all
obsorccheck: all
securitycheck: all
check fastcheck fastcheck_inplace fastcheck_parallel_initdb qunitcheck redischeck redocheck redochecksmall orccheckxian orccheckusa orcchecksmall dfscheck obscheck obsorccheck securitycheck dfsredischeck installcheck installcheck-parallel 2pccheck:
check fastcheck fastcheck_inplace fastcheck_parallel_initdb qunitcheck redischeck redocheck redochecksmall orccheckxian orccheckusa orcchecksmall obscheck obsorccheck securitycheck installcheck installcheck-parallel 2pccheck:
$(MAKE) -C src/test/regress $@
#llt include all low level test

View File

@ -90,6 +90,5 @@ then
else
./package_opengauss.sh -3rd ${build_binarylib_dir} -m ${build_version_mode} -f ${config_file}
fi
fi
exit 0

View File

@ -41,8 +41,6 @@
./share/postgresql/extension/file_fdw.control
./share/postgresql/extension/hstore--unpackaged--1.0.sql
./share/postgresql/extension/hstore--1.0--1.1.sql
./share/postgresql/extension/hdfs_fdw--1.0.sql
./share/postgresql/extension/hdfs_fdw.control
./share/postgresql/extension/log_fdw--1.0.sql
./share/postgresql/extension/log_fdw.control
./share/postgresql/timezone/GB-Eire
@ -632,6 +630,7 @@
./share/postgresql/timezone/Navajo
./share/postgresql/timezone/GMT
./share/postgresql/system_views.sql
./share/postgresql/private_system_views.sql
./share/postgresql/performance_views.sql
./share/postgresql/sql_features.txt
./share/postgresql/pg_cast_oid.txt
@ -671,46 +670,25 @@
./share/postgresql/timezonesets/Etc.txt
./share/postgresql/postgres.bki
./share/sslcert/gsql/openssl.cnf
./share/sslcert/grpc/openssl.cnf
./lib/libnuma.so
./lib/libnuma.so.1
./lib/libnuma.so.1.0.0
./lib/libnuma.so*
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.14
./lib/libcjson_utils.so
./lib/libcjson_utils.so.1
./lib/libcjson_utils.so.1.7.14
./lib/libssl.so*
./lib/libcrypto.so*
./lib/libcgroup.so*
./lib/libz.so*
./lib/liblz4.so*
./lib/libcjson.so*
./lib/libcjson_utils.so*
./lib/libstdc++.so.6
./lib/libgcc_s.so.1
./lib/libgomp.so
./lib/libgomp.so.1
./lib/libgomp.so.1.0.0
./lib/libgomp.so*
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.5.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.7.0
./lib/libzstd.so*
./lib/libcurl.so*
./lib/libxgboost.so
./lib/libpagecompression.so
./lib/libpagecompression.so.1
./lib/libpagecompression.so*
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/euc_kr_and_mic.so
@ -727,6 +705,7 @@
./lib/postgresql/utf8_and_sjis.so
./lib/postgresql/utf8_and_cyrillic.so
./lib/postgresql/hstore.so
./lib/postgresql/packages.so
./lib/postgresql/utf8_and_euc_kr.so
./lib/postgresql/ascii_and_mic.so
./lib/postgresql/utf8_and_iso8859_1.so
@ -741,18 +720,20 @@
./lib/postgresql/utf8_and_euc2004.so
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/mppdb_decoding.so
./lib/postgresql/sql_decoding.so
./lib/postgresql/pg_plugin
./lib/postgresql/proc_srclib
./lib/postgresql/security_plugin.so
./lib/postgresql/pg_upgrade_support.so
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/pgoutput.so
./lib/libxgboost.so
./include/postgresql/server/postgres_ext.h
./include/postgresql/server/pg_config_os.h
./include/postgresql/server/pgtime.h
./include/postgresql/server/datatypes.h
./include/postgresql/server/client_logic/client_logic_enums.h
./include/postgresql/server/libpq/libpq-fs.h
./include/postgresql/server/nodes/primnodes.h
./include/postgresql/server/nodes/parsenodes.h
./include/postgresql/server/nodes/parsenodes_common.h
@ -901,15 +882,15 @@
./include/postgresql/server/lib/ilist.h
./include/postgresql/server/pgxc/locator.h
./include/postgresql/server/gstrace/gstrace_infra.h
./include/postgresql/server/db4ai/db4ai.h
[libpq]
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libcrypto.so*
./lib/libssl.so*
[header]
./include/libpq/libpq-fs.h
./include/libpq-fe.h
./include/postgres_ext.h
./include/gs_thread.h
@ -923,4 +904,4 @@
./include/pqcomm.h
./include/pqexpbuffer.h
[version]
3.0.0

View File

@ -36,6 +36,7 @@
./bin/kadmind
./bin/dbmind
./bin/gs_dbmind
./bin/constant
./bin/server.key.cipher
./bin/server.key.rand
./bin/gs_plan_simulator.sh
@ -71,8 +72,6 @@
./share/postgresql/extension/file_fdw.control
./share/postgresql/extension/hstore--unpackaged--1.0.sql
./share/postgresql/extension/hstore--1.0--1.1.sql
./share/postgresql/extension/hdfs_fdw--1.0.sql
./share/postgresql/extension/hdfs_fdw.control
./share/postgresql/extension/log_fdw--1.0.sql
./share/postgresql/extension/log_fdw.control
./share/postgresql/extension/mot_fdw--1.0.sql
@ -724,7 +723,6 @@
./lib/postgresql/pgxs/src/Makefile.port
./lib/postgresql/pgxs/src/nls-global.mk
./lib/postgresql/pgxs/src/Makefile.global
./lib/postgresql/pgxs/src/get_PlatForm_str.sh
./lib/postgresql/pgxs/config/install-sh
./lib/postgresql/euc_cn_and_mic.so
./lib/postgresql/latin_and_mic.so
@ -767,18 +765,11 @@
./lib/libnuma.so
./lib/libnuma.so.1
./lib/libnuma.so.1.0.0
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libcgroup.so*
./lib/libcom_err_gauss.so*
./lib/libatomic.so*
./lib/libmasstree.so
./lib/libupb.so
./lib/libupb.so.9
./lib/libupb.so.9.0.0
./lib/libupb.so*
./lib/libabsl_str_format_internal.so
./lib/libabsl_strings.so
./lib/libabsl_throw_delegate.so
@ -792,61 +783,28 @@
./lib/libabsl_log_severity.so
./lib/libaddress_sorting.so
./lib/libaddress_sorting.so.9
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libgssapi_krb5_gauss.so*
./lib/libgssrpc_gauss.so*
./lib/libk5crypto_gauss.so*
./lib/libkadm5clnt.so
./lib/libkadm5clnt_mit.so
./lib/libkadm5clnt_mit.so.11
./lib/libkadm5clnt_mit.so.11.0
./lib/libkadm5clnt_mit.so.12
./lib/libkadm5clnt_mit.so.12.0
./lib/libkadm5clnt_mit.so*
./lib/libkadm5srv.so
./lib/libkadm5srv_mit.so
./lib/libkadm5srv_mit.so.11
./lib/libkadm5srv_mit.so.11.0
./lib/libkadm5srv_mit.so.12
./lib/libkadm5srv_mit.so.12.0
./lib/libkdb5.so
./lib/libkdb5.so.9
./lib/libkdb5.so.9.0
./lib/libkdb5.so.10
./lib/libkdb5.so.10.0
./lib/libkrad.so
./lib/libkrad.so.0
./lib/libkrad.so.0.0
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkadm5srv_mit.so*
./lib/libkdb5.so*
./lib/libkrad.so*
./lib/libkrb5_gauss.so*
./lib/libkrb5support_gauss.so*
./lib/krb5/plugins/kdb/db2.so
./lib/libverto.so
./lib/libverto.so.0
./lib/libverto.so.0.0
./lib/libverto.so*
./lib/libcurl.so*
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libcrypto.so*
./lib/libssl.so*
./lib/libgcc_s.so.1
./lib/libstdc++.so.6
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libz.so*
./lib/liblz4.so*
./lib/libcjson.so*
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libconfig.so*
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./share/llvmir/GaussDB_expr.ir
@ -859,22 +817,13 @@
./lib/libpcre.so*
./lib/libsecurec.so
./lib/libxml2.so*
./lib/libparquet.so
./lib/libparquet.so.14
./lib/libparquet.so.14.1.0
./lib/libarrow.so
./lib/libarrow.so.14
./lib/libarrow.so.14.1.0
./lib/OBS.ini
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.5.0
./lib/libzstd.so*
./lib/libxgboost.so
./lib/libpagecompression.so
./lib/libpagecompression.so.1
./lib/libpagecompression.so*
./include/postgresql/server/postgres_ext.h
./include/postgresql/server/pg_config_os.h
@ -1167,25 +1116,7 @@
./include/postgresql/server/catalog/namespace.h
./include/postgresql/server/commands/trigger.h
./include/postgresql/server/executor/spi.h
./include/postgresql/server/access/ustore/undo/knl_uundotype.h
./include/postgresql/server/access/ustore/undo/knl_uundoapi.h
./include/postgresql/server/access/ustore/knl_uheap.h
./include/postgresql/server/access/ustore/knl_utuple.h
./include/postgresql/server/access/ustore/knl_utype.h
./include/postgresql/server/access/ustore/knl_upage.h
./include/postgresql/server/access/ustore/knl_uredo.h
./include/postgresql/server/access/ustore/knl_uundovec.h
./include/postgresql/server/access/ustore/knl_uundorecord.h
./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h
./include/postgresql/server/access/ustore/undo/knl_uundotxn.h
./include/postgresql/server/access/ustore/undo/knl_uundozone.h
./include/postgresql/server/access/ustore/undo/knl_uundospace.h
./include/postgresql/server/communication/commproxy_basic.h
./include/postgresql/server/access/parallel_recovery/page_redo.h
./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h
./include/postgresql/server/executor/exec/execdesc.h
./include/postgresql/server/db4ai/matrix.h
./include/postgresql/server/db4ai/scores.h
./include/postgresql/server/db4ai/db4ai.h
./jre/ASSEMBLY_EXCEPTION
./jre/bin/java
./jre/bin/jjs
@ -1371,33 +1302,18 @@
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libconfig.so*
./lib/libcrypto.so*
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libssl.so*
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libgssapi_krb5_gauss.so*
./lib/libgssrpc_gauss.so*
./lib/libk5crypto_gauss.so*
./lib/libkrb5support_gauss.so*
./lib/libkrb5_gauss.so*
./lib/libcom_err_gauss.so*
[libpq]
./lib/libpq.a
./lib/libpq.so
@ -1407,33 +1323,18 @@
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libconfig.so*
./lib/libcrypto.so*
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libssl.so*
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libgssapi_krb5_gauss.so*
./lib/libgssrpc_gauss.so*
./lib/libk5crypto_gauss.so*
./lib/libkrb5support_gauss.so*
./lib/libkrb5_gauss.so*
./lib/libcom_err_gauss.so*
./include/gs_thread.h
./include/gs_threadlocal.h
./include/postgres_ext.h

View File

@ -1,16 +1,9 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd.
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved.
# descript: Compile opengauss
# Return 0 means OK.
# Return 1 means failed.
# version: 1.0
# date: 2020-11-28
#######################################################################
# It is just a wrapper to package_internal.sh
# Example: ./build_opengauss.sh -3rd /path/to/your/third_party_binarylibs/
# Example: ./build_opengauss.sh -3rd path/third_party_binarylibs/
# change it to "N", if you want to build with original build system based on solely Makefiles
declare CMAKE_PKG="N"
declare SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
@ -32,7 +25,7 @@ function print_help()
-pkg|--package provide the type of installation package; the valid value is server.
-m|--version_mode the value of this parameter is debug, release or memcheck; the default value is release.
-pm product mode; the valid value is opengauss.
-mc|--make_check the value of this parameter is on or off; the default value is on.
-s|--symbol_mode whether to separate symbols in debug mode; the default value is on.
-co|--cmake_opt more cmake options
"
@ -93,7 +86,7 @@ while [ $# -gt 0 ]; do
fi
product_mode=$2
shift 2
;;
-mc|--make_check)
if [ "$2"X = X ]; then
echo "no given make check values"
@ -101,7 +94,7 @@ while [ $# -gt 0 ]; do
fi
make_check=$2
shift 2
;;
;;
-s|--symbol_mode)
if [ "$2"X = X ]; then
echo "no given symbol parameter"
@ -109,7 +102,7 @@ while [ $# -gt 0 ]; do
fi
separate_symbol=$2
shift 2
;;
;;
--cmake_opt)
if [ "$2"X = X ]; then
echo "no extra configure options provided"
@ -125,7 +118,7 @@ while [ $# -gt 0 ]; do
fi
extra_config_opt=$2
shift 2
;;
;;
*)
echo "Internal Error: option processing error: $1" 1>&2
echo "please input right paramtenter, the following command may help you"
@ -149,7 +142,11 @@ else
echo "begin config cmake options:" >> "$LOG_FILE" 2>&1
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
declare CMAKE_BUILD_DIR=${ROOT_DIR}/tmp_build
declare CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_MOT=ON ${extra_cmake_opt}"
if [ "$product_mode"x == "lite"x ]; then
declare CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON ${extra_cmake_opt}"
else
declare CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_MOT=ON ${extra_cmake_opt}"
fi
echo "[cmake options] cmake options is:${CMAKE_OPT}" >> "$LOG_FILE" 2>&1
source $SCRIPT_DIR/utils/cmake_compile.sh || exit 1
fi
@ -164,6 +161,5 @@ function main()
gaussdb_build
}
main
echo "now, all build has finished!"
exit 0
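With the branch added above, the CMake option set now depends on the product mode: lite builds turn off ENABLE_PRIVATEGAUSS, enable ENABLE_LITE_MODE, and omit ENABLE_MOT. A hedged usage sketch of requesting a lite build (the binarylibs path is illustrative):

# illustrative invocation: -pm lite selects the -DENABLE_LITE_MODE=ON option set above
sh build_opengauss.sh -3rd /path/to/third_party_binarylibs -m release -pm lite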

View File

@ -1,814 +0,0 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd.
# descript: Compile and pack MPPDB
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2021-12-12
#######################################################################
##default package type is server
declare package_type='server'
declare install_package_format='tar'
##default version mode is release
declare version_mode='release'
declare binarylib_dir='None'
declare separate_symbol='on'
#detect platform information.
PLATFORM=32
bit=$(getconf LONG_BIT)
if [ "$bit" -eq 64 ]; then
PLATFORM=64
fi
#get OS distributed version.
kernel=""
version=""
ext_version=""
if [ -f "/etc/euleros-release" ]; then
kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
ext_version=$version
elif [ -f "/etc/openEuler-release" ]; then
kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
elif [ -f "/etc/centos-release" ]; then
kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
else
kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z)
version=$(lsb_release -r | awk -F ' ' '{print $2}')
fi
if [ X"$kernel" == X"euleros" ]; then
dist_version="EULER"
elif [ X"$kernel" == X"centos" ]; then
dist_version="CentOS"
elif [ X"$kernel" == X"openeuler" ]; then
dist_version="openEuler"
else
echo "Only support EulerOS|Centos|openEuler platform."
echo "Kernel is $kernel"
exit 1
fi
show_package=false
gcc_version="7.3.0"
##add platform architecture information
cpus_num=$(grep -w processor /proc/cpuinfo|wc -l)
PLATFORM_ARCH=$(uname -p)
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
ARCHITECTURE_EXTRA_FLAG=_euleros2.0_${ext_version}_$PLATFORM_ARCH
release_file_list="aarch64_lite_list"
else
ARCHITECTURE_EXTRA_FLAG=_euleros2.0_sp5_${PLATFORM_ARCH}
release_file_list="x86_64_lite_list"
fi
##default install version storage path
declare mppdb_version='openGauss Lite'
declare mppdb_name_for_package="$(echo ${mppdb_version} | sed 's/ /-/g')"
declare package_path='./'
declare version_number=''
declare make_check='off'
declare zip_package='on'
declare extra_config_opt=''
#######################################################################
##print the version of mppdb
#######################################################################
function print_version()
{
echo "$version_number"
}
#######################################################################
## print help information
#######################################################################
function print_help()
{
echo "Usage: $0 [OPTION]
-h|--help show help information.
-V|--version show version information.
-f|--file provide the file list released.
-3rd|--binarylib_dir the directory of third party binarylibs.
-pkg|--package provide the type of installation packages; valid values are all, server, jdbc, odbc, agent.
-pm product mode; valid values are single, multiple or opengauss; the default value is multiple.
-p|--path storage path of the generated packages.
-t packaging format; valid values are tar or rpm; the default value is tar.
-m|--version_mode the value of this parameter is debug, release or memcheck; the default value is release.
-mc|--make_check the value of this parameter is on or off; the default value is on.
-s|--symbol_mode whether to separate symbols in debug mode; the default value is on.
-cv|--gcc_version gcc-version option: 7.3.0.
-nopkg|--no_package don't zip binaries into packages
-co|--config_opt more config options
-S|--show_pkg show server package name and Bin name base on current configuration.
"
}
if [ $# = 0 ] ; then
echo "missing option"
print_help
exit 1
fi
SCRIPT_PATH=${0}
FIRST_CHAR=$(expr substr "$SCRIPT_PATH" 1 1)
if [ "$FIRST_CHAR" = "/" ]; then
SCRIPT_PATH=${0}
else
SCRIPT_PATH="$(pwd)/${SCRIPT_PATH}"
fi
SCRIPT_NAME=$(basename $SCRIPT_PATH)
SCRIPT_DIR=$(dirname "${SCRIPT_PATH}")
SCRIPT_DIR=$(dirname "$SCRIPT_DIR")
if [ ! -f "$SCRIPT_DIR/$SCRIPT_NAME" ] ; then
SCRIPT_DIR=$SCRIPT_DIR/script
fi
package_path=$SCRIPT_DIR
#######################################################################
##read version from $release_file_list
#######################################################################
function read_mpp_version()
{
cd $SCRIPT_DIR
local head=$(cat $release_file_list | grep "\[version\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
echo "error: no find version in the $release_file_list file "
exit 1
fi
local tail=$(cat $release_file_list | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
local all=$(cat $release_file_list | wc -l)
let tail=$all+1-$head
fi
version_number=$(cat $release_file_list | awk "NR==$head+1,NR==$tail+$head-1")
echo "${mppdb_name_for_package}-${version_number}">version.cfg
#auto read the number from kernel globals.cpp, no need to change it here
}
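read_mpp_version above locates the [version] marker in the release file list and prints the lines up to the next [section] header. A standalone sketch of the same head/tail arithmetic (the list file name is only an example, not part of this commit):

# sketch of the section extraction used above; "aarch64_lite_list" is illustrative
list=aarch64_lite_list
head=$(grep -n "^\[version\]" "$list" | head -1 | cut -d: -f1)
tail=$(sed "1,${head}d" "$list" | grep -n "^\[" | head -1 | cut -d: -f1)
if [ -z "$tail" ]; then
    tail=$(( $(wc -l < "$list") + 1 - head ))
fi
awk "NR==${head}+1, NR==${tail}+${head}-1" "$list"   # prints the version line(s)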
#########################################################################
##read command line parameters
#######################################################################
while [ $# -gt 0 ]; do
case "$1" in
-h|--help)
print_help
exit 1
;;
-V|--version)
print_version
exit 1
;;
-f|--file)
if [ "$2"X = X ]; then
echo "no given file name"
exit 1
fi
release_file_list=$2
shift 2
;;
-3rd|--binarylib_dir)
if [ "$2"X = X ]; then
echo "no given binarylib directory values"
exit 1
fi
binarylib_dir=$2
shift 2
;;
-p|--path)
if [ "$2"X = X ]; then
echo "no given generration package path"
exit 1
fi
package_path=$2
if [ ! -d "$package_path" ]; then
mkdir -p $package_path
fi
shift 2
;;
-pkg)
if [ "$2"X = X ]; then
echo "no given package type name"
exit 1
fi
package_type=$2
shift 2
;;
-s|--symbol_mode)
if [ "$2"X = X ]; then
echo "no given symbol parameter"
exit 1
fi
separate_symbol=$2
shift 2
;;
-t)
if [ "$2"X = X ]; then
echo "no given installation package format values"
exit 1
fi
if [ "$2" = rpm ]; then
echo "error: do not suport rpm package now!"
exit 1
fi
install_package_format=$2
shift 1
;;
-m|--version_mode)
if [ "$2"X = X ]; then
echo "no given version number values"
exit 1
fi
version_mode=$2
shift 2
;;
-mc|--make_check)
if [ "$2"X = X ]; then
echo "no given make check values"
exit 1
fi
make_check=$2
shift 2
;;
-cv|--gcc_version)
if [ "$2"X = X ]; then
echo "no given gcc version"
exit 1
fi
gcc_version=$2
shift 2
;;
-nopkg|--no_package)
zip_package='off'
shift 1
;;
-co|--config_opt)
if [ "$2"X = X ]; then
echo "no extra configure options provided"
exit 1
fi
extra_config_opt=$2
shift 2
;;
-S|--show_pkg)
show_package=true
shift
;;
*)
echo "Internal Error: option processing error: $1" 1>&2
echo "please input right paramtenter, the following command may help you"
echo "./cmake_package_internal.sh --help or ./cmake_package_internal.sh -h"
exit 1
esac
done
read_mpp_version
if [ "$gcc_version" = "7.3.0" ]; then
gcc_version=${gcc_version:0:3}
else
echo "Unknown gcc version $gcc_version"
exit 1
fi
#######################################################################
## declare all package name
#######################################################################
declare version_string="${mppdb_name_for_package}-${version_number}"
declare package_pre_name="${version_string}-${dist_version}-${PLATFORM_ARCH}"
declare server_package_name="${package_pre_name}.${install_package_format}.gz"
declare libpq_package_name="${package_pre_name}-Libpq.${install_package_format}.gz"
declare symbol_package_name="${package_pre_name}-symbol.${install_package_format}.gz"
echo "[makemppdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
ROOT_DIR=$(dirname "$SCRIPT_DIR")
ROOT_DIR=$(dirname "$ROOT_DIR")
PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh")
if [ "${PLAT_FORM_STR}"x == "Failed"x ]
then
echo "Only support EulerOS openEuler platform."
exit 1
fi
CMAKE_BUILD_DIR=${ROOT_DIR}/tmp_build
declare LOG_FILE="${ROOT_DIR}/build/script/makemppdb_pkg.log"
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
declare ERR_MKGS_FAILED=1
declare MKGS_OK=0
if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then
BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}"
PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${binarylib_dir}/dependency"
else
BUILD_TOOLS_PATH="${ROOT_DIR}/buildtools/${PLAT_FORM_STR}"
PLATFORM_PATH="${ROOT_DIR}/platform/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${ROOT_DIR}/binarylibs"
fi
declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"
export CC="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc"
export CXX="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++"
export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH
jdkpath=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk
if [ ! -d "${jdkpath}" ]; then
jdkpath=${binarylib_dir}/platform/openjdk8/${PLATFORM_ARCH}/jdk
fi
export JAVA_HOME=${jdkpath}
declare p7zpath="${BUILD_TOOLS_PATH}/p7z/bin"
###################################
# build parameter about enable-llt
##################################
echo "[makemppdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}"
###################################
# get version number from globals.cpp
##################################
function read_mpp_number()
{
global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
version_name="GRAND_VERSION_NUM"
version_num=""
line=$(cat $global_kernal | grep ^const* | grep $version_name)
version_num1=${line#*=}
#remove the symbol;
version_num=$(echo $version_num1 | tr -d ";")
#remove the blank
version_num=$(echo $version_num)
if echo $version_num | grep -qE '^92[0-9]+$'
then
# get the last three number
latter=${version_num:2}
echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
else
echo "Cannot get the version number from globals.cpp."
exit 1
fi
}
read_mpp_number
#######################################################################
# Print log.
#######################################################################
log()
{
echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@"
echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1
}
#######################################################################
# print log and exit.
#######################################################################
die()
{
log "$@"
echo "$@"
exit $ERR_MKGS_FAILED
}
#######################################################################
## Check the installation package production environment
#######################################################################
function mpp_pkg_pre_check()
{
if [ -d "$BUILD_DIR" ]; then
rm -rf $BUILD_DIR
fi
if [ -d "$LOG_FILE" ]; then
rm -rf $LOG_FILE
fi
}
#######################################################################
# Install all SQL files from distribute/include/catalog/upgrade_sql
# to INSTALL_DIR/bin/script/upgrade_sql.
# Package all SQL files and then verify them with SHA256.
#######################################################################
function package_upgrade_sql()
{
echo "Begin to install upgrade_sql files..."
UPGRADE_SQL_TAR="upgrade_sql.tar.gz"
UPGRADE_SQL_SHA256="upgrade_sql.sha256"
MULTIP_IGNORE_VERSION=(289 294 296)
cp -r "${UPGRADE_SQL_DIR}" .
[ $? -ne 0 ] && die "Failed to cp upgrade_sql files"
tar -czf ${UPGRADE_SQL_TAR} upgrade_sql
[ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}"
rm -rf ./upgrade_sql > /dev/null 2>&1
sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}"
[ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}"
chmod 600 ${UPGRADE_SQL_TAR}
chmod 600 ${UPGRADE_SQL_SHA256}
echo "Successfully packaged upgrade_sql files."
}
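package_upgrade_sql stores only the raw digest in upgrade_sql.sha256. A minimal sketch of checking the tarball against that digest later, e.g. on the target host (not part of this commit):

# sha256sum -c expects "<digest>  <file>", so rebuild that line from the stored digest
echo "$(cat upgrade_sql.sha256)  upgrade_sql.tar.gz" | sha256sum -c -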
#######################################################################
##install gaussdb database and others
##select what to install according to the package_type variable
#######################################################################
function mpp_pkg_bld()
{
install_gaussdb
}
#######################################################################
##install gaussdb database contained server,client and libpq
#######################################################################
function install_gaussdb()
{
# Generate the license control file, and set md5sum string to the code.
echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1
echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1
cd "$ROOT_DIR/"
if [ $? -ne 0 ]; then
die "change dir to $SRC_DIR failed."
fi
if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then
echo "WARNING: do not separate symbol in debug mode!"
fi
binarylibs_path=${ROOT_DIR}/binarylibs
if [ "${binarylib_dir}"x != "None"x ]; then
binarylibs_path=${binarylib_dir}
fi
export BUILD_TUPLE=${PLATFORM_ARCH}
export THIRD_BIN_PATH="${binarylibs_path}"
export PREFIX_HOME="${BUILD_DIR}"
if [ "$version_mode"x == "release"x ]; then
CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
export DEBUG_TYPE=release
elif [ "$version_mode"x == "memcheck"x ]; then
CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
export DEBUG_TYPE=memcheck
else
CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
export DEBUG_TYPE=debug
fi
echo "Begin run cmake for gaussdb server" >> "$LOG_FILE" 2>&1
echo "CMake options: ${CMAKE_OPT}" >> "$LOG_FILE" 2>&1
echo "CMake release: ${DEBUG_TYPE}" >> "$LOG_FILE" 2>&1
export GAUSSHOME=${BUILD_DIR}
export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH}
cd ${ROOT_DIR}
[ -d "${CMAKE_BUILD_DIR}" ] && rm -rf ${CMAKE_BUILD_DIR}
[ -d "${BUILD_DIR}" ] && rm -rf ${BUILD_DIR}
mkdir -p ${CMAKE_BUILD_DIR}
cd ${CMAKE_BUILD_DIR}
cmake .. ${CMAKE_OPT}
echo "Begin make and install gaussdb server" >> "$LOG_FILE" 2>&1
make VERBOSE=1 -sj ${cpus_num}
if [ $? -ne 0 ]; then
die "make failed."
fi
make install -sj ${cpus_num}
if [ $? -ne 0 ]; then
die "make install failed."
fi
## check build specification
spec="gaussdbkernel"
if ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'GaussDB Kernel' >/dev/null 2>&1 ); then
spec="gaussdbkernel"
elif ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'openGauss' >/dev/null 2>&1 ); then
spec="opengauss"
fi
chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
#back to separate_debug_symbol.sh dir
cd $SCRIPT_DIR
if [ "$version_mode" = "release" -a "$separate_symbol" = "on" -a "$zip_package" = "on" ]; then
chmod +x ./separate_debug_information.sh
./separate_debug_information.sh
cd $SCRIPT_DIR
mv symbols.tar.gz $symbol_package_name
fi
#back to root dir
cd $ROOT_DIR
#insert the commitid to version.cfg as the upgrade app path specification
export PATH=${BUILD_DIR}:$PATH
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | cut -d ")" -f 1 | awk '{print $NF}')
echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/bin/iperf3 ${BUILD_DIR}/bin
if [ $? -ne 0 ]; then
die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/bin/iperf3 ${BUILD_DIR}/bin failed"
fi
cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/lib/libiperf.so.0 ${BUILD_DIR}/lib
if [ $? -ne 0 ]; then
die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/lib/libiperf.so.0 ${BUILD_DIR}/lib failed"
fi
cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/fio/comm/bin/fio ${BUILD_DIR}/bin
if [ $? -ne 0 ]; then
die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/fio/comm/bin/fio ${BUILD_DIR}/bin failed"
fi
}
#######################################################################
##select package type according to variable package_type
#######################################################################
function mpp_pkg_make()
{
case "$package_type" in
server)
echo "file list: $release_file_list"
make_package $release_file_list 'server'
make_package $release_file_list 'libpq'
;;
libpq)
make_package $release_file_list 'libpq'
;;
esac
}
declare package_command
#######################################################################
##select package command according to install_package_format
#######################################################################
function select_package_command()
{
case "$install_package_format" in
tar)
tar='tar'
option=' -zcvf'
package_command="$tar$option"
;;
rpm)
rpm='rpm'
option=' -i'
package_command="$rpm$option"
;;
esac
}
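select_package_command only assembles a command prefix; the string is expanded later when the package is created. A small illustration (the archive name is made up):

# with install_package_format=tar, package_command expands to "tar -zcvf"
select_package_command
$package_command example-package.tar.gz ./*    # i.e. tar -zcvf example-package.tar.gz ./*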
###############################################################
## client tools package
## Roach no
## sslcert no
## Data Studio no
## Database Manager no
## Migration Toolkit no
## Cluster Configuration Assistant (CCA) no
## CAT no
###############################################################
function target_file_copy_for_non_server()
{
for file in $(echo $1)
do
tar -cpf - $file | ( cd $2; tar -xpf - )
done
}
declare bin_name="${package_pre_name}.bin"
declare sha256_name=''
declare script_dir="${ROOT_DIR}/script"
#######################################################################
##copy target file into temporary directory temp
#######################################################################
function target_file_copy()
{
###################################################
# make bin package
###################################################
for file in $(echo $1)
do
tar -cpf - $file | ( cd $2; tar -xpf - )
done
cd $BUILD_DIR
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
# do nothing in current version
echo ""
else
sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
fi
if [ "$(ls -A /lib64/libaio.so*)" != "" ]
then
cp /lib64/libaio.so* $2/lib/
elif [ "$(ls -A /lib/libaio.so*)" != "" ]
then
cp /lib/libaio.so* $2/lib/
fi
if [ "$(ls -A /lib64/libnuma.so*)" != "" ]
then
cp /lib64/libnuma.so* $2/lib/
elif [ "$(ls -A /lib/libnuma.so*)" != "" ]
then
cp /lib/libnuma.so* $2/lib/
fi
#generate bin file
echo "Begin generate ${bin_name} bin file..." >> "$LOG_FILE" 2>&1
curpath=$(pwd)
cd $2
tar -zcf ${curpath}/${bin_name} . >> "$LOG_FILE" 2>&1
cd ${curpath}
if [ $? -ne 0 ]; then
echo "Please check and makesure '7z' exist. "
die "generate ${bin_name} failed."
fi
echo "End generate ${bin_name} bin file" >> "$LOG_FILE" 2>&1
#generate sha256 file
sha256_name="${package_pre_name}.sha256"
echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
sha256sum "${bin_name}" | awk -F" " '{print $1}' > "$sha256_name"
if [ $? -ne 0 ]; then
die "generate sha256 file failed."
fi
echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1
cp $2/lib/libstdc++.so.6 ./
###################################################
# make server package
###################################################
if [ -d "${2}" ]; then
rm -rf ${2}
fi
mkdir -p ${2}
mkdir -p $2/dependency
cp libstdc++.so.6 $2/dependency
mv ${bin_name} ${sha256_name} $2
}
#######################################################################
##function make_package has three actions
##1. parse the file given by the release_file_list variable
##2. copy the target files into a newly created temporary directory temp
##3. package all files in the temp directory and move the result to the destination package_path
#######################################################################
function make_package()
{
cd $SCRIPT_DIR
releasefile=$1
pkgname=$2
local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
die "error: ono find $pkgname in the $releasefile file "
fi
local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
local all=$(cat $releasefile | wc -l)
let tail=$all+1-$head
fi
dest=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
if [ "$pkgname"x = "libpq"x -a \( "$version_mode" = "debug" -o "$version_mode" = "release" \) ]; then
# copy include file
head=$(cat $releasefile | grep "\[header\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
die "error: ono find header in the $releasefile file "
fi
tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
all=$(cat $releasefile | wc -l)
let tail=$all+1-$head
fi
dest1=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
dest=$(echo "$dest";echo "$dest1")
fi
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}
rm -rf temp
mkdir temp
case "$pkgname" in
server)
mkdir -p ${BUILD_DIR}/temp/etc
target_file_copy "$dest" ${BUILD_DIR}/temp
;;
*)
target_file_copy_for_non_server "$dest" ${BUILD_DIR}/temp $pkgname
;;
esac
cd ${BUILD_DIR}/temp
select_package_command
case "$pkgname" in
server)
echo "packaging server..."
cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp
if [ $? -ne 0 ]; then
die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed"
fi
cp ${ROOT_DIR}/${open_gauss}/liteom/install.sh ./
if [ $? -ne 0 ]
then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/install.sh to ${BUILD_DIR}/temp failed"
fi
cp ${ROOT_DIR}/${open_gauss}/liteom/uninstall.sh ./
if [ $? -ne 0 ]
then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/uninstall.sh to ${BUILD_DIR}/temp failed"
fi
cp ${ROOT_DIR}/${open_gauss}/liteom/opengauss_lite.conf ./
if [ $? -ne 0 ]
then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/opengauss_lite.conf to ${BUILD_DIR}/temp failed"
fi
# pkg upgrade scripts:upgrade_GAUSSV5.sh, upgrade_common.sh, upgrade_config.sh, upgrade_errorcode.sh
for filename in upgrade_GAUSSV5.sh upgrade_common.sh upgrade_config.sh upgrade_errorcode.sh
do
if ! cp ${ROOT_DIR}/${open_gauss}/liteom/${filename} ./ ; then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/${filename} to ${BUILD_DIR}/temp failed"
fi
done
# install upgrade_sql.* files.
package_upgrade_sql
$package_command "${server_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${server_package_name} failed"
fi
mv ${server_package_name} ${package_path}
echo "install $pkgname tools is ${server_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
;;
libpq)
echo "packaging libpq..."
$package_command "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${libpq_package_name} failed"
fi
mv ${libpq_package_name} ${package_path}
echo "install $pkgname tools is ${libpq_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
;;
esac
}
#############################################################
# show package for hotpatch sdv.
#############################################################
if [ "$show_package" = true ]; then
echo "package: "$server_package_name
echo "bin: "$bin_name
exit 0
fi
#############################################################
# main function
#############################################################
# 1. clean install path and log file
mpp_pkg_pre_check
# 2. chose action
mpp_pkg_bld
if [ "$zip_package" = "off" ]; then
echo "The option 'nopkg' is on, no package will be zipped."
exit 0
fi
# 3. make package
mpp_pkg_make
#clean mpp_install directory
echo "clean enviroment"
echo "[makemppdb] $(date +%y-%m-%d' '%T): remove ${BUILD_DIR}" >>"$LOG_FILE" 2>&1
mkdir ${ROOT_DIR}/output
mv ${ROOT_DIR}/build/script/*.tar.gz ${ROOT_DIR}/output/
echo "now, all packages has finished!"
exit 0

View File

@ -24,8 +24,6 @@
declare SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
declare ROOT_DIR=$(dirname "${SCRIPT_DIR}")
declare ROOT_DIR=$(dirname "${ROOT_DIR}")
declare package_type='server'
declare product_mode='opengauss'
declare version_mode='release'
@ -35,8 +33,6 @@ declare cm_dir='None'
declare show_package='false'
declare install_package_format='tar'
function print_help()
{
echo "Usage: $0 [OPTION]
@ -48,7 +44,6 @@ function print_help()
"
}
if [ $# = 0 ] ; then
echo "missing option"
print_help
@ -126,10 +121,18 @@ fi
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
declare PKG_TMP_DIR="${BUILD_DIR}/temp"
if [ -e "$SCRIPT_DIR/utils/internal_packages.sh" ];then
source $SCRIPT_DIR/utils/internal_packages.sh
if [ "${product_mode}" == "lite" ]; then
if [ -e "$SCRIPT_DIR/utils/internal_packages_lite.sh" ];then
source $SCRIPT_DIR/utils/internal_packages_lite.sh
else
exit 1
fi
else
exit 1
if [ -e "$SCRIPT_DIR/utils/internal_packages.sh" ];then
source $SCRIPT_DIR/utils/internal_packages.sh
else
exit 1
fi
fi
function main()
@ -141,4 +144,4 @@ function main()
main
echo "now, all packages has finished!"
exit 0

View File

@ -1,12 +1,6 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2012-2019, Huawei Tech. Co., Ltd.
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved.
# descript: Separate debug information
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2019-1-6
#######################################################################
DEPTH=$(pwd)
INSTALL_DIR=$DEPTH/../../mppdb_temp_install
@ -119,7 +113,7 @@ separate_symbol()
chmod 755 "$INSTALL_DIR/${symbol_name}.symbol"
mv $INSTALL_DIR/${symbol_name}.symbol $CPTODEST
fi
elif [[ "$x" = *".so" ]]; then
elif [[ "$x" = *".so" || "$x" = *".so."* ]]; then
if [[ "$platformname" = "Redhat" ]] || [[ "$platformname" = "Euler" ]]; then
if [[ "$x" = "libkadm5clnt.so" ]]; then
echo "$x is not a dynamically linked or not stripped, do not separate symbol"

View File

@ -1,12 +1,6 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd.
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved.
# descript: Compile and pack openGauss
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2020-08-08
#######################################################################
#######################################################################
## Check the installation package production environment
#######################################################################
@ -27,10 +21,10 @@ function gaussdb_pkg_pre_clean()
function read_gaussdb_version()
{
cd ${SCRIPT_DIR}
echo "${gaussdb_name_for_package}-${version_number}" > version.cfg
echo "${product_name}-${version_number}" > version.cfg
#auto read the number from kernel globals.cpp, no need to change it here
}
###################################
# get version number from globals.cpp
##################################
@ -96,6 +90,7 @@ function make_license_control()
die "modify '$gaussdb_version_file' failed."
fi
}
function make_gaussdb_kernel()
{
export BUILD_TUPLE=${PLATFORM_ARCH}
@ -125,11 +120,10 @@ function make_gaussdb_kernel()
if [ $? -ne 0 ]; then
die "make install failed."
fi
echo "End make install gaussdb server" >> "$LOG_FILE" 2>&1
}
#######################################################################
##install gaussdb database contained server,client and libpq
#######################################################################
@ -148,14 +142,14 @@ function install_gaussdb()
echo "WARNING: do not separate symbol in debug mode!"
fi
if [ "$product_mode" != "opengauss" ]; then
die "the product mode can only be opengauss!"
if [ "$product_mode" != "opengauss" -a "$product_mode" != "lite" ]; then
die "the product mode can only be opengauss, lite!"
fi
echo "build gaussdb kernel." >> "$LOG_FILE" 2>&1
make_gaussdb_kernel
echo "build gaussdb kernel success." >> "$LOG_FILE" 2>&1
chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
@ -163,9 +157,6 @@ function install_gaussdb()
get_kernel_commitid
}
#######################################################################
##install gaussdb database and others
##select what to install according to the package_type variable
@ -184,4 +175,4 @@ function gaussdb_build()
echo "please input right paramenter values server or libpq "
exit 1
esac
}

View File

@ -1,15 +1,9 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved.
# descript: Compile and pack openGauss
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2020-08-08
#######################################################################
declare LOG_FILE="${SCRIPT_DIR}/makemppdb_pkg.log"
declare gaussdb_version='openGauss'
declare product_name='openGauss'
declare PLATFORM_ARCH=$(uname -p)
declare package_path=${ROOT_DIR}/output
declare install_package_format="tar"
@ -44,17 +38,14 @@ function die()
{
log "$@"
echo "$@"
exit $ERR_MKGS_FAILED
exit $ERR_FAILED
}
#######################################################################
##select package command according to install_package_format
#######################################################################
function select_package_command()
{
case "$install_package_format" in
tar)
tar='tar'
@ -65,86 +56,61 @@ function select_package_command()
}
select_package_command
#######################################################################
##get os dist version
#######################################################################
export PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh")
if [ "${PLAT_FORM_STR}"x == "Failed"x -o "${PLAT_FORM_STR}"x == ""x ]
then
echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64), Asianux platform."
exit 1;
fi
if [[ "$PLAT_FORM_STR" =~ "euleros" ]]; then
dist_version="EulerOS"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
elif [[ "$PLAT_FORM_STR" =~ "centos" ]]; then
dist_version="CentOS"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
elif [[ "$PLAT_FORM_STR" =~ "openeuler" ]]; then
dist_version="openEuler"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE"
fi
elif [[ "$PLAT_FORM_STR" =~ "kylin" ]]; then
dist_version="Kylin"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
elif [[ "$PLAT_FORM_STR" =~ "asianux" ]]; then
dist_version="Asianux"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
if [[ -f "/etc/euleros-release" ]]; then
os_name="EulerOS"
elif [[ -f "/etc/centos-release" ]]; then
os_name="CentOS"
elif [[ -f "/etc/openEuler-release" ]]; then
os_name="openEuler"
elif [[ -f "/etc/kylin-release" ]]; then
os_name="Kylin"
elif [[ -f "/etc/asianux-release" ]]; then
os_name="Asianux"
else
echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64), Asianux platform."
echo "Kernel is $kernel"
exit 1
os_name=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z | sed 's/.*/\L&/; s/[a-z]*/\u&/g')
fi
##add platform architecture information
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
if [ "$dist_version" != "openEuler" ] && [ "$dist_version" != "EulerOS" ] && [ "$dist_version" != "Kylin" ] && [ "$dist_version" != "Asianux" ]; then
if [ "$os_name" != "openEuler" ] && [ "$os_name" != "EulerOS" ] && [ "$os_name" != "Kylin" ] && [ "$dist_version" != "Asianux" ]; then
echo "We only support NUMA on openEuler(aarch64), EulerOS(aarch64), Kylin(aarch64), Asianux platform."
exit 1
fi
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then
BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}"
PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${binarylib_dir}/dependency"
BUILD_TOOLS_PATH="${binarylib_dir}/buildtools"
PLATFORM_PATH="${binarylib_dir}/kernel/platform"
BINARYLIBS_PATH="${binarylib_dir}/kernel/dependency"
else
die "${binarylib_dir} not exist"
die "${binarylib_dir} doesn't exist."
fi
declare INSTALL_TOOLS_DIR=${BINARYLIBS_PATH}/install_tools_${PLAT_FORM_STR}
declare UNIX_ODBC="${BINARYLIBS_PATH}/${PLAT_FORM_STR}/unixodbc"
declare INSTALL_TOOLS_DIR=${binarylib_dir}/install_tools
declare UNIX_ODBC="${BINARYLIBS_PATH}/unixodbc"
# compilation-related settings
gcc_version="7.3"
ccache -V >/dev/null 2>&1 && USE_CCACHE="ccache " ENABLE_CCACHE="--enable-ccache"
export CC="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc"
export CXX="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++"
export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH
export JAVA_HOME=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk
export JAVA_HOME=${PLATFORM_PATH}/huaweijdk8/${PLATFORM_ARCH}/jdk
declare ERR_MKGS_FAILED=1
declare MKGS_OK=0
declare ERR_FAILED=1
declare ERR_OK=0
gaussdb_200_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB200"
gaussdb_300_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB300"
gaussdb_200_standard_file="${binarylib_dir}/buildtools/license_control/gaussdb.license.GaussDB200_Standard"
gaussdb_200_file="${BUILD_TOOLS_PATH}/license_control/gaussdb.version.GaussDB200"
gaussdb_300_file="${BUILD_TOOLS_PATH}/license_control/gaussdb.version.GaussDB300"
gaussdb_200_standard_file="${BUILD_TOOLS_PATH}/license_control/gaussdb.license.GaussDB200_Standard"
gaussdb_version_file="${ROOT_DIR}/src/gausskernel/process/postmaster/gaussdb_version.cpp"
if [ -f "$SCRIPT_DIR/gaussdb.ver" ];then
declare version_number=$(cat ${SCRIPT_DIR}/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
else
@ -157,13 +123,10 @@ declare release_file_list="${PLATFORM_ARCH}_${product_mode}_list"
#######################################################################
## declare all package name
#######################################################################
declare gaussdb_name_for_package="$(echo ${gaussdb_version} | sed 's/ /-/g')"
declare version_string="${gaussdb_name_for_package}-${version_number}"
declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit"
declare version_string="${product_name}-${version_number}"
declare package_pre_name="${version_string}-${os_name}-${PLATFORM}bit"
declare libpq_package_name="${package_pre_name}-Libpq.tar.gz"
declare tools_package_name="${package_pre_name}-tools.tar.gz"
declare kernel_package_name="${package_pre_name}.tar.bz2"
declare symbol_package_name="${package_pre_name}-symbol.tar.gz"
declare sha256_name="${package_pre_name}.sha256"

View File

@ -1,6 +1,5 @@
#!/bin/bash
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
@ -15,7 +14,6 @@
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gs_backup is a utility to back up or restore binary files and parameter files.
#############################################################################
declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"

View File

@ -0,0 +1,217 @@
#!/bin/bash
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved.
# descript: package opengauss lite
declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"
#######################################################################
# move pkgs to output directory
#######################################################################
function deploy_pkgs()
{
mkdir -p $package_path
for pkg in $@; do
if [ -f "$pkg" ]; then
mv $pkg $package_path/
fi
done
}
#######################################################################
# copy directory's files list to $2
#######################################################################
function copy_files_list()
{
for file in $(echo $1)
do
test -e $file && tar -cpf - $file | ( cd $2; tar -xpf - )
done
}
#######################################################################
##copy target file into temporary directory temp
#######################################################################
function target_file_copy()
{
cd ${BUILD_DIR}
copy_files_list "$1" $2
sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
#generate tar file
echo "Begin generate ${kernel_package_name} tar file..." >> "$LOG_FILE" 2>&1
cd $2
${BUILD_TOOLS_PATH}/p7z/bin/7z a -t7z -sfx "${kernel_package_name}" ./* >> "$LOG_FILE" 2>&1
cd '-'
mv $2/"${kernel_package_name}" ./
if [ $? -ne 0 ]; then
die "generate ${kernel_package_name} failed."
fi
echo "End generate ${kernel_package_name} tar file" >> "$LOG_FILE" 2>&1
#generate sha256 file
sha256_name="${sha256_name}"
echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
sha256sum "${kernel_package_name}" | awk -F" " '{print $1}' > "${sha256_name}"
if [ $? -ne 0 ]; then
die "generate sha256 file failed."
fi
echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1
if [ -d "${2}" ]; then
rm -rf ${2}
fi
}
function target_file_copy_for_non_server()
{
cd ${BUILD_DIR}
copy_files_list "$1" $2
}
#######################################################################
##function prep_dest_list parses the file given by the release_file_list variable
#######################################################################
function prep_dest_list()
{
cd $SCRIPT_DIR
releasefile=$1
pkgname=$2
local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
die "error: ono find $pkgname in the $releasefile file "
fi
local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
local all=$(cat $releasefile | wc -l)
let tail=$all+1-$head
fi
dest_list=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
}
#######################################################################
##back to separate_debug_symbol.sh dir
#######################################################################
function separate_symbol()
{
cd $SCRIPT_DIR
if [ "$version_mode" = "release" ]; then
chmod +x ./separate_debug_information.sh
./separate_debug_information.sh
cd $SCRIPT_DIR
mv symbols.tar.gz $symbol_package_name
deploy_pkgs $symbol_package_name
fi
}
function make_package_srv()
{
echo "Begin package server"
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'server'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp/etc
target_file_copy "$dest_list" ${BUILD_DIR}/temp
make_package_upgrade_sql
cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp
if [ $? -ne 0 ]; then
die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed"
fi
# pkg install uninstall scripts:install.sh, uninstall.sh, opengauss_lite.conf, upgrade_errorcode.sh
for filename in install.sh uninstall.sh opengauss_lite.conf
do
if ! cp ${ROOT_DIR}/liteom/${filename} ${BUILD_DIR}/temp ; then
die "copy ${ROOT_DIR}/liteom/${filename} to ${BUILD_DIR}/temp failed"
fi
done
chmod 500 ./install.sh ./uninstall.sh
# pkg upgrade scripts:upgrade_GAUSSV5.sh, upgrade_common.sh, upgrade_config.sh, upgrade_errorcode.sh
for filename in upgrade_GAUSSV5.sh upgrade_common.sh upgrade_config.sh upgrade_errorcode.sh
do
if ! cp ${ROOT_DIR}/liteom/${filename} ${BUILD_DIR}/temp ; then
die "copy ${ROOT_DIR}/liteom/${filename} to ${BUILD_DIR}/temp failed"
fi
done
chmod 500 ./upgrade_GAUSSV5.sh
chmod 400 ./upgrade_common.sh upgrade_errorcode.sh
chmod 600 ./opengauss_lite.conf upgrade_config.sh
mkdir -p ${BUILD_DIR}/temp/dependency
cp ${BUILD_DIR}/lib/libstdc++.so.6 ${BUILD_DIR}/temp/dependency
cd ${BUILD_DIR}/temp
cp ${BUILD_DIR}/"${kernel_package_name}" ./
cp ${BUILD_DIR}/"${sha256_name}" ./
tar -czf "${package_pre_name}.tar.gz" ./*
deploy_pkgs "${package_pre_name}.tar.gz"
echo "make server(all) package success!"
}
#######################################################################
# Install all SQL files from src/distribute/include/catalog/upgrade_sql
# to INSTALL_DIR/bin/script/upgrade_sql.
# Package all SQL files and then verify them with SHA256.
#######################################################################
function make_package_upgrade_sql()
{
echo "Begin to install upgrade_sql files..."
UPGRADE_SQL_TAR="upgrade_sql.tar.gz"
UPGRADE_SQL_SHA256="upgrade_sql.sha256"
cd $SCRIPT_DIR
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}
rm -rf temp
mkdir temp
cd ${BUILD_DIR}/temp
cp -r "${UPGRADE_SQL_DIR}" ./upgrade_sql
[ $? -ne 0 ] && die "Failed to cp upgrade_sql files"
tar -czf ${UPGRADE_SQL_TAR} upgrade_sql
[ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}"
rm -rf ./upgrade_sql > /dev/null 2>&1
sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}"
[ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}"
chmod 600 ${UPGRADE_SQL_TAR}
chmod 600 ${UPGRADE_SQL_SHA256}
echo "Successfully packaged upgrade_sql files."
}
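As with the kernel package, upgrade_sql.sha256 holds only the digest, so a later consumer would verify the archive by recomputing and comparing, roughly:
cd ${BUILD_DIR}/temp
[ "$(sha256sum upgrade_sql.tar.gz | awk '{print $1}')" = "$(cat upgrade_sql.sha256)" ] || die "upgrade_sql.tar.gz checksum mismatch"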
function make_package_libpq()
{
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'libpq'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp
target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
if [ "$version_mode" != "memcheck" ]; then
# copy include file
prep_dest_list $release_file_list 'header'
target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
fi
cd ${BUILD_DIR}/temp
echo "packaging libpq..."
tar -zvcf "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${libpq_package_name} failed"
fi
deploy_pkgs ${libpq_package_name}
echo "install $pkgname tools is ${libpq_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
}
function gaussdb_pkg()
{
echo "Start package opengauss."
separate_symbol
make_package_srv
make_package_libpq
echo "End package opengauss."
}

View File

@ -1,12 +1,6 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved.
# descript: Compile and pack openGauss
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2020-08-08
#######################################################################
#######################################################################
## Check the installation package production environment
#######################################################################
@ -27,16 +21,14 @@ function gaussdb_pkg_pre_clean()
function read_gaussdb_version()
{
cd ${SCRIPT_DIR}
echo "${gaussdb_name_for_package}-${version_number}" > version.cfg
echo "${product_name}-${version_number}" > version.cfg
#auto read the number from kernel globals.cpp, no need to change it here
}
PG_REG_TEST_ROOT="${ROOT_DIR}"
ROACH_DIR="${ROOT_DIR}/distribute/bin/roach"
MPPDB_DECODING_DIR="${ROOT_DIR}/contrib/mppdb_decoding"
###################################
# get version number from globals.cpp
##################################
@ -63,7 +55,6 @@ function read_gaussdb_number()
fi
}
#######################################################################
##insert the commitid to version.cfg as the upgrade app path specification
#######################################################################
@ -76,7 +67,6 @@ function get_kernel_commitid()
echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
}
#######################################################################
## generate the version file.
#######################################################################
@ -105,7 +95,6 @@ function make_license_control()
fi
}
#######################################################################
##back to separate_debug_symbol.sh dir
#######################################################################
@ -142,8 +131,8 @@ function install_gaussdb()
echo "WARNING: do not separate symbol in debug mode!"
fi
if [ "$product_mode" != "opengauss" ]; then
die "the product mode can only be opengauss!"
if [ "$product_mode" != "opengauss" -a "$product_mode" != "lite" ]; then
die "the product mode can only be opengauss, lite!"
fi
#configure
@ -176,6 +165,20 @@ function install_gaussdb()
else
./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --enable-debug --enable-cassert CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
fi
elif [ "$product_mode"x == "lite"x ]; then
shared_opt="--gcc-version=${gcc_version}.0 --prefix="${BUILD_DIR}" --3rd=${binarylib_dir} --enable-thread-safety ${enable_readline} --without-zlib --without-gssapi --without-krb5"
if [ "$version_mode"x == "release"x ]; then
# configure -D__USE_NUMA -D__ARM_LSE with arm single mode
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
echo "configure -D__USE_NUMA -D__ARM_LSE with arm single mode"
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE"
fi
./configure $shared_opt CFLAGS="-O2 -g3 ${GAUSSDB_EXTRA_FLAGS}" CC=g++ $extra_config_opt --enable-lite-mode >> "$LOG_FILE" 2>&1
elif [ "$version_mode"x == "memcheck"x ]; then
./configure $shared_opt CFLAGS='-O0' --enable-debug --enable-cassert --enable-memory-check CC=g++ $extra_config_opt --enable-lite-mode >> "$LOG_FILE" 2>&1
else
./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-debug --enable-cassert CC=g++ $extra_config_opt --enable-lite-mode>> "$LOG_FILE" 2>&1
fi
fi
if [ $? -ne 0 ]; then
@ -258,13 +261,9 @@ function install_gaussdb()
chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
separate_symbol
get_kernel_commitid
}
#######################################################################
##install gaussdb database and others
##select to install something according to variables package_type need

View File

@ -4,8 +4,8 @@
./bin/gstrace
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_initdb
./bin/gs_ctl
./bin/gs_initdb
./bin/gs_guc
./bin/gs_restore
./bin/pg_config
@ -41,8 +41,6 @@
./share/postgresql/extension/file_fdw.control
./share/postgresql/extension/hstore--unpackaged--1.0.sql
./share/postgresql/extension/hstore--1.0--1.1.sql
./share/postgresql/extension/hdfs_fdw--1.0.sql
./share/postgresql/extension/hdfs_fdw.control
./share/postgresql/extension/log_fdw--1.0.sql
./share/postgresql/extension/log_fdw.control
./share/postgresql/timezone/GB-Eire
@ -632,6 +630,7 @@
./share/postgresql/timezone/Navajo
./share/postgresql/timezone/GMT
./share/postgresql/system_views.sql
./share/postgresql/private_system_views.sql
./share/postgresql/performance_views.sql
./share/postgresql/sql_features.txt
./share/postgresql/pg_cast_oid.txt
@ -674,39 +673,21 @@
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.14
./lib/libcjson_utils.so
./lib/libcjson_utils.so.1
./lib/libcjson_utils.so.1.7.14
./lib/libssl.so*
./lib/libcrypto.so*
./lib/libcgroup.so*
./lib/libz.so*
./lib/liblz4.so*
./lib/libcjson.so*
./lib/libcjson_utils.so*
./lib/libstdc++.so.6
./lib/libgcc_s.so.1
./lib/libgomp.so
./lib/libgomp.so.1
./lib/libgomp.so.1.0.0
./lib/libgomp.so*
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.5.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.7.0
./lib/libzstd.so*
./lib/libcurl.so*
./lib/libxgboost.so
./lib/libpagecompression.so
./lib/libpagecompression.so.1
./lib/libpagecompression.so*
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/euc_kr_and_mic.so
@ -723,6 +704,7 @@
./lib/postgresql/utf8_and_sjis.so
./lib/postgresql/utf8_and_cyrillic.so
./lib/postgresql/hstore.so
./lib/postgresql/packages.so
./lib/postgresql/utf8_and_euc_kr.so
./lib/postgresql/ascii_and_mic.so
./lib/postgresql/utf8_and_iso8859_1.so
@ -737,9 +719,11 @@
./lib/postgresql/utf8_and_euc2004.so
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/mppdb_decoding.so
./lib/postgresql/sql_decoding.so
./lib/postgresql/pg_plugin
./lib/postgresql/proc_srclib
./lib/postgresql/security_plugin.so
./lib/postgresql/pg_upgrade_support.so
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/pgoutput.so
@ -897,15 +881,15 @@
./include/postgresql/server/lib/ilist.h
./include/postgresql/server/pgxc/locator.h
./include/postgresql/server/gstrace/gstrace_infra.h
./include/postgresql/server/db4ai/db4ai.h
[libpq]
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libcrypto.so*
./lib/libssl.so*
[header]
./include/libpq/libpq-fs.h
./include/libpq-fe.h
./include/postgres_ext.h
./include/gs_thread.h
@ -919,4 +903,4 @@
./include/pqcomm.h
./include/pqexpbuffer.h
[version]
3.0.0
3.0.0

View File

@ -36,6 +36,7 @@
./bin/kadmind
./bin/dbmind
./bin/gs_dbmind
./bin/constant
./bin/server.key.cipher
./bin/server.key.rand
./bin/gs_plan_simulator.sh
@ -71,8 +72,6 @@
./share/postgresql/extension/file_fdw.control
./share/postgresql/extension/hstore--unpackaged--1.0.sql
./share/postgresql/extension/hstore--1.0--1.1.sql
./share/postgresql/extension/hdfs_fdw--1.0.sql
./share/postgresql/extension/hdfs_fdw.control
./share/postgresql/extension/log_fdw--1.0.sql
./share/postgresql/extension/log_fdw.control
./share/postgresql/extension/mot_fdw--1.0.sql
@ -724,7 +723,6 @@
./lib/postgresql/pgxs/src/Makefile.port
./lib/postgresql/pgxs/src/nls-global.mk
./lib/postgresql/pgxs/src/Makefile.global
./lib/postgresql/pgxs/src/get_PlatForm_str.sh
./lib/postgresql/pgxs/config/install-sh
./lib/postgresql/euc_cn_and_mic.so
./lib/postgresql/latin_and_mic.so
@ -764,18 +762,11 @@
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libcgroup.so*
./lib/libcom_err_gauss.so*
./lib/libatomic.so*
./lib/libmasstree.so
./lib/libupb.so
./lib/libupb.so.9
./lib/libupb.so.9.0.0
./lib/libupb.so*
./lib/libabsl_str_format_internal.so
./lib/libabsl_strings.so
./lib/libabsl_throw_delegate.so
@ -789,61 +780,28 @@
./lib/libabsl_log_severity.so
./lib/libaddress_sorting.so
./lib/libaddress_sorting.so.9
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libgssapi_krb5_gauss.so*
./lib/libgssrpc_gauss.so*
./lib/libk5crypto_gauss.so*
./lib/libkadm5clnt.so
./lib/libkadm5clnt_mit.so
./lib/libkadm5clnt_mit.so.11
./lib/libkadm5clnt_mit.so.11.0
./lib/libkadm5clnt_mit.so.12
./lib/libkadm5clnt_mit.so.12.0
./lib/libkadm5clnt_mit.so*
./lib/libkadm5srv.so
./lib/libkadm5srv_mit.so
./lib/libkadm5srv_mit.so.11
./lib/libkadm5srv_mit.so.11.0
./lib/libkadm5srv_mit.so.12
./lib/libkadm5srv_mit.so.12.0
./lib/libkdb5.so
./lib/libkdb5.so.9
./lib/libkdb5.so.9.0
./lib/libkdb5.so.10
./lib/libkdb5.so.10.0
./lib/libkrad.so
./lib/libkrad.so.0
./lib/libkrad.so.0.0
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkadm5srv_mit.so*
./lib/libkdb5.so*
./lib/libkrad.so*
./lib/libkrb5_gauss.so*
./lib/libkrb5support_gauss.so*
./lib/krb5/plugins/kdb/db2.so
./lib/libverto.so
./lib/libverto.so.0
./lib/libverto.so.0.0
./lib/libverto.so*
./lib/libcurl.so*
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libcrypto.so*
./lib/libssl.so*
./lib/libgcc_s.so.1
./lib/libstdc++.so.6
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libz.so*
./lib/liblz4.so*
./lib/libcjson.so*
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libconfig.so*
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./share/llvmir/GaussDB_expr.ir
@ -856,22 +814,13 @@
./lib/libpcre.so*
./lib/libsecurec.so
./lib/libxml2.so*
./lib/libparquet.so
./lib/libparquet.so.14
./lib/libparquet.so.14.1.0
./lib/libarrow.so
./lib/libarrow.so.14
./lib/libarrow.so.14.1.0
./lib/OBS.ini
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.5.0
./lib/libzstd.so*
./lib/libxgboost.so
./lib/libpagecompression.so
./lib/libpagecompression.so.1
./lib/libpagecompression.so*
./include/postgresql/server/postgres_ext.h
./include/postgresql/server/pg_config_os.h
./include/postgresql/server/pgtime.h
@ -999,8 +948,8 @@
./include/postgresql/server/storage/item/itemptr.h
./include/postgresql/server/storage/lock/s_lock.h
./include/postgresql/server/storage/backendid.h
./include/postgresql/server/storage/lock.h
./include/postgresql/server/storage/lwlock.h
./include/postgresql/server/storage/lock/lock.h
./include/postgresql/server/storage/lock/lwlock.h
./include/postgresql/server/storage/lwlocknames.h
./include/postgresql/server/storage/barrier.h
./include/postgresql/server/storage/shmem.h
@ -1163,25 +1112,7 @@
./include/postgresql/server/catalog/namespace.h
./include/postgresql/server/commands/trigger.h
./include/postgresql/server/executor/spi.h
./include/postgresql/server/access/ustore/undo/knl_uundotype.h
./include/postgresql/server/access/ustore/undo/knl_uundoapi.h
./include/postgresql/server/access/ustore/knl_uheap.h
./include/postgresql/server/access/ustore/knl_utuple.h
./include/postgresql/server/access/ustore/knl_utype.h
./include/postgresql/server/access/ustore/knl_upage.h
./include/postgresql/server/access/ustore/knl_uredo.h
./include/postgresql/server/access/ustore/knl_uundovec.h
./include/postgresql/server/access/ustore/knl_uundorecord.h
./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h
./include/postgresql/server/access/ustore/undo/knl_uundotxn.h
./include/postgresql/server/access/ustore/undo/knl_uundozone.h
./include/postgresql/server/access/ustore/undo/knl_uundospace.h
./include/postgresql/server/communication/commproxy_basic.h
./include/postgresql/server/access/parallel_recovery/page_redo.h
./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h
./include/postgresql/server/executor/exec/execdesc.h
./include/postgresql/server/db4ai/matrix.h
./include/postgresql/server/db4ai/scores.h
./include/postgresql/server/db4ai/db4ai.h
./jre/ASSEMBLY_EXCEPTION
./jre/bin/java
./jre/bin/jjs
@ -1367,33 +1298,18 @@
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libconfig.so*
./lib/libcrypto.so*
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libssl.so*
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libgssapi_krb5_gauss.so*
./lib/libgssrpc_gauss.so*
./lib/libk5crypto_gauss.so*
./lib/libkrb5support_gauss.so*
./lib/libkrb5_gauss.so*
./lib/libcom_err_gauss.so*
[libpq]
./lib/libpq.a
./lib/libpq.so
@ -1403,33 +1319,18 @@
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libconfig.so*
./lib/libcrypto.so*
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libssl.so*
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libgssapi_krb5_gauss.so*
./lib/libgssrpc_gauss.so*
./lib/libk5crypto_gauss.so*
./lib/libkrb5support_gauss.so*
./lib/libkrb5_gauss.so*
./lib/libcom_err_gauss.so*
./include/gs_thread.h
./include/gs_threadlocal.h
./include/postgres_ext.h

View File

@ -52,6 +52,8 @@ option(ENABLE_PRIVATEGAUSS "enable privategauss,the old is --enable-pribategauss
option(ENABLE_LITE_MODE "enable lite in single_node mode,the old is --enable-lite-mode" OFF)
option(ENABLE_DEBUG "enable privategauss,the old is --enable-pribategauss" OFF)
option(ENABLE_MOT "enable mot in single_node mode,the old is --enable-mot" OFF)
option(ENABLE_NUMA "enable numa,the old is --enable-numa" ON)
option(ENABLE_LSE "enable lse,the old is --enable-lse" ON)
option(ENABLE_MYSQL_FDW "enable export or import data with mysql,the old is --enable-mysql-fdw" OFF)
option(ENABLE_ORACLE_FDW "enable export or import data with oracle,the old is --enable-oracle-fdw" OFF)
option(BUILD_BY_CMAKE "the BUILD_BY_CMAKE is new,used in distribute pg_regress.cpp" ON)
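The new ENABLE_NUMA and ENABLE_LSE switches give aarch64 builds a way to opt out of the NUMA and ARM LSE specific code paths handled below. A hedged example of turning them off on the cmake command line (the source path and any other flags are placeholders):
cmake /path/to/openGauss-server -DENABLE_NUMA=OFF -DENABLE_LSE=OFF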
@ -112,7 +114,7 @@ if($ENV{DEBUG_TYPE} STREQUAL "debug" OR $ENV{DEBUG_TYPE} STREQUAL "memcheck")
endif()
if(${BUILD_TUPLE} STREQUAL "aarch64")
if($ENV{DEBUG_TYPE} STREQUAL "release" AND ${ENABLE_MULTIPLE_NODES} STREQUAL "OFF")
if($ENV{DEBUG_TYPE} STREQUAL "release" AND ${ENABLE_MULTIPLE_NODES} STREQUAL "OFF" AND ${ENABLE_LSE} STREQUAL "ON")
set(DB_COMMON_DEFINE ${DB_COMMON_DEFINE} -D__ARM_LSE)
set(OS_OPTIONS -march=armv8-a+crc+lse)
else()
@ -120,7 +122,7 @@ if(${BUILD_TUPLE} STREQUAL "aarch64")
endif()
endif()
if(${BUILD_TUPLE} STREQUAL "aarch64")
if(${BUILD_TUPLE} STREQUAL "aarch64" AND ${ENABLE_NUMA} STREQUAL "ON")
if(NOT $ENV{DEBUG_TYPE} STREQUAL "memcheck")
set(DB_COMMON_DEFINE ${DB_COMMON_DEFINE} -D__USE_NUMA)
endif()
@ -171,7 +173,7 @@ set(MEMCHECK_LIBS "")
set(MEMCHECK_LINK_DIRECTORIES "")
if(${ENABLE_MEMORY_CHECK})
set(MEMCHECK_FLAGS ${MEMCHECK_FLAGS} -fsanitize=address -fsanitize=leak -fno-omit-frame-pointer)
set(MEMCHECK_LIBS ${MEMCHECK_LIBS} libasan.a)
set(MEMCHECK_LIBS ${MEMCHECK_LIBS} -static-libasan)
set(MEMCHECK_LINK_DIRECTORIES ${MEMCHECK_LINK_DIRECTORIES} ${MEMCHECK_LIB_PATH})
set(GAUSSDB_CONFIGURE "${GAUSSDB_CONFIGURE} -DENABLE_MEMORY_CHECK")
list(REMOVE_ITEM LIB_SECURE_OPTIONS -fstack-protector)
@ -183,7 +185,7 @@ set(THREAD_LIBS "")
set(THREAD_LINK_DIRECTORIES "")
if(${ENABLE_THREAD_CHECK})
set(THREAD_FLAGS ${THREAD_FLAGS} -fsanitize=thread -fno-omit-frame-pointer)
set(THREAD_LIBS ${THREAD_LIBS} libtsan.a)
set(THREAD_LIBS ${THREAD_LIBS} -static-libtsan)
set(THREAD_LINK_DIRECTORIES ${THREAD_LINK_DIRECTORIES} ${MEMCHECK_LIB_PATH})
set(GAUSSDB_CONFIGURE "${GAUSSDB_CONFIGURE} -DENABLE_THREAD_CHECK")
endif()
@ -244,7 +246,7 @@ add_definitions(-Wno-builtin-macro-redefined)
SET_GCC_FLAGS(DB_COMMON_FLAGS "")
#hotpatch
set(HOTPATCH_PLATFORM_LIST suse11_sp1_x86_64 suse12_sp5_x86_64 euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp10_aarch64 euleros2.0_sp2_x86_64 euleros2.0_sp5_x86_64 euleros2.0_sp10_x86_64 kylinv10_sp1_aarch64 kylinv10_sp1_x86_64_intel)
set(HOTPATCH_PLATFORM_LIST suse11_sp1_x86_64 suse12_sp5_x86_64 euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp10_aarch64 euleros2.0_sp2_x86_64 euleros2.0_sp5_x86_64 euleros2.0_sp10_x86_64 kylinv10_sp1_aarch64 kylinv10_sp1_x86_64)
set(HOTPATCH_ARM_LIST euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp10_aarch64 kylinv10_sp1_aarch64)
list(FIND HOTPATCH_PLATFORM_LIST "${PLAT_FORM_NAME}" RET_HOTPATCH)
list(FIND HOTPATCH_ARM_LIST "${PLAT_FORM_NAME}" RET_ARM_HOTPATCH)

View File

@ -5,12 +5,10 @@ option(ENABLE_LLT "enable llt, current value is --enable-llt" OFF)
option(ENABLE_UT "enable ut, current value is --enable-ut" OFF)
option(WITH_OPENEULER_OS "Build openGauss rpm package on openEuler os" OFF)
execute_process(COMMAND sh ${PROJECT_SRC_DIR}/get_PlatForm_str.sh OUTPUT_VARIABLE PLAT_FORM_STR OUTPUT_STRIP_TRAILING_WHITESPACE)
#############################################################################
# get the depend lib path
# 1. libedit, event, libcgroup, kerberos, zlib1.2.11, boost,
# libxml and protobuf are support parameter --enable-llt and --enable-ut;
# libxml are support parameter --enable-llt and --enable-ut;
# $(LIB_SUPPORT_LLT)
# 2. Huawei_Secure_C, gtest, mockcpp, unixodbc, libstd
# and openssl not support parameter --enable-llt and --enable-ut;
@ -37,12 +35,11 @@ endif()
set(LIB_UNIFIED_SUPPORT comm)
set(MEMCHECK_BUILD_TYPE debug)
set(DEPENDENCY_PATH ${3RD_PATH}/dependency/${PLAT_FORM_STR})
set(PLATFORM_PATH ${3RD_PATH}/platform/${PLAT_FORM_STR})
set(BUILDTOOLS_PATH ${3RD_PATH}/buildtools/${PLAT_FORM_STR})
set(COMPONENT_PATH ${3RD_PATH}/component/${PLAT_FORM_STR})
set(DEPENDENCY_PATH ${3RD_PATH}/kernel/dependency)
set(PLATFORM_PATH ${3RD_PATH}/kernel/platform)
set(BUILDTOOLS_PATH ${3RD_PATH}/buildtools)
set(COMPONENT_PATH ${3RD_PATH}/kernel/component)
set(MEMCHECK_HOME ${DEPENDENCY_PATH}/memcheck/${MEMCHECK_BUILD_TYPE})
set(CJSON_HOME ${DEPENDENCY_PATH}/cjson/${SUPPORT_LLT})
set(ETCD_HOME ${DEPENDENCY_PATH}/etcd/${LIB_UNIFIED_SUPPORT})
set(EVENT_HOME ${DEPENDENCY_PATH}/event/${LIB_UNIFIED_SUPPORT})
@ -59,23 +56,18 @@ set(CGROUP_HOME ${DEPENDENCY_PATH}/libcgroup/${SUPPORT_LLT})
set(CURL_HOME ${DEPENDENCY_PATH}/libcurl/${SUPPORT_LLT})
set(EDIT_HOME ${DEPENDENCY_PATH}/libedit/${SUPPORT_LLT})
set(OBS_HOME ${DEPENDENCY_PATH}/libobs/${LIB_UNIFIED_SUPPORT})
set(ORC_HOME ${DEPENDENCY_PATH}/liborc/${SUPPORT_LLT})
set(PARQUET_HOME ${DEPENDENCY_PATH}/libparquet/${SUPPORT_LLT})
set(XML2_HOME ${DEPENDENCY_PATH}/libxml2/${SUPPORT_LLT})
set(LLVM_HOME ${DEPENDENCY_PATH}/llvm/${LIB_UNIFIED_SUPPORT})
set(LZ4_HOME ${DEPENDENCY_PATH}/lz4/${SUPPORT_LLT})
set(NANOMSG_HOME ${DEPENDENCY_PATH}/nanomsg/${LIB_UNIFIED_SUPPORT})
set(NANOMSG_HOME ${DEPENDENCY_PATH}/nng/${LIB_UNIFIED_SUPPORT})
set(NCURSES_HOME ${DEPENDENCY_PATH}/ncurses/${SUPPORT_LLT})
set(OPENSSL_HOME ${DEPENDENCY_PATH}/openssl/${LIB_UNIFIED_SUPPORT})
set(PLJAVA_HOME ${DEPENDENCY_PATH}/pljava/${LIB_UNIFIED_SUPPORT})
if (EXISTS "${3RD_PATH}/platform/openjdk8/${BUILD_TUPLE}/jdk")
set(JAVA_HOME ${3RD_PATH}/platform/openjdk8/${BUILD_TUPLE}/jdk)
set(JAVA_HOME ${PLATFORM_PATH}/openjdk8/${BUILD_TUPLE}/jdk)
else()
set(JAVA_HOME ${3RD_PATH}/platform/huaweijdk8/${BUILD_TUPLE}/jdk)
set(JAVA_HOME ${PLATFORM_PATH}/huaweijdk8/${BUILD_TUPLE}/jdk)
endif()
set(PROTOBUF_HOME ${DEPENDENCY_PATH}/protobuf/${SUPPORT_LLT})
set(THRIFT_HOME ${DEPENDENCY_PATH}/thrift)
set(SNAPPY_HOME ${DEPENDENCY_PATH}/snappy/${LIB_UNIFIED_SUPPORT})
set(ZLIB_HOME ${DEPENDENCY_PATH}/zlib1.2.11/${SUPPORT_LLT})
set(XGBOOST_HOME ${DEPENDENCY_PATH}/xgboost/${SUPPORT_LLT})
set(ZSTD_HOME ${DEPENDENCY_PATH}/zstd)
@ -87,19 +79,19 @@ set(DCF_HOME ${COMPONENT_PATH}/dcf)
set(MOCKCPP_HOME ${BUILDTOOLS_PATH}/mockcpp/${LIB_UNIFIED_SUPPORT})
set(GTEST_HOME ${BUILDTOOLS_PATH}/gtest/${LIB_UNIFIED_SUPPORT})
set(LIBSTD_HOME ${BUILDTOOLS_PATH}/gcc${GCC_VERSION_LIT}/${LIB_UNIFIED_SUPPORT})
set(MASSTREE_HOME ${BUILDTOOLS_PATH}/masstree/${LIB_UNIFIED_SUPPORT})
set(NUMA_HOME ${DEPENDENCY_PATH}/numactl/${SUPPORT_LLT})
set(ARROW_HOME ${DEPENDENCY_PATH}/libparquet/${SUPPORT_LLT})
set(BOOST_HOME ${DEPENDENCY_PATH}/boost/${SUPPORT_LLT})
set(ODBC_HOME ${3RD_PATH}/dependency/${PLAT_FORM_STR}/unixodbc)
set(ODBC_HOME ${DEPENDENCY_PATH}/unixodbc)
set(MASSTREE_HOME ${DEPENDENCY_PATH}/masstree/${LIB_UNIFIED_SUPPORT})
set(LCOV_HOME ${BUILDTOOLS_PATH}/gcc${GCC_VERSION_LIT}/gcc/lib/gcc/${HOST_TUPLE})
#############################################################################
# memcheck
#############################################################################
set(MEMCHECK_LIB_PATH ${MEMCHECK_HOME}/gcc${GCC_VERSION}/lib/)
set(GCC_LIB_PATH $ENV{GCC_INSTALL_HOME})
set(MEMCHECK_LIB_PATH $ENV{GCC_INSTALL_HOME}/lib64/)
if("${GCC_LIB_PATH}" STREQUAL "")
set(GCC_LIB_PATH ${BUILDTOOLS_PATH}/gcc${GCC_VERSION_LIT}/gcc)
set(MEMCHECK_HOME ${DEPENDENCY_PATH}/memcheck/${MEMCHECK_BUILD_TYPE})
set(MEMCHECK_LIB_PATH ${MEMCHECK_HOME}/gcc${GCC_VERSION}/lib/)
endif()
#############################################################################
# lcov
@ -206,18 +198,6 @@ set(LIBEDIT_LIB_PATH ${EDIT_HOME}/lib)
set(LIBOBS_INCLUDE_PATH ${OBS_HOME}/include)
set(LIBOBS_LIB_PATH ${OBS_HOME}/lib)
#############################################################################
# orc component
#############################################################################
set(LIBORC_INCLUDE_PATH ${ORC_HOME}/include)
set(LIBORC_LIB_PATH ${ORC_HOME}/lib)
#############################################################################
# parquet component
#############################################################################
set(LIBPARQUET_INCLUDE_PATH ${PARQUET_HOME}/include)
set(LIBPARQUET_LIB_PATH ${PARQUET_HOME}/lib)
#############################################################################
# xml2 component
#############################################################################
@ -259,25 +239,6 @@ set(LIBOPENSSL_LIB_PATH ${OPENSSL_HOME}/lib)
set(LIBOPENSSL_SSL_PATH ${OPENSSL_HOME}/ssl)
set(LIBOPENSSL_INCLUDE_PATH ${OPENSSL_HOME}/include)
#############################################################################
# protobuf component
#############################################################################
set(PROTOBUF_INCLUDE_PATH ${PROTOBUF_HOME}/include)
set(PROTOBUF_LIB_PATH ${PROTOBUF_HOME}/lib)
#############################################################################
# thrift component
#############################################################################
set(LIBTHRIFT_INCLUDE_PATH ${THRIFT_HOME}/include)
set(LIBTHRIFT_LIB_PATH ${THRIFT_HOME}/lib)
set(LIBTHRIFT_BIN_PATH ${THRIFT_HOME}/bin)
#############################################################################
# snappy component
#############################################################################
set(SNAPPY_INCLUDE_PATH ${SNAPPY_HOME}/include)
set(SNAPPY_LIB_PATH ${SNAPPY_HOME}/lib)
#############################################################################
# zlib component
#############################################################################
@ -328,12 +289,6 @@ set(SECURE_LIB_PATH ${SECURE_HOME}/lib)
set(NUMA_INCLUDE_PATH ${NUMA_HOME}/include)
set(NUMA_LIB_PATH ${NUMA_HOME}/lib)
#############################################################################
# arrow component
#############################################################################
set(ARROW_INCLUDE_PATH ${ARROW_HOME}/include)
set(ARROW_LIB_PATH ${ARROW_HOME}/lib)
#############################################################################
# odbc component
#############################################################################

configure
View File

@ -5890,8 +5890,7 @@ fi
llvm_version_str='10.0.0'
if [[ ! -z "${with_3rdpartydir}" ]] && [[ "$enable_lite_mode" != yes ]]; then
platstr=$(sh src/get_PlatForm_str.sh)
llvm_version_str=`${with_3rdpartydir}/dependency/${platstr}/llvm/comm/bin/llvm-config --version`
llvm_version_str=`${with_3rdpartydir}/kernel/dependency/llvm/comm/bin/llvm-config --version`
fi
llvm_major_version=$(echo $llvm_version_str | awk -F "." '{print $1}')
llvm_minor_version=$(echo $llvm_version_str | awk -F "." '{print $2}')
@ -6622,18 +6621,17 @@ fi
#
with_jdk=''
if [[ ! -z "${with_3rdpartydir}" ]] && [[ "$with_openeuler_os" != yes ]]; then
platstr=$(sh src/get_PlatForm_str.sh)
cpuarch=$(uname -m)
for d in "openjdk8" "huaweijdk8"; do
$as_echo "$as_me:$LINENO: checking for jdk in ${with_3rdpartydir}/platform/${d}/${cpuarch}" >&5
if [ ! -d "${with_3rdpartydir}/platform/${d}/${cpuarch}" ]; then
$as_echo "$as_me:$LINENO: checking for jdk in ${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}" >&5
if [ ! -d "${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}" ]; then
$as_echo "$as_me:$LINENO: result: no" >&5
continue
fi
for d2 in $(ls "${with_3rdpartydir}/platform/${d}/${cpuarch}" | sort -r 2>/dev/null); do
if [ -f "${with_3rdpartydir}/platform/${d}/${cpuarch}/${d2}/jre/bin/java" ]; then
with_jdk="${with_3rdpartydir}/platform/${d}/${cpuarch}/${d2}"
for d2 in $(ls "${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}" | sort -r 2>/dev/null); do
if [ -f "${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}/${d2}/jre/bin/java" ]; then
with_jdk="${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}/${d2}"
break;
fi
done
@ -6643,15 +6641,15 @@ if [[ ! -z "${with_3rdpartydir}" ]] && [[ "$with_openeuler_os" != yes ]]; then
break;
fi
$as_echo "$as_me:$LINENO: checking for jdk in ${with_3rdpartydir}/platform/${platstr}/${d}" >&5
if [ ! -d "${with_3rdpartydir}/platform/${platstr}/${d}" ]; then
$as_echo "$as_me:$LINENO: checking for jdk in ${with_3rdpartydir}/kernel/platform/${d}" >&5
if [ ! -d "${with_3rdpartydir}/kernel/platform/${d}" ]; then
$as_echo "$as_me:$LINENO: result: no" >&5
continue
fi
for d2 in $(ls "${with_3rdpartydir}/platform/${platstr}/${d}" | sort -r 2>/dev/null); do
if [ -f "${with_3rdpartydir}/platform/${platstr}/${d}/${d2}/jre/bin/java" ]; then
with_jdk="${with_3rdpartydir}/platform/${platstr}/${d}/${d2}"
for d2 in $(ls "${with_3rdpartydir}/kernel/platform/${d}" | sort -r 2>/dev/null); do
if [ -f "${with_3rdpartydir}/kernel/platform/${d}/${d2}/jre/bin/java" ]; then
with_jdk="${with_3rdpartydir}/kernel/platform/${d}/${d2}"
break;
fi
done
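The reworked probe walks kernel/platform inside the third-party directory instead of the old platform/<platform-string> layout. A condensed shell equivalent of the per-architecture search above, handy for checking a binarylibs tree by hand (with_3rdpartydir must point at that tree):
cpuarch=$(uname -m)
for d in openjdk8 huaweijdk8; do
    for d2 in $(ls "${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}" 2>/dev/null | sort -r); do
        if [ -f "${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}/${d2}/jre/bin/java" ]; then
            with_jdk="${with_3rdpartydir}/kernel/platform/${d}/${cpuarch}/${d2}"
            break 2
        fi
    done
done
echo "with_jdk=${with_jdk}"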
@ -29832,8 +29830,8 @@ CFLAGS="-DSTREAMPLAN $CFLAGS"
# force set char to signed char
CFLAGS="-fsigned-char $CFLAGS"
platform_version=`sh ./src/get_PlatForm_str.sh`
is_euler=`echo $platform_version | grep euleros`
release_file=`find /etc/ -name "euleros-release"`
is_euler=`echo $release_file | grep EulerOS`
# New gcc version should be compatible to old one
if test "$CC_VERSION" = "$NEW_GCC"; then
CFLAGS=" -std=c++11 -D_GLIBCXX_USE_CXX11_ABI=0 $CFLAGS -fno-aggressive-loop-optimizations -Wno-attributes -fno-omit-frame-pointer -fno-expensive-optimizations"

View File

@ -4,10 +4,9 @@ set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_RULE_MESSAGES OFF)
set(CMAKE_SKIP_RPATH TRUE)
INCLUDE_DIRECTORIES(${LIBTHRIFT_INCLUDE_PATH} ${SNAPPY_INCLUDE_PATH} ${CJSON_INCLUDE_PATH} ${BOOST_INCLUDE_PATH})
INCLUDE_DIRECTORIES(${CJSON_INCLUDE_PATH} ${BOOST_INCLUDE_PATH})
set(CMAKE_MODULE_PATH
${CMAKE_CURRENT_SOURCE_DIR}/carbondata
set(CMAKE_MODULE_PATH
${CMAKE_CURRENT_SOURCE_DIR}/gsredistribute
${CMAKE_CURRENT_SOURCE_DIR}/hstore
${CMAKE_CURRENT_SOURCE_DIR}/test_decoding
@ -21,7 +20,6 @@ set(CMAKE_MODULE_PATH
${CMAKE_CURRENT_SOURCE_DIR}/pagehack
${CMAKE_CURRENT_SOURCE_DIR}/pg_xlogdump
${CMAKE_CURRENT_SOURCE_DIR}/file_fdw
${CMAKE_CURRENT_SOURCE_DIR}/hdfs_fdw
${CMAKE_CURRENT_SOURCE_DIR}/log_fdw
${CMAKE_CURRENT_SOURCE_DIR}/gc_fdw
)
@ -40,7 +38,6 @@ add_subdirectory(dummy_seclabel)
add_subdirectory(pagehack)
add_subdirectory(pg_xlogdump)
add_subdirectory(file_fdw)
add_subdirectory(hdfs_fdw)
add_subdirectory(log_fdw)
if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF")
add_subdirectory(gc_fdw)

View File

@ -1015,7 +1015,7 @@ static void gcDeparseLockingClause(deparse_expr_cxt* context)
* that DECLARE CURSOR ... FOR UPDATE is supported, which it isn't
* before 8.3.
*/
if (relid == root->parse->resultRelation &&
if (relid == linitial2_int(root->parse->resultRelations) &&
(root->parse->commandType == CMD_UPDATE || root->parse->commandType == CMD_DELETE)) {
/* Relation is UPDATE/DELETE target, so use FOR UPDATE */
appendStringInfoString(buf, " FOR UPDATE");

View File

@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* option_single.cpp
* option_single.cpp
* FDW option handling for gc_fdw
*
* IDENTIFICATION
* contrib/gc_fdw/option_single.cpp
* contrib/gc_fdw/option_single.cpp
*
*-------------------------------------------------------------------------
*/
*/
#include "postgres.h"
#include "knl/knl_variable.h"
@ -21,11 +21,11 @@
#include "commands/extension.h"
#include "foreign/foreign.h"
#include "gaussdb_version.h"
#include "utils/builtins.h"
PG_FUNCTION_INFO_V1(gc_fdw_validator);
#include "utils/builtins.h"
PG_FUNCTION_INFO_V1(gc_fdw_validator);
Datum gc_fdw_validator(PG_FUNCTION_ARGS)
{
{
PG_RETURN_VOID();
}
}

View File

@ -1,35 +0,0 @@
#This is the main CMAKE for build all components.
add_custom_command(
OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/orc.cpp
COMMAND protoc-c --c_out=${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/orc.proto
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/orc.proto
COMMENT "Now Generating orc.cpp"
)
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_hdfs_fdw_SRC)
set(TGT_hdfs_fdw_INC
${LIBORC_INCLUDE_PATH}
${PROJECT_SRC_DIR}/include
${PROJECT_OPENGS_DIR}/contrib/hdfs_fdw/include
${PROJECT_OPENGS_DIR}/contrib
)
if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON")
list(APPEND TGT_hdfs_fdw_INC ${PROJECT_TRUNK_DIR}/contrib/carbondata)
endif()
set(hdfs_fdw_DEF_OPTIONS ${MACRO_OPTIONS})
set(hdfs_fdw_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS})
set(hdfs_fdw_LINK_OPTIONS ${BIN_LINK_OPTIONS})
add_static_objtarget(contrib_hdfs_fdw TGT_hdfs_fdw_SRC TGT_hdfs_fdw_INC "${hdfs_fdw_DEF_OPTIONS}" "${hdfs_fdw_COMPILE_OPTIONS}" "${hdfs_fdw_LINK_OPTIONS}")
if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON")
add_dependencies(contrib_hdfs_fdw carbondata_static)
endif()
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/hdfs_fdw.control
DESTINATION share/postgresql/extension/
)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/hdfs_fdw--1.0.sql
DESTINATION share/postgresql/extension/
)

View File

@ -1,39 +0,0 @@
# contrib/hdfs_fdw/Makefile
MODULE_big = hdfs_fdw
OBJS = hdfs_fdw.o scheduler.o
SHLIB_LINK = -lz $(shell pkg-config --libs libprotobuf)
top_builddir = ../..
EXTENSION = hdfs_fdw
DATA = hdfs_fdw--1.0.sql hdfs_fdw.control
REGRESS = hdfs_fdw
EXTRA_CLEAN = sql/hdfs_fdw.sql expected/hdfs_fdw.out
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
subdir = contrib/hdfs_fdw
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/src/gausskernel/common.mk
override CXXFLAGS += -I$(top_builddir)/contrib/hdfs_fdw/include -I$(LIBCARBONDATA_INCLUDE_PATH) -I$(LIBTHRIFT_INCLUDE_PATH)
install:install-data
.PHONY: install-data
install-data: installdirs
$(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/'
installdirs:
$(MKDIR_P) '$(DESTDIR)$(datadir)/extension'
uninstall:uninstall-data
.PHONY: uninstall-data
uninstall-data:
rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA)))
endif

View File

@ -1,23 +0,0 @@
/* contrib/hdfs_fdw/hdfs_fdw--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION hdfs_fdw" to load this file. \quit
CREATE FUNCTION pg_catalog.hdfs_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;
CREATE FUNCTION pg_catalog.hdfs_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;
CREATE FOREIGN DATA WRAPPER hdfs_fdw
HANDLER hdfs_fdw_handler
VALIDATOR hdfs_fdw_validator;
/* we define the unified FDW named dfs_fdw. The hdfs_fdw will be reserved */
/* in order to compate with older version. */
CREATE FOREIGN DATA WRAPPER dfs_fdw
HANDLER hdfs_fdw_handler
VALIDATOR hdfs_fdw_validator;

View File

@ -1,5 +0,0 @@
# hdfs_fdw extension
comment = 'foreign-data wrapper for flat file access'
default_version = '1.0'
module_pathname = '$libdir/hdfs_fdw'
relocatable = true

File diff suppressed because it is too large

View File

@ -1,75 +0,0 @@
#ifndef HDFS_FDW_h
#define HDFS_FDW_h
#include "access/dfs/dfs_am.h"
#include "foreign/foreign.h"
#include "nodes/execnodes.h"
#include "nodes/pg_list.h"
#define HDFS_TUPLE_COST_MULTIPLIER 5
typedef enum OptionType {
T_INVALID_TYPE = 0x0000,
T_SERVER_COMMON_OPTION = 0x0001 << 1,
T_OBS_SERVER_OPTION = 0x0001 << 2,
T_HDFS_SERVER_OPTION = 0x0001 << 3,
T_DUMMY_SERVER_OPTION = 0x0001 << 4,
T_FOREIGN_TABLE_COMMON_OPTION = 0x0001 << 5,
T_FOREIGN_TABLE_OBS_OPTION = 0x0001 << 6,
T_FOREIGN_TABLE_HDFS_OPTION = 0x0001 << 7,
T_SERVER_TYPE_OPTION = 0x0001 << 8,
T_FOREIGN_TABLE_TEXT_OPTION = 0x0001 << 9,
T_FOREIGN_TABLE_CSV_OPTION = 0x0001 << 10
} OptionType;
#define OBS_SERVER_OPTION (T_SERVER_COMMON_OPTION | T_OBS_SERVER_OPTION)
#define HDFS_SERVER_OPTION (T_SERVER_COMMON_OPTION | T_HDFS_SERVER_OPTION)
#define DUMMY_SERVER_OPTION (T_SERVER_COMMON_OPTION | T_DUMMY_SERVER_OPTION)
#define OBS_FOREIGN_TABLE_OPTION \
(T_FOREIGN_TABLE_COMMON_OPTION | T_FOREIGN_TABLE_OBS_OPTION | T_FOREIGN_TABLE_CSV_OPTION | \
T_FOREIGN_TABLE_TEXT_OPTION)
#define HDFS_FOREIGN_TABLE_OPTION \
(T_FOREIGN_TABLE_COMMON_OPTION | T_FOREIGN_TABLE_HDFS_OPTION | T_FOREIGN_TABLE_CSV_OPTION | \
T_FOREIGN_TABLE_TEXT_OPTION)
#define SERVER_TYPE_OPTION (T_SERVER_TYPE_OPTION)
#define DFS_OPTION_ARRAY \
(T_SERVER_COMMON_OPTION | T_HDFS_SERVER_OPTION | T_OBS_SERVER_OPTION | T_DUMMY_SERVER_OPTION | \
HDFS_FOREIGN_TABLE_OPTION | OBS_FOREIGN_TABLE_OPTION)
#define SERVER_TYPE_OPTION_ARRAY (T_SERVER_TYPE_OPTION)
/*
* HdfsValidOption keeps an option name and a context. When an option is passed
* into hdfs_fdw objects (server and foreign table), we compare this option's
* name and context against those of valid options.
*/
typedef struct HdfsValidOption {
const char* optionName;
uint32 optType;
} HdfsValidOption;
/*
* FDW-specific information for RelOptInfo.fdw_private.
*/
typedef struct HdfsFdwPlanState {
double tuplesCount; /* estimate of number of rows in file */
HdfsFdwOptions* hdfsFdwOptions;
} HdfsFdwPlanState;
/*
* HdfsFdwExecState keeps foreign data wrapper specific execution state that we
* create and hold onto when executing the query.
*/
typedef struct HdfsFdwExecState {
dfs::reader::ReaderState* readerState;
dfs::reader::Reader* fileReader;
} HdfsFdwExecState;
/* Function declarations for foreign data wrapper */
extern "C" Datum hdfs_fdw_handler(PG_FUNCTION_ARGS);
extern "C" Datum hdfs_fdw_validator(PG_FUNCTION_ARGS);
#endif /* hdfs_fdw_H */

View File

@ -1,71 +0,0 @@
#ifndef SCHEDULER_H_
#define SCHEDULER_H_
#include "hdfs_fdw.h"
#include "foreign/foreign.h"
#include "nodes/execnodes.h"
#include "nodes/pg_list.h"
#include <stdio.h>
#include <stdlib.h>
#define PGXC_NODE_ID 9015
#define MAX_FOREIGN_PARTITIONKEY_NUM 4
#define MASK_HIGH_UINT32(x) (x & (uint64)0xFFFFFFFF)
#define GETIP(x) ((uint32)(x & (uint64)0xFFFFFFFF))
#define GETOID(x) ((uint32)(x >> 32))
#define LOCAL_IP "127.0.0.1"
#define LOCAL_HOST "localhost"
#define MAX_HOST_NAME_LENGTH 255
#define EXTRA_IP_NUM 32
#define MAX_ROUNDROBIN_AVAILABLE_DN_NUM 256
#define MAX_UINT32 (0xFFFFFFFF)
typedef struct dnWork {
Oid nodeOid;
List* toDoList;
} dnWork;
typedef struct dnInfoStat {
uint32 ipAddr;
uint32 Start;
uint32 Cnt;
} dnInfoStat;
/*
* This is executed on CN to schedule the files to each dn for load balance and use cache.
* add param isAnalyze because the func CNScheduling has called by foreign scan which get random dn for schedule,
* the random dn also changed, we should get a determinded dn to get single stats for global stats in order to get
* stable stats.
*/
List* CNScheduling(Oid foreignTableId, Index relId, List* columnList, List* scanClauses, List*& prunningResult,
List*& partList, char locatorType, bool isAnalyze, List* allColumnList, int16 attrNum, int64* fileNum);
/* CNSchedulingForAnalyze
* firstly calculate the files to be analyzed,and because we just do analyze in one datanode,
* the analyze files is about (total files) /(datanode number)
* we will find the suitable datanode to do the anlayze and return
* add param isglbstats in order to discriminate the processing of single stats and global stats.
* we only get files in one dn for single stats, and we must get files in all dns for global stats.
*/
List* CNSchedulingForAnalyze(unsigned int* totalFilesNum, unsigned int* numOfDns, Oid foreignTableId, bool isglbstats);
/*
* Assign the list of splits scheduled by CN to the current data node.
* @_in_param splitToDnMap: The list of all the splits which map to each data node.
* @_in_out_param readerState: The state stores the splits of the current data node.
* @_in_param conn: The connector of dfs(hdfs or obs)
*/
void AssignSplits(List* splitToDnMap, dfs::reader::ReaderState* readerState, dfs::DFSConnector* conn);
/*
* Search the catalog pg_partition to build a partition list to return.
*
* @_in param relid: The oid of the foreign table by which we search the partition list from the
* catalog.
* @return Return the partition list we get.
*/
List* GetPartitionList(Oid relid);
#endif /* SCHEDULER_H_ */

View File

@ -1,23 +0,0 @@
#!/bin/bash
#Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
#openGauss is licensed under Mulan PSL v2.
#You can use this software according to the terms and conditions of the Mulan PSL v2.
#You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#-------------------------------------------------------------------------
#
# init.sh
#
# IDENTIFICATION
# contrib/hdfs_fdw/init.sh
#
#-------------------------------------------------------------------------
protoc-c --c_out=. orc.proto

View File

@ -1,149 +0,0 @@
message IntegerStatistics {
optional sint64 minimum = 1;
optional sint64 maximum = 2;
optional sint64 sum = 3;
}
message DoubleStatistics {
optional double minimum = 1;
optional double maximum = 2;
optional double sum = 3;
}
message StringStatistics {
optional string minimum = 1;
optional string maximum = 2;
}
message BucketStatistics {
repeated uint64 count = 1 [packed=true];
}
message DecimalStatistics {
optional string minimum = 1;
optional string maximum = 2;
optional string sum = 3;
}
message DateStatistics {
// min,max values saved as days since epoch
optional sint32 minimum = 1;
optional sint32 maximum = 2;
}
message ColumnStatistics {
optional uint64 numberOfValues = 1;
optional IntegerStatistics intStatistics = 2;
optional DoubleStatistics doubleStatistics = 3;
optional StringStatistics stringStatistics = 4;
optional BucketStatistics bucketStatistics = 5;
optional DecimalStatistics decimalStatistics = 6;
optional DateStatistics dateStatistics = 7;
}
message RowIndexEntry {
repeated uint64 positions = 1 [packed=true];
optional ColumnStatistics statistics = 2;
}
message RowIndex {
repeated RowIndexEntry entry = 1;
}
message OrcStream {
// if you add new index stream kinds, you need to make sure to update
// StreamName to ensure it is added to the stripe in the right area
enum Kind {
PRESENT = 0;
DATA = 1;
LENGTH = 2;
DICTIONARY_DATA = 3;
DICTIONARY_COUNT = 4;
SECONDARY = 5;
ROW_INDEX = 6;
}
required Kind kind = 1;
optional uint32 column = 2;
optional uint64 length = 3;
}
message ColumnEncoding {
enum Kind {
DIRECT = 0;
DICTIONARY = 1;
DIRECT_V2 = 2;
DICTIONARY_V2 = 3;
}
required Kind kind = 1;
optional uint32 dictionarySize = 2;
}
message StripeFooter {
repeated OrcStream streams = 1;
repeated ColumnEncoding columns = 2;
}
message FieldType {
enum Kind {
BOOLEAN = 0;
BYTE = 1;
SHORT = 2;
INT = 3;
LONG = 4;
FLOAT = 5;
DOUBLE = 6;
STRING = 7;
BINARY = 8;
TIMESTAMP = 9;
LIST = 10;
MAP = 11;
STRUCT = 12;
UNION = 13;
DECIMAL = 14;
DATE = 15;
}
required Kind kind = 1;
repeated uint32 subtypes = 2 [packed=true];
repeated string fieldNames = 3;
}
message StripeInformation {
optional uint64 offset = 1;
optional uint64 indexLength = 2;
optional uint64 dataLength = 3;
optional uint64 footerLength = 4;
optional uint64 numberOfRows = 5;
}
message UserMetadataItem {
required string name = 1;
required bytes value = 2;
}
message Footer {
optional uint64 headerLength = 1;
optional uint64 contentLength = 2;
repeated StripeInformation stripes = 3;
repeated FieldType types = 4;
repeated UserMetadataItem metadata = 5;
optional uint64 numberOfRows = 6;
repeated ColumnStatistics statistics = 7;
optional uint32 rowIndexStride = 8;
}
enum CompressionKind {
NONE = 0;
ZLIB = 1;
SNAPPY = 2;
LZO = 3;
}
// Serialized length must be less that 255 bytes
message PostScript {
optional uint64 footerLength = 1;
optional CompressionKind compression = 2;
optional uint64 compressionBlockSize = 3;
repeated uint32 version = 4 [packed = true];
// Leave this last in the record
optional string magic = 8000;
}

File diff suppressed because it is too large

View File

@ -285,12 +285,12 @@ static const unsigned int crc32tab[256] = {
unsigned int crc32_sz(char* buf, int size)
{
unsigned int crc = ~((unsigned int)0);
char* p = NULL;
unsigned char* p = NULL;
int len, nr;
len = 0;
nr = size;
for (len += nr, p = buf; nr--; ++p)
for (len += nr, p = (unsigned char*)buf; nr--; ++p)
_CRC32_(crc, *p);
return ~crc;
}

View File

@ -397,6 +397,7 @@ Datum hstore_from_text(PG_FUNCTION_ARGS)
p.needfree = false;
key = PG_GETARG_TEXT_PP(0);
FUNC_CHECK_HUGE_POINTER(false, key, "tconvert()");
p.key = VARDATA_ANY(key);
p.keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(key));
@ -405,6 +406,7 @@ Datum hstore_from_text(PG_FUNCTION_ARGS)
p.isnull = true;
} else {
val = PG_GETARG_TEXT_PP(1);
FUNC_CHECK_HUGE_POINTER(false, val, "tconvert()");
p.val = VARDATA_ANY(val);
p.vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(val));
p.isnull = false;

View File

@ -105,6 +105,13 @@ static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions*
data->only_local = true;
data->tableWhiteList = NIL;
/* read default option from GUC */
DecodeOptionsDefault *defaultOption = LogicalDecodeGetOptionsDefault();
if (defaultOption != NULL) {
data->max_txn_in_memory = defaultOption->max_txn_in_memory;
data->max_reorderbuffer_in_memory = defaultOption->max_reorderbuffer_in_memory;
}
ctx->output_plugin_private = data;
opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT;

View File

@ -1,46 +1,46 @@
#
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ---------------------------------------------------------------------------------------
#
# Makefile
# Makefile for the mysql_fdw
#
# IDENTIFICATION
# contrib/mysql_fdw/Makefile
#
# ---------------------------------------------------------------------------------------
all:mysql_fdw_target
install:install-data
top_builddir ?= ../../
include $(top_builddir)/src/Makefile.global
CODE_DIR=mysql_fdw
.PHONY: mysql_fdw_target
mysql_fdw_target: prep_checked
@make -C $(MYFDW_HOME) TOP_DIR=$(abs_top_srcdir)
prep_checked:
@test -d $(MYFDW_HOME) || ( echo "ERROR: You need to fetch and copy mysql_fdw from the 'third_party' repo to the 'third_party_binarylibs' repo and keep the directory structure unchanged" && exit 1 )
@test -d $(MYFDW_HOME) && date > prep_checked
.PHONY: install-data
install-data: mysql_fdw_target
@make -C $(MYFDW_HOME) TOP_DIR=$(abs_top_srcdir) install
uninstall distclean clean:
@rm -rf $(MYFDW_HOME)/*.o
@rm -rf $(MYFDW_HOME)/*.so
@rm -f prep_checked
MYSQL_FDW_RELEVANT_SOURCES = connection.c deparse.c mysql_fdw.c mysql_query.c option.c
#
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ---------------------------------------------------------------------------------------
#
# Makefile
# Makefile for the mysql_fdw
#
# IDENTIFICATION
# contrib/mysql_fdw/Makefile
#
# ---------------------------------------------------------------------------------------
all:mysql_fdw_target
install:install-data
top_builddir ?= ../../
include $(top_builddir)/src/Makefile.global
CODE_DIR=mysql_fdw
.PHONY: mysql_fdw_target
mysql_fdw_target: prep_checked
@make -C $(MYFDW_HOME) TOP_DIR=$(abs_top_srcdir)
prep_checked:
@test -d $(MYFDW_HOME) || ( echo "ERROR: You need to fetch and copy mysql_fdw from the 'third_party' repo to the 'third_party_binarylibs' repo and keep the directory structure unchanged" && exit 1 )
@test -d $(MYFDW_HOME) && date > prep_checked
.PHONY: install-data
install-data: mysql_fdw_target
@make -C $(MYFDW_HOME) TOP_DIR=$(abs_top_srcdir) install
uninstall distclean clean:
@rm -rf $(MYFDW_HOME)/*.o
@rm -rf $(MYFDW_HOME)/*.so
@rm -f prep_checked
MYSQL_FDW_RELEVANT_SOURCES = connection.c deparse.c mysql_fdw.c mysql_query.c option.c

View File

@ -11,13 +11,13 @@ if(${ENABLE_DEBUG} STREQUAL "ON")
endif()
set(pagehack_COMPILE_OPTIONS ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${CHECK_OPTIONS} ${BIN_SECURE_OPTIONS} ${OPTIMIZE_OPTIONS})
set(pagehack_LINK_OPTIONS ${BIN_LINK_OPTIONS})
set(pagehack_LINK_LIBS -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lzstd -lpagecompression)
set(pagehack_LINK_LIBS -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -lzstd -lpagecompression -llz4)
add_bintarget(pagehack TGT_pagehack_SRC TGT_pagehack_INC "${pagehack_DEF_OPTIONS}" "${pagehack_COMPILE_OPTIONS}" "${pagehack_LINK_OPTIONS}" "${pagehack_LINK_LIBS}")
add_dependencies(pagehack pgport_static pagecompression)
target_link_directories(pagehack PUBLIC
${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} ${SECURE_LIB_PATH}
${LIBOPENSSL_LIB_PATH} ${LIBCURL_LIB_PATH} ${SECURE_LIB_PATH}
${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${CMAKE_BINARY_DIR}/lib
${ZSTD_LIB_PATH} ${PROJECT_SRC_DIR}/lib/page_compression
${ZSTD_LIB_PATH} ${LZ4_LIB_PATH} ${PROJECT_SRC_DIR}/lib/page_compression
)
install(TARGETS pagehack RUNTIME DESTINATION bin)

View File

@ -16,7 +16,7 @@ top_builddir = ../..
include $(top_builddir)/src/Makefile.global
override CPPFLAGS += -I${top_builddir}/src/lib/page_compression
override LDFLAGS += -L${top_builddir}/src/lib/page_compression
override CFLAGS += -lpagecompression -lzstd
override CFLAGS += -lpagecompression -lzstd -llz4
ifeq ($(enable_debug), yes)
PG_CPPFLAGS += -DDEBUG
endif

View File

@ -46,6 +46,7 @@
#include <time.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <new>
#include "access/heapam.h"
#include "access/htup.h"
@ -141,7 +142,15 @@ static const char* PgHeapRelName[] = {"pg_class",
"pg_am",
"pg_statistic",
"pg_toast"};
typedef enum SegmentType { SEG_HEAP, SEG_FSM, SEG_UHEAP, SEG_INDEX_BTREE, SEG_UNDO, SEG_UNKNOWN } SegmentType;
typedef enum SegmentType {
SEG_HEAP,
SEG_FSM,
SEG_UHEAP,
SEG_INDEX_BTREE,
SEG_UNDO,
SEG_UNKNOWN
} SegmentType;
static void ParsePgClassTupleData(binary tupdata, int len, binary nullBitmap, int natrrs);
static void ParsePgIndexTupleData(binary tupdata, int len, binary nullBitmap, int nattrs);
@ -1055,7 +1064,8 @@ static void ParsePgCudescXXTupleData(binary tupdata, int len, binary nullBitmap,
char* nextAttr = (char*)tupdata;
bool isnulls[nattrs];
memset(isnulls, false, nattrs);
errno_t rc = memset_s(isnulls, nattrs, false, nattrs);
securec_check_c(rc, "\0", "\0");
if (NULL != nullBitmap) {
datlen = (nattrs + 7) / 8;
int j = 0;
@ -2360,12 +2370,14 @@ static void parse_uheap_item(const Item item, unsigned len, int blkno, int linen
indentLevel = 3;
errno_t rc = snprintf_s(buffer, 128, 127, "\t\t\txid:%d, td:%d locker_td:%d\n",
utuple->xid, utuple->td_id, utuple->locker_td_id);
errno_t rc = snprintf_s(buffer, 128, 127, "\t\t\txid:%u, td:%d locker_td:%d\n",
utuple->xid, utuple->td_id, utuple->reserved);
securec_check(rc, "\0", "\0");
fprintf(stdout, "%s", buffer);
fprintf(stdout, "\t\t\tNumber of columns: %d\n", UHeapTupleHeaderGetNatts(utuple));
fprintf(stdout, "\t\t\tFlag: %d\n", utuple->flag);
fprintf(stdout, "\t\t\tFlag2: %d\n", utuple->flag2);
fprintf(stdout, "\t\t\tt_hoff: %d\n", utuple->t_hoff);
if (utuple->flag & UHEAP_HAS_NULL) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_HASNULL ");
@ -2705,7 +2717,7 @@ static void ParseUHeapPageHeader(const PageHeader page, int blkno, int blknum)
fprintf(stdout, "\tpd_xid_base: %lu, pd_multi_base: %lu\n",
upage->pd_xid_base, upage->pd_multi_base);
if (upage->pd_upper < upage->pd_lower) {
fprintf(stdout, "WARNING: INVALID PAGE!");
fprintf(stdout, "WARNING: INVALID PAGE!\n");
} else {
freeSpace = upage->pd_upper - upage->pd_lower;
g_freeMax = freeSpace > g_freeMax ? freeSpace : g_freeMax;
@ -2890,7 +2902,7 @@ static void parse_heap_or_index_page(const char* buffer, int blkno, SegmentType
}
fprintf(stdout, "\n\tUHeap tuple information on this page\n");
for (i = FirstOffsetNumber; i <= nline; i++) {
rowptr = UPageGetRowPtr(buffer, i);
rowptr = UPageGenerateRowPtr(buffer, i);
if (RowPtrIsUsed(rowptr)) {
if (RowPtrHasStorage(rowptr))
nstorage++;
@ -3058,7 +3070,7 @@ static int parse_a_page(const char* buffer, int blkno, int blknum, SegmentType t
if (PageIsNew(page)) {
fprintf(stdout, "Page information of block %d/%d : new page\n\n", blkno, blknum);
ParseHeapPageHeader(page, blkno, blknum);
ParsePageHeader(page, blkno, blknum, type);
return true;
}
@ -3088,10 +3100,10 @@ static BlockNumber CalculateMaxBlockNumber(BlockNumber blknum, BlockNumber start
{
/* parse */
if (start >= blknum) {
fprintf(stderr, "start point exceeds the total block number of relation.\n");
(void)fprintf(stderr, "start point exceeds the total block number of relation.\n");
return InvalidBlockNumber;
} else if ((start + number) > blknum) {
fprintf(stderr, "don't have %d blocks from block %d in the relation, only %d blocks\n", number, start,
(void)fprintf(stderr, "don't have %u blocks from block %u in the relation, only %u blocks\n", number, start,
(blknum - start));
number = blknum;
} else if (number == 0) {
@ -3104,11 +3116,11 @@ static BlockNumber CalculateMaxBlockNumber(BlockNumber blknum, BlockNumber start
static void MarkBufferDirty(char *buffer, size_t len)
{
int writeLen = len / 2;
size_t writeLen = len / 2;
unsigned char fill_byte[writeLen] = {0xFF};
for (int i = 0; i < writeLen; i++)
for (size_t i = 0; i < writeLen; i++)
fill_byte[i] = 0xFF;
auto rc = memcpy_s(buffer + writeLen, BLCKSZ - writeLen, fill_byte, writeLen);
auto rc = memcpy_s(buffer + writeLen, len - writeLen, fill_byte, writeLen);
securec_check(rc, "", "");
}
@ -3117,9 +3129,13 @@ static int parse_page_file(const char *filename, SegmentType type, const uint32
if (type != SEG_HEAP && type != SEG_INDEX_BTREE) {
return parse_uncompressed_page_file(filename, type, start_point, number_read);
}
auto pageCompression = new PageCompression();
if (pageCompression->Init(filename, MAXPGPATH, SegNo) != SUCCESS) {
PageCompression *pageCompression = new(std::nothrow) PageCompression();
if (pageCompression == NULL) {
fprintf(stderr, "compression page new failed\n");
return false;
}
if (pageCompression->Init(filename, (BlockNumber)SegNo) != SUCCESS) {
delete pageCompression;
return parse_uncompressed_page_file(filename, type, start_point, number_read);
}
@ -3134,15 +3150,15 @@ static int parse_page_file(const char *filename, SegmentType type, const uint32
char compressed[BLCKSZ];
char decompressed[BLCKSZ];
while (start < number) {
auto compressedSize = pageCompression->ReadCompressedBuffer(start, compressed, BLCKSZ);
size_t compressedSize = pageCompression->ReadCompressedBuffer(start, compressed, BLCKSZ);
if (compressedSize == 0) {
fprintf(stderr, "read block %d failed, filename: %s_pcd: %s\n", start, filename, strerror(errno));
fprintf(stderr, "read block %u failed, filename: %s: %s\n", start, filename, strerror(errno));
delete pageCompression;
return false;
}
char *parseFile = NULL;
if (compressedSize < BLCKSZ) {
pageCompression->DecompressedPage(compressed, decompressed);
(void)pageCompression->DecompressedPage(compressed, decompressed);
parseFile = decompressed;
} else {
parseFile = compressed;
@ -3157,7 +3173,7 @@ static int parse_page_file(const char *filename, SegmentType type, const uint32
MarkBufferDirty(parseFile, BLCKSZ);
}
if (!pageCompression->WriteBackUncompressedData(compressed, compressedSize, parseFile, BLCKSZ, start)) {
fprintf(stderr, "write back failed, filename: %s_pcd: %s\n", filename, strerror(errno));
fprintf(stderr, "write back failed, filename: %s: %s\n", filename, strerror(errno));
delete pageCompression;
return false;
}
@ -3236,7 +3252,8 @@ static int parse_uncompressed_page_file(const char *filename, SegmentType type,
unsigned char fill_byte[4096] = {0xFF};
for (int i = 0; i < 4096; i++)
fill_byte[i] = 0xFF;
memcpy(buffer + 4096, fill_byte, 4096);
errno_t rc = memcpy_s(buffer + 4096, BLCKSZ - 4096, fill_byte, 4096);
securec_check_c(rc, "\0", "\0");
}
fseek(fd, (start * BLCKSZ), SEEK_SET);
fwrite(buffer, 1, BLCKSZ, fd);
@ -3718,7 +3735,8 @@ static int parse_cu_file(char* filename, uint64 offset)
seg_num = offset / (RELSEG_SIZE * BLCKSZ);
seg_offset = offset % (RELSEG_SIZE * BLCKSZ);
sprintf(fullpath, "%s.%d", filename, seg_num);
errno_t rc = snprintf_s(fullpath, sizeof(fullpath), sizeof(fullpath) - 1, "%s.%d", filename, seg_num);
securec_check_ss_c(rc, "\0", "\0");
if (NULL == (fd = fopen(fullpath, "rb"))) {
fprintf(stderr, "%s: %s\n", fullpath, strerror(errno));
@ -3970,7 +3988,9 @@ static int parse_pg_control_file(char* filename)
* Format system_identifier separately to keep platform-dependent format
* code out of the translatable message string.
*/
snprintf(sysident_str, sizeof(sysident_str), UINT64_FORMAT, ControlFile.system_identifier);
errno_t rc = snprintf_s(sysident_str, sizeof(sysident_str), sizeof(sysident_str) - 1, UINT64_FORMAT,
ControlFile.system_identifier);
securec_check_ss_c(rc, "\0", "\0");
fprintf(stdout, "pg_control version number: %u\n", ControlFile.pg_control_version);
@ -4366,26 +4386,37 @@ static bool parse_dw_file(const char* file_name, uint32 start_page, uint32 page_
size_t result;
uint32 dw_batch_page_num;
dw_file_head_t file_head;
char meta_path[PATH_MAX];
char cur_dir[PATH_MAX];
char* meta_name;
char meta_full_path[PATH_MAX];
char meta_name_tmp[PATH_MAX];
dw_batch_meta_file* batch_meta_file;
char* meta_buf = NULL;
char* dw_buf = NULL;
rc = strcpy_s(cur_dir, PATH_MAX, file_name);
/* copy the full path of dw file to meta_full_path */
if (realpath(file_name, meta_full_path) == NULL && file_name[0] == '\0') {
fprintf(stderr, "could not get correct path or the absolute path is too long!\n");
return false;
}
/* extract the path dir of dw file, which is the dir of meta file */
(void)dirname(meta_full_path);
/* extract the meta name from DW_META_FILE */
rc = strcpy_s(meta_name_tmp, PATH_MAX, DW_META_FILE);
securec_check(rc, "", "");
(void)dirname(cur_dir);
rc = strcpy_s(meta_path, PATH_MAX, cur_dir);
meta_name = basename(meta_name_tmp);
/* fetch the full meta path with above two parts */
rc = strcat_s(meta_full_path, PATH_MAX, "/");
securec_check(rc, "", "");
rc = strcat_s(meta_path, PATH_MAX, "\\");
securec_check(rc, "", "");
rc = strcat_s(meta_path, PATH_MAX, DW_META_FILE);
rc = strcat_s(meta_full_path, PATH_MAX, meta_name);
securec_check(rc, "", "");
fd = fopen(meta_path, "rb+");
fd = fopen(meta_full_path, "rb+");
if (fd == NULL) {
fprintf(stderr, "%s: %s\n", meta_path, strerror(errno));
fprintf(stderr, "%s: %s\n", meta_full_path, strerror(errno));
return false;
}
@ -4400,7 +4431,7 @@ static bool parse_dw_file(const char* file_name, uint32 start_page, uint32 page_
if (result != 1) {
free(meta_buf);
fclose(fd);
fprintf(stderr, "read %s: %s\n", meta_path, strerror(errno));
fprintf(stderr, "read %s: %s\n", meta_full_path, strerror(errno));
return false;
}
@ -4822,11 +4853,15 @@ static int ParseUndoZoneMeta(const char *filename, int zid)
uspMetaInfo = (UndoZoneMetaInfo *) (uspMetaBuffer + offset * sizeof(UndoZoneMetaInfo));
if ((zid == INVALID_ZONE_ID) || (zid != INVALID_ZONE_ID && zid == zoneId)) {
fprintf(stdout,
"zid=%d, insert=%lu, discard=%lu, forcediscard=%lu, allocate=%lu, recycle=%lu, recyclexid=%lu, "
"lsn=%lu.\n",
zoneId, UNDO_PTR_GET_OFFSET(uspMetaInfo->insert), UNDO_PTR_GET_OFFSET(uspMetaInfo->discard),
UNDO_PTR_GET_OFFSET(uspMetaInfo->forceDiscard), UNDO_PTR_GET_OFFSET(uspMetaInfo->allocate),
UNDO_PTR_GET_OFFSET(uspMetaInfo->recycle), uspMetaInfo->recycleXid, uspMetaInfo->lsn);
"zid=%d, insertURecPtr=%lu, discardURecPtr=%lu, forcediscardURecPtr=%lu, allocateTSlotPtr=%lu, "
"recycleTSlotPtr=%lu, recyclexid=%lu, lsn=%lu.\n",
zoneId,
UNDO_PTR_GET_OFFSET(uspMetaInfo->insertURecPtr),
UNDO_PTR_GET_OFFSET(uspMetaInfo->discardURecPtr),
UNDO_PTR_GET_OFFSET(uspMetaInfo->forceDiscardURecPtr),
UNDO_PTR_GET_OFFSET(uspMetaInfo->allocateTSlotPtr),
UNDO_PTR_GET_OFFSET(uspMetaInfo->recycleTSlotPtr),
uspMetaInfo->recycleXid, uspMetaInfo->lsn);
if (zid != INVALID_ZONE_ID) {
break;
@ -4928,6 +4963,7 @@ static int ParseUndoSlot(const char *filename)
}
for (uint32 loop = 0; loop < UNDO_META_SEG_SIZE; loop++) {
int flag = 0;
seekpos = (off_t)BLCKSZ * loop;
lseek(fd, seekpos, SEEK_SET);
rc = memset_s(buffer, BLCKSZ, 0, BLCKSZ);
@ -4942,13 +4978,32 @@ static int ParseUndoSlot(const char *filename)
fprintf(stdout, "Block %u, LSN (%X/%X)\n", loop, (uint32)(PageGetLSN(buffer) >> 32),
(uint32)PageGetLSN(buffer));
if (PageIsNew(buffer)) {
continue;
}
for (uint32 offset = UNDO_LOG_BLOCK_HEADER_SIZE; offset < BLCKSZ - MAXALIGN(sizeof(TransactionSlot));
offset += MAXALIGN(sizeof(TransactionSlot))) {
slot = (TransactionSlot *) (buffer + offset);
if (slot->XactId() != InvalidTransactionId || slot->StartUndoPtr() != INVALID_UNDO_REC_PTR) {
if (flag > 0) {
uint32 tempOffset = offset - (uint32)flag * MAXALIGN(sizeof(TransactionSlot));
TransactionSlot *tempSlot = NULL;
fprintf(stdout, "WARNING: invalid slot num %d.\n", flag);
for (int i = 0; i < flag; i++) {
tempOffset += MAXALIGN(sizeof(TransactionSlot));
tempSlot = (TransactionSlot *) (buffer + tempOffset);
fprintf(stdout,
"offset=%u, xid=%lu, startptr=%lu, endptr=%lu, dbid=%u, rollback finish=%d.\n",
tempOffset, tempSlot->XactId(), tempSlot->StartUndoPtr(), tempSlot->EndUndoPtr(),
tempSlot->DbId(), !(tempSlot->NeedRollback()));
}
flag = 0;
}
fprintf(stdout, "offset=%u, xid=%lu, startptr=%lu, endptr=%lu, dbid=%u, rollback finish=%d.\n",
offset, slot->XactId(), slot->StartUndoPtr(), slot->EndUndoPtr(),
slot->DbId(), !(slot->NeedRollback()));
} else {
flag++;
}
}
}
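
A standalone, simplified illustration of the batching logic added above; ReportSlots and the xids array are invented for the example, and 0 stands in for InvalidTransactionId. Empty slots are only counted as they are seen; when a non-empty slot is reached, the run is flagged with a single warning (the real code also dumps the skipped slots at that point, which the sketch omits).

#include <cstdio>

static void ReportSlots(const unsigned long *xids, int n)
{
    int emptyRun = 0;
    for (int i = 0; i < n; i++) {
        if (xids[i] == 0) {          /* empty slot: just remember it */
            emptyRun++;
            continue;
        }
        if (emptyRun > 0) {
            printf("WARNING: invalid slot num %d.\n", emptyRun);
            emptyRun = 0;
        }
        printf("offset=%d, xid=%lu\n", i, xids[i]);
    }
}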
@ -4964,26 +5019,40 @@ typedef struct UndoHeader {
UndoRecordOldTd wtd_;
UndoRecordPartition wpart_;
UndoRecordTablespace wtspc_;
StringInfoData rawdata_;
} UndoHeader;
char g_dir[100] = {0};
typedef struct UHeapDiskTupleDataHeader {
ShortTransactionId xid;
uint16 td_id : 8, reserved : 8; /* Locker as well as the last updater, 8 bits each */
uint16 flag; /* Flag for tuple attributes */
uint16 flag2; /* Number of attributes for now(11 bits) */
uint8 t_hoff; /* header incl. bitmap, padding */
} UHeapDiskTupleDataHeader;
char g_dir[MAX_PATH_LEN] = {0};
static int OpenUndoBlock(int zoneId, BlockNumber blockno)
{
char fileName[100] = {0};
char fileName[MAX_PATH_LEN] = {0};
const int idLen = 13;
errno_t rc = EOK;
int segno = blockno / UNDOSEG_SIZE;
rc = snprintf_s(fileName, sizeof(fileName), sizeof(fileName), g_dir);
rc = snprintf_s(fileName, sizeof(fileName), sizeof(fileName) - 1, g_dir);
securec_check(rc, "\0", "\0");
rc = snprintf_s(fileName + strlen(fileName), sizeof(fileName), sizeof(fileName), "%05X.%07zX", zoneId, segno);
if (strlen(g_dir) + idLen >= MAX_PATH_LEN) {
fprintf(stdout, "ERROR: path is too long, MAX_PATH_LEN %d, path len %lu.\n", MAX_PATH_LEN, strlen(g_dir));
}
rc = snprintf_s(fileName + strlen(fileName), sizeof(fileName) - strlen(fileName),
sizeof(fileName) - strlen(fileName) - 1, "%05X.%07zX", zoneId, segno);
securec_check(rc, "\0", "\0");
int fd = open(fileName, O_RDONLY | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0) {
fprintf(stderr, "Open file(%s), return code desc(%s).\n", UNDO_META_FILE, strerror(errno));
return-1;
fprintf(stderr, "Open file(%s), return code desc(%s).\n", fileName, strerror(errno));
return -1;
}
return fd;
@ -5057,69 +5126,191 @@ bool ReadUndoRecord(UndoHeader *urec, char *buffer, int startingByte, int *alrea
&readptr, endptr, &myBytesRead, alreadyRead)) {
return false;
}
}
urec->rawdata_.len = urec->wpay_.payloadlen;
if (urec->rawdata_.len > 0) {
if (urec->rawdata_.data == NULL) {
urec->rawdata_.data = (char *)malloc(urec->rawdata_.len);
if (NULL == urec->rawdata_.data) {
fprintf(stderr, "out of memory\n");
return false;
}
}
if (!ReadUndoBytes((char *)urec->rawdata_.data, urec->rawdata_.len,
&readptr, endptr, &myBytesRead, alreadyRead)) {
return false;
}
}
}
return true;
}
static bool ParseUndoRecord(UndoRecPtr urp)
static bool ParseUndoRecord(UndoRecPtr urp, bool forward = false)
{
char buffer[BLCKSZ] = {'\0'};
BlockNumber blockno = UNDO_PTR_GET_BLOCK_NUM(urp);
int zoneId = UNDO_PTR_GET_ZONE_ID(urp);
int startingByte = UNDO_PTR_GET_PAGE_OFFSET(urp);
int fd = -1;
int alreadyRead = 0;
off_t seekpos;
errno_t rc = EOK;
uint32 ret = 0;
UndoHeader *urec = (UndoHeader *)malloc(sizeof(UndoHeader));
UndoRecPtr blkprev = INVALID_UNDO_REC_PTR;
rc = memset_s(urec, sizeof(UndoHeader), (0), sizeof(UndoHeader));
securec_check(rc, "\0", "\0");
do {
fd = OpenUndoBlock(zoneId, blockno);
if (fd < 0) {
free(urec);
return false;
}
seekpos = (off_t)BLCKSZ * (blockno % ((BlockNumber)UNDOSEG_SIZE));
lseek(fd, seekpos, SEEK_SET);
rc = memset_s(buffer, BLCKSZ, 0, BLCKSZ);
char buffer[BLCKSZ] = {'\0'};
BlockNumber blockno = UNDO_PTR_GET_BLOCK_NUM(urp);
int zoneId = UNDO_PTR_GET_ZONE_ID(urp);
int startingByte = UNDO_PTR_GET_PAGE_OFFSET(urp);
int fd = -1;
int alreadyRead = 0;
off_t seekpos;
errno_t rc = EOK;
uint32 ret = 0;
UndoHeader *urec = (UndoHeader *)malloc(sizeof(UndoHeader));
UndoRecPtr blkprev = INVALID_UNDO_REC_PTR;
const UndoRecordSize UNDO_RECORD_FIX_SIZE = SIZE_OF_UNDO_RECORD_HEADER + SIZE_OF_UNDO_RECORD_BLOCK;
uint32 curSize = UNDO_RECORD_FIX_SIZE + sizeof(UndoRecordSize);
PageHeader phdr;
rc = memset_s(urec, sizeof(UndoHeader), (0), sizeof(UndoHeader));
securec_check(rc, "\0", "\0");
ret = read(fd, (char *)buffer, BLCKSZ);
if (ret != BLCKSZ) {
do {
fd = OpenUndoBlock(zoneId, blockno);
if (fd < 0) {
free(urec);
return false;
}
seekpos = (off_t)BLCKSZ * (blockno % ((BlockNumber)UNDOSEG_SIZE));
lseek(fd, seekpos, SEEK_SET);
rc = memset_s(buffer, BLCKSZ, 0, BLCKSZ);
securec_check(rc, "\0", "\0");
ret = read(fd, (char *)buffer, BLCKSZ);
if (ret != BLCKSZ) {
close(fd);
free(urec);
fprintf(stderr, "Read undo meta page failed, expect size(8192), real size(%u).\n", ret);
return false;
}
phdr = (PageHeader)buffer;
if (ReadUndoRecord(urec, buffer, startingByte, &alreadyRead)) {
break;
}
startingByte = UNDO_LOG_BLOCK_HEADER_SIZE;
blockno++;
close(fd);
} while (true);
if (!TransactionIdIsValid(urec->whdr_.xid)) {
free(urec);
fprintf(stderr, "Read undo meta page failed, expect size(8192), real size(%u).\n", ret);
return false;
close(fd);
return true;
}
if (ReadUndoRecord(urec, buffer, startingByte, &alreadyRead)) {
break;
fprintf(stdout, "DumpPageInfo: lsn:%X/%X, pd_checksum:%u, flags:%u, lower:%u, upper:%u, special:%u, "
"pagesize_version:%u.\n",
phdr->pd_lsn.xlogid, phdr->pd_lsn.xrecoff, (uint16)(phdr->pd_checksum), (uint16)(phdr->pd_flags),
(uint16)(phdr->pd_lower), (uint16)(phdr->pd_upper), (uint16)(phdr->pd_special),
(uint16)(phdr->pd_pagesize_version));
blkprev = urec->wblk_.blkprev;
fprintf(stdout, "UndoRecPtr(%lu):\nwhdr = xid(%lu), cid(%u), reloid(%u), relfilenode(%u), utype(%u), "
"uinfo(%u).\n", urp, urec->whdr_.xid, urec->whdr_.cid, urec->whdr_.reloid, urec->whdr_.relfilenode,
urec->whdr_.utype, urec->whdr_.uinfo);
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_PAYLOAD) != 0) {
fprintf(stdout, "flag_payload, size = %lu.\n", SIZE_OF_UNDO_RECORD_PAYLOAD);
curSize += SIZE_OF_UNDO_RECORD_PAYLOAD;
curSize += urec->rawdata_.len;
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_TRANSAC) != 0) {
fprintf(stdout, "flag_transac, size = %lu.\n", SIZE_OF_UNDO_RECORD_TRANSACTION);
curSize += SIZE_OF_UNDO_RECORD_TRANSACTION;
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_BLOCK) != 0) {
fprintf(stdout, "flag_block, size = %lu.\n", SIZE_OF_UNDO_RECORD_BLOCK);
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_OLDTD) != 0) {
fprintf(stdout, "flag_oldtd, size = %lu.\n", SIZE_OF_UNDO_RECORD_OLDTD);
curSize += SIZE_OF_UNDO_RECORD_OLDTD;
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_CONTAINS_SUBXACT) != 0) {
/* subxid is at the end of rawdata */
char *end = (char *)(urec->rawdata_.data) + (urec->rawdata_.len - sizeof(SubTransactionId));
SubTransactionId *subxid = (SubTransactionId *)end;
fprintf(stdout, "flag_subxact, subxid = %lu.\n", *subxid);
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_PARTOID) != 0) {
fprintf(stdout, "flag_partoid, size = %lu.\n", SIZE_OF_UNDO_RECORD_PARTITION);
curSize += SIZE_OF_UNDO_RECORD_PARTITION;
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_TABLESPACEOID) != 0) {
fprintf(stdout, "flag_tablespaceoid, size = %lu.\n", SIZE_OF_UNDO_RECORD_TABLESPACE);
curSize += SIZE_OF_UNDO_RECORD_TABLESPACE;
}
fprintf(stdout, "wblk = blk_prev(%lu), blockno(%u), offset(%u).\n", urec->wblk_.blkprev, urec->wblk_.blkno,
urec->wblk_.offset);
fprintf(stdout, "wtxn = prevurp(%lu).\n", urec->wtxn_.prevurp);
fprintf(stdout, "wpay = payloadlen(%u).\n", urec->wpay_.payloadlen);
fprintf(stdout, "wtd = oldxactid(%lu).\n", urec->wtd_.oldxactid);
fprintf(stdout, "wpart_ = partitionoid(%u).\n", urec->wpart_.partitionoid);
fprintf(stdout, "wtspc_ = tablespace(%u).\n", urec->wtspc_.tablespace);
fprintf(stdout, "len = alreadyRead(%u).\n", alreadyRead);
char prevLen[2];
UndoRecordSize byteToRead = sizeof(UndoRecordSize);
char *readptr = buffer + startingByte - byteToRead;
for (auto i = 0; i < byteToRead; i++) {
prevLen[i] = *readptr;
readptr++;
}
UndoRecordSize prevRecLen = *(UndoRecordSize *)(prevLen);
fprintf(stdout, "prevLen = prevLen(%u).\n", prevRecLen);
if (urec->whdr_.utype != UNDO_INSERT && urec->whdr_.utype != UNDO_MULTI_INSERT &&
urec->rawdata_.len > 0 && urec->rawdata_.data != NULL) {
UHeapDiskTupleDataHeader diskTuple;
if (urec->whdr_.utype == UNDO_INPLACE_UPDATE) {
Assert(urec->rawdata_.len >= (int)SizeOfUHeapDiskTupleData);
uint8 *t_hoff_ptr = (uint8 *)(urec->rawdata_.data);
uint8 t_hoff = *t_hoff_ptr;
char *cur_undodata_ptr = NULL;
fprintf(stdout, "t_hoff %u ", t_hoff);
rc = memcpy_s((char *)&diskTuple + OffsetTdId, SizeOfUHeapDiskTupleHeaderExceptXid,
urec->rawdata_.data + sizeof(uint8), SizeOfUHeapDiskTupleHeaderExceptXid);
securec_check(rc, "", "");
cur_undodata_ptr = urec->rawdata_.data + sizeof(uint8) + t_hoff - OffsetTdId;
uint8* flags_ptr = (uint8 *)cur_undodata_ptr;
uint8 flags = *flags_ptr;
fprintf(stdout, "flags %u ", flags);
cur_undodata_ptr += sizeof(uint8);
if (flags & UREC_INPLACE_UPDATE_XOR_PREFIX) {
uint16* prefixlen_ptr = (uint16 *)(cur_undodata_ptr);
cur_undodata_ptr += sizeof(uint16);
uint16 prefixlen = *prefixlen_ptr;
fprintf(stdout, "PREFIXLEN %u ", prefixlen);
}
if (flags & UREC_INPLACE_UPDATE_XOR_SUFFIX) {
uint16* suffixlen_ptr = (uint16 *)(cur_undodata_ptr);
cur_undodata_ptr += sizeof(uint16);
uint16 suffixlen = *suffixlen_ptr;
fprintf(stdout, "SUFFIXLEN %u ", suffixlen);
}
diskTuple.xid = (ShortTransactionId)InvalidTransactionId;
} else {
Assert(urec->rawdata_.len >= (int)SizeOfUHeapDiskTupleHeaderExceptXid);
rc = memcpy_s(((char *)&diskTuple + OffsetTdId), SizeOfUHeapDiskTupleHeaderExceptXid,
urec->rawdata_.data, SizeOfUHeapDiskTupleHeaderExceptXid);
securec_check(rc, "", "");
diskTuple.xid = (ShortTransactionId)InvalidTransactionId;
}
fprintf(stdout, "\ndiskTuple: td_id %u, reserved %u, flag %u, flag2 %u, t_hoff %u.\n",
diskTuple.td_id, diskTuple.reserved, diskTuple.flag, diskTuple.flag2, diskTuple.t_hoff);
fprintf(stdout, "current undo record size: %u\n\n", curSize);
}
startingByte = UNDO_LOG_BLOCK_HEADER_SIZE;
blockno++;
} while (true);
free(urec);
close(fd);
blkprev = urec->wblk_.blkprev;
fprintf(stderr, "UndoRecPtr(%lu):\nwhdr = xid(%lu), cid(%u), reloid(%u), relfilenode(%u), utype(%u).\n",
urp, urec->whdr_.xid, urec->whdr_.cid, urec->whdr_.reloid, urec->whdr_.relfilenode, urec->whdr_.utype);
fprintf(stderr, "wblk = blk_prev(%lu), blockno(%u), offset(%u).\n", urec->wblk_.blkprev, urec->wblk_.blkno,
urec->wblk_.offset);
fprintf(stderr, "wtxn = prevurp(%lu).\n", urec->wtxn_.prevurp);
fprintf(stderr, "wpay = payloadlen(%u).\n", urec->wpay_.payloadlen);
free(urec);
close(fd);
if (blkprev != INVALID_UNDO_REC_PTR) {
ParseUndoRecord(blkprev);
}
if (!forward) {
urp = blkprev;
} else {
urp = UNDO_LOG_OFFSET_PLUS_USABLE_BYTES(urp, curSize);
}
} while (urp != INVALID_UNDO_REC_PTR);
return true;
}
@ -5175,6 +5366,7 @@ static void fill_filenode_map(char** class_map)
{"pg_extension", 3079},
{"pg_foreign_table", 3118},
{"pg_enum", 3501},
{"pg_set", 3516},
{"pg_seclabel", 3596},
{"pg_ts_dict", 3600},
{"pg_ts_parser", 3601},
@ -5336,7 +5528,9 @@ static void fill_filenode_map(char** class_map)
return;
}
if (NULL != cmap[i].class_name) {
memcpy(name, cmap[i].class_name, strlen(cmap[i].class_name) + 1);
errno_t rc = EOK;
rc = memcpy_s(name, 64 * sizeof(char), cmap[i].class_name, strlen(cmap[i].class_name) + 1);
securec_check_c(rc, "\0", "\0");
class_map[cmap[i].ralation_id] = name;
name = NULL;
} else {
@ -5695,8 +5889,14 @@ int main(int argc, char** argv)
case HACKING_UNDO_RECORD:
ret = snprintf_s(g_dir, sizeof(g_dir), sizeof(g_dir), filename);
securec_check(ret, "\0", "\0");
if (!ParseUndoRecord(cu_offset)) {
fprintf(stderr, "Error during parsing undo group meta file %s\n", filename);
fprintf(stdout, "Parsing backward, urp %lu:\n", cu_offset);
if (!ParseUndoRecord(cu_offset, false)) {
fprintf(stderr, "Error during parsing undo group meta file %s backward\n", filename);
exit(1);
}
fprintf(stdout, "Parsing forward, urp %lu:\n", cu_offset);
if (!ParseUndoRecord(cu_offset, true)) {
fprintf(stderr, "Error during parsing undo group meta file %s forward\n", filename);
exit(1);
}
break;
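
A standalone toy showing the two walk directions exercised above; ToyUndoRecord, WalkUndo and the std::map layout are invented for illustration, while the real code uses blkprev and UNDO_LOG_OFFSET_PLUS_USABLE_BYTES as shown in the hunk. Backward parsing follows each record's link to the previous record; forward parsing advances the pointer by the size of the record just read.

#include <cstdio>
#include <cstdint>
#include <map>

struct ToyUndoRecord {
    uint64_t prev;   /* pointer to the previous record, 0 when there is none */
    uint32_t size;   /* total size of this record in the log */
};

static void WalkUndo(const std::map<uint64_t, ToyUndoRecord> &log, uint64_t start, bool forward)
{
    uint64_t urp = start;
    while (log.count(urp) != 0) {
        const ToyUndoRecord &rec = log.at(urp);
        printf("record at %lu, size %u\n", (unsigned long)urp, (unsigned)rec.size);
        /* an offset that is not a record (for example 0) ends the walk */
        urp = forward ? urp + rec.size : rec.prev;
    }
}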

View File

@ -26,6 +26,7 @@ execute_process(
COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/uheapdesc.cpp
COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/undologdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/undologdesc.cpp
COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/replorigindesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/replorigindesc.cpp
COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/smgr/cfs/cfs_mddesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cfs_mddesc.cpp
)
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_xlogdump_SRC)
@ -39,8 +40,8 @@ SET(xlogdump_LINK_LIBS libpgcommon.a -lpgport -lcrypt -ldl -lm -ledit -lssl -lcr
add_bintarget(pg_xlogdump TGT_xlogdump_SRC TGT_xlogdump_INC "${xlogdump_DEF_OPTIONS}" "${xlogdump_COMPILE_OPTIONS}" "${xlogdump_LINK_OPTIONS}" "${xlogdump_LINK_LIBS}")
add_dependencies(pg_xlogdump pgport_static pgcommon_static)
target_link_directories(pg_xlogdump PUBLIC
${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} ${SECURE_LIB_PATH}
${LIBOPENSSL_LIB_PATH} ${LIBCURL_LIB_PATH} ${SECURE_LIB_PATH}
${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${CMAKE_BINARY_DIR}/lib
)
install(TARGETS pg_xlogdump RUNTIME DESTINATION bin)
install(TARGETS pg_xlogdump RUNTIME DESTINATION bin)

View File

@ -78,14 +78,17 @@ const char* timestamptz_to_str(TimestampTz dt)
char zone[MAXDATELEN + 1];
time_t result = (time_t)timestamptz_to_time_t(dt);
struct tm* ltime = localtime(&result);
errno_t rc = EOK;
strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", ltime);
strftime(zone, sizeof(zone), "%Z", ltime);
#ifdef HAVE_INT64_TIMESTAMP
sprintf(buf, "%s.%06d %s", ts, (int)(dt % USECS_PER_SEC), zone);
rc = snprintf_s(buf, sizeof(buf), sizeof(buf) - 1, "%s.%06d %s", ts, (int)(dt % USECS_PER_SEC), zone);
securec_check_ss_c(rc, "\0", "\0");
#else
sprintf(buf, "%s.%.6f %s", ts, fabs(dt - floor(dt)), zone);
rc = snprintf_s(buf, sizeof(buf), sizeof(buf) - 1, "%s.%.6f %s", ts, fabs(dt - floor(dt)), zone);
securec_check_ss_c(rc, "\0", "\0");
#endif
return buf;
@ -142,41 +145,36 @@ ForkNumber forkname_to_number(const char* forkName)
/*
* relpathbackend - construct path to a relation's file
*/
char* relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum)
char *relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum)
{
int pathlen;
char* path = NULL;
char *path = NULL;
errno_t rc = EOK;
/* Column store file path, e.g: 16384_C1.0, 16384_C1_bcm */
if (forknum > MAX_FORKNUM) {
char attr_name[32];
int attid = forknum - MAX_FORKNUM;
path = (char*)calloc(MAXPGPATH, sizeof(char));
(void)snprintf(attr_name, sizeof(attr_name), "C%d", attid);
path = (char *)calloc(MAXPGPATH, sizeof(char));
rc = snprintf_s(attr_name, sizeof(attr_name), sizeof(attr_name) - 1, "C%d", attid);
securec_check_ss_c(rc, "\0", "\0");
if (rnode.spcNode == GLOBALTABLESPACE_OID) {
/* Shared system relations live in {datadir}/global */
Assert(rnode.dbNode == 0);
pathlen = strlen("global") + 1 + OIDCHARS + 1 + strlen(attr_name) + 1;
(void)snprintf(path, pathlen, "global/%u_%s", rnode.relNode, attr_name);
rc = snprintf_s(path, pathlen, pathlen - 1, "global/%u_%s", rnode.relNode, attr_name);
} else if (rnode.spcNode == DEFAULTTABLESPACE_OID) {
/* The default tablespace is {datadir}/base */
pathlen = strlen("base") + 1 + OIDCHARS + 1 + OIDCHARS + 1 + strlen(attr_name) + 1;
(void)snprintf(path, pathlen, "base/%u/%u_%s", rnode.dbNode, rnode.relNode, attr_name);
rc = snprintf_s(path, pathlen, pathlen - 1, "base/%u/%u_%s", rnode.dbNode, rnode.relNode, attr_name);
} else {
/* All other tablespaces are accessed via symlinks */
pathlen = 9 + 1 + OIDCHARS + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + strlen(PGXCNodeName) + 1 +
OIDCHARS + 1 + OIDCHARS + 1 + strlen(attr_name) + 1;
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s_%s/%u/%u_%s",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
PGXCNodeName,
rnode.dbNode,
rnode.relNode,
attr_name);
OIDCHARS + 1 + OIDCHARS + 1 + strlen(attr_name) + 1;
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s_%s/%u/%u_%s", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, PGXCNodeName, rnode.dbNode, rnode.relNode, attr_name);
}
} else {
if (rnode.spcNode == GLOBALTABLESPACE_OID) {
@ -184,133 +182,85 @@ char* relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum)
Assert(rnode.dbNode == 0);
Assert(backend == InvalidBackendId);
pathlen = 7 + OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char*)malloc(pathlen);
path = (char *)malloc(pathlen);
if (forknum != MAIN_FORKNUM)
(void)snprintf(path, pathlen, "global/%u_%s", rnode.relNode, forkNames[forknum]);
rc = snprintf_s(path, pathlen, pathlen - 1, "global/%u_%s", rnode.relNode, forkNames[forknum]);
else
(void)snprintf(path, pathlen, "global/%u", rnode.relNode);
rc = snprintf_s(path, pathlen, pathlen - 1, "global/%u", rnode.relNode);
} else if (rnode.spcNode == DEFAULTTABLESPACE_OID) {
/* The default tablespace is {datadir}/base */
if (backend == InvalidBackendId) {
pathlen = 5 + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char*)malloc(pathlen);
path = (char *)malloc(pathlen);
if (forknum != MAIN_FORKNUM)
(void)snprintf(path, pathlen, "base/%u/%u_%s", rnode.dbNode, rnode.relNode, forkNames[forknum]);
rc = snprintf_s(path, pathlen, pathlen - 1, "base/%u/%u_%s", rnode.dbNode, rnode.relNode,
forkNames[forknum]);
else
(void)snprintf(path, pathlen, "base/%u/%u", rnode.dbNode, rnode.relNode);
rc = snprintf_s(path, pathlen, pathlen - 1, "base/%u/%u", rnode.dbNode, rnode.relNode);
} else {
/* OIDCHARS will suffice for an integer, too */
pathlen = 5 + OIDCHARS + 2 + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char*)malloc(pathlen);
path = (char *)malloc(pathlen);
if (forknum != MAIN_FORKNUM)
(void)snprintf(
path, pathlen, "base/%u/t%d_%u_%s", rnode.dbNode, backend, rnode.relNode, forkNames[forknum]);
rc = snprintf_s(path, pathlen, pathlen - 1, "base/%u/t%d_%u_%s", rnode.dbNode, backend,
rnode.relNode, forkNames[forknum]);
else
(void)snprintf(path, pathlen, "base/%u/t%d_%u", rnode.dbNode, backend, rnode.relNode);
rc = snprintf_s(path, pathlen, pathlen - 1, "base/%u/t%d_%u", rnode.dbNode, backend,
rnode.relNode);
}
} else {
/* All other tablespaces are accessed via symlinks */
if (backend == InvalidBackendId) {
pathlen = 9 + 1 + OIDCHARS + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS +
1
1
#ifdef PGXC
/* Postgres-XC tablespaces include node name */
+ strlen(PGXCNodeName) + 1
/* Postgres-XC tablespaces include node name */
+ strlen(PGXCNodeName) + 1
#endif
+ OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char*)malloc(pathlen);
+ OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char *)malloc(pathlen);
#ifdef PGXC
if (forknum != MAIN_FORKNUM)
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s_%s/%u/%u_%s",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
PGXCNodeName,
rnode.dbNode,
rnode.relNode,
forkNames[forknum]);
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s_%s/%u/%u_%s", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, PGXCNodeName, rnode.dbNode, rnode.relNode, forkNames[forknum]);
else
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s_%s/%u/%u",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
PGXCNodeName,
rnode.dbNode,
rnode.relNode);
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s_%s/%u/%u", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, PGXCNodeName, rnode.dbNode, rnode.relNode);
#else
if (forknum != MAIN_FORKNUM)
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s/%u/%u_%s",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
rnode.dbNode,
rnode.relNode,
forkNames[forknum]);
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s/%u/%u_%s", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, rnode.dbNode, rnode.relNode, forkNames[forknum]);
else
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s/%u/%u",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
rnode.dbNode,
rnode.relNode);
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s/%u/%u", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, rnode.dbNode, rnode.relNode);
#endif
} else {
/* OIDCHARS will suffice for an integer, too */
pathlen = 9 + 1 + OIDCHARS + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 2
#ifdef PGXC
+ strlen(PGXCNodeName) + 1
+ strlen(PGXCNodeName) + 1
#endif
+ OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char*)malloc(pathlen);
+ OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char *)malloc(pathlen);
#ifdef PGXC
if (forknum != MAIN_FORKNUM)
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s_%s/%u/t%d_%u_%s",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
PGXCNodeName,
rnode.dbNode,
backend,
rnode.relNode,
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s_%s/%u/t%d_%u_%s", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, PGXCNodeName, rnode.dbNode, backend, rnode.relNode,
forkNames[forknum]);
else
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s_%s/%u/t%d_%u",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
PGXCNodeName,
rnode.dbNode,
backend,
rnode.relNode);
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s_%s/%u/t%d_%u", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, PGXCNodeName, rnode.dbNode, backend, rnode.relNode);
#else
if (forknum != MAIN_FORKNUM)
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s/%u/t%d_%u_%s",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
rnode.dbNode,
backend,
rnode.relNode,
forkNames[forknum]);
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s/%u/t%d_%u_%s", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, rnode.dbNode, backend, rnode.relNode, forkNames[forknum]);
else
(void)snprintf(path,
pathlen,
"pg_tblspc/%u/%s/%u/t%d_%u",
rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY,
rnode.dbNode,
backend,
rnode.relNode);
rc = snprintf_s(path, pathlen, pathlen - 1, "pg_tblspc/%u/%s/%u/t%d_%u", rnode.spcNode,
TABLESPACE_VERSION_DIRECTORY, rnode.dbNode, backend, rnode.relNode);
#endif
}
}
}
securec_check_ss_c(rc, "\0", "\0");
return path;
}

View File

@ -32,6 +32,7 @@
#include "replication/replicainternal.h"
#include "rmgrdesc.h"
#include "storage/smgr/segment.h"
#include "storage/page_compression.h"
static const char* progname;
@ -518,6 +519,7 @@ static void XLogDumpDisplayRecord(XLogDumpConfig* config, XLogReaderState* recor
int block_id;
XLogRecPtr lsn;
XLogRecPtr xl_prev = XLogRecGetPrev(record);
RelFileCompressOption compOpt;
printf("REDO @ %X/%X; LSN %X/%X: prev %X/%X; xid " XID_FMT "; term %u; len %u; total %u; crc %u; "
"desc: %s - ",
@ -555,6 +557,15 @@ static void XLogDumpDisplayRecord(XLogDumpConfig* config, XLogReaderState* recor
if (IsBucketFileNode(rnode)) {
printf("/%d", rnode.bucketNode);
}
if (rnode.opt != 0) {
TransCompressOptions(rnode, &compOpt);
printf(", compressed file: byteConvert: %u, diffConvert: %u, PreallocChunks: %u, compressLevel: %u"
", compressAlgorithm: %u, compressChunkSize: %u.",
compOpt.byteConvert, compOpt.diffConvert, compOpt.compressPreallocChunks,
compOpt.compressLevelSymbol, compOpt.compressAlgorithm, CHUNK_SIZE_LIST[compOpt.compressChunkSize]);
}
StorageType storage_type = HEAP_DISK;
if (IsSegmentFileNode(rnode)) {
storage_type = SEGMENT_PAGE;

View File

@ -637,7 +637,7 @@ static ForeignScan *postgresGetForeignPlan(PlannerInfo *root, RelOptInfo *basere
* Note: because we actually run the query as a cursor, this assumes that
* DECLARE CURSOR ... FOR UPDATE is supported, which it isn't before 8.3.
*/
if (baserel->relid == (unsigned int)root->parse->resultRelation &&
if (baserel->relid == (unsigned int)linitial2_int(root->parse->resultRelations) &&
(root->parse->commandType == CMD_UPDATE || root->parse->commandType == CMD_DELETE)) {
/* Relation is UPDATE/DELETE target, so use FOR UPDATE */
appendStringInfoString(&sql, " FOR UPDATE");
@ -912,7 +912,8 @@ static void postgresAddForeignUpdateTargets(Query *parsetree, RangeTblEntry *tar
* In postgres_fdw, what we need is the ctid, same as for a regular table.
* Make a Var representing the desired value
*/
Var* var = makeVar((Index)parsetree->resultRelation, SelfItemPointerAttributeNumber, TIDOID, -1, InvalidOid, 0);
Var* var = makeVar((Index)linitial_int(parsetree->resultRelations), SelfItemPointerAttributeNumber,
TIDOID, -1, InvalidOid, 0);
/* Wrap it in a resjunk TLE with the right name ... */
const char *attrname = "ctid";

View File

@ -179,11 +179,8 @@ static bool is_valid_for_masking(const char* func_name, Oid funcnsp, int& funcid
{
CatCList *catlist = NULL;
#ifndef ENABLE_MULTIPLE_NODES
if (t_thrd.proc->workingVersionNum < 92470) {
catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(func_name));
} else {
catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(func_name));
}
int cacheId = (t_thrd.proc->workingVersionNum < 92470) ? PROCNAMEARGSNSP : PROCALLARGS;
catlist = SearchSysCacheList1(cacheId, CStringGetDatum(func_name));
#else
catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(func_name));
#endif

View File

@ -170,9 +170,10 @@ bool load_policy_labels(bool reload)
}
/* scan to load all labels */
if (all_labels == NULL) {
all_labels = new loaded_labels;
if (all_labels != NULL) {
delete all_labels;
}
all_labels = new loaded_labels;
scan_policy_labels(all_labels);
return true;
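
If the intent of the hunk above is to drop any previously loaded table before allocating a fresh one on reload, a minimal standalone sketch of that pattern is shown below; LabelTable and ReloadLabels are stand-ins invented for the example, not the real loaded_labels class.

struct LabelTable {
    int count = 0;   /* placeholder contents */
};

static LabelTable *g_allLabels = nullptr;

static void ReloadLabels()
{
    if (g_allLabels != nullptr) {
        delete g_allLabels;     /* release the table from the previous load */
        g_allLabels = nullptr;
    }
    g_allLabels = new LabelTable;
    /* ... repopulate g_allLabels here ... */
}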

View File

@ -855,7 +855,7 @@ void gen_policy_labelitem(PolicyLabelItem &item, const ListCell *rel, int objtyp
switch (objtype) {
case O_VIEW:
case O_TABLE: {
Oid relid = RangeVarGetRelid((RangeVar *)rel, NoLock, false);
Oid relid = RangeVarGetRelid((const RangeVar *)rel, NoLock, true);
if (!OidIsValid(relid)) {
return;
}
@ -865,7 +865,7 @@ void gen_policy_labelitem(PolicyLabelItem &item, const ListCell *rel, int objtyp
}
case O_FUNCTION: {
FuncWithArgs *func = (FuncWithArgs *)(rel);
Oid funcid = LookupFuncNameTypeNames(func->funcname, func->funcargs, false);
Oid funcid = LookupFuncNameTypeNames(func->funcname, func->funcargs, true);
if (!OidIsValid(funcid)) {
return;
}
@ -974,4 +974,4 @@ CmdType get_rte_commandtype(RangeTblEntry *rte)
} else {
return CMD_UNKNOWN;
}
}
}

View File

@ -139,6 +139,7 @@ static THR_LOCAL MngEventsVector *mng_events = NULL;
using StrMap = gs_stl::gs_map<gs_stl::gs_string, masking_result>;
static void gsaudit_next_PostParseAnalyze_hook(ParseState *pstate, Query *query);
static void destroy_local_parameter();
static void destory_thread_variables()
{
@ -409,18 +410,16 @@ bool verify_copy_command_is_reparsed(List* parsetree_list, const char* query_str
gs_stl::gs_string& replaced_query_string)
{
/* do nothing when enable_security_policy is off */
if (!u_sess->attr.attr_security.Enable_Security_Policy || !is_masking_policy_exist()) {
return false;
}
bool is_exist = (!u_sess->attr.attr_security.Enable_Security_Policy ||
!is_masking_policy_exist());
if (is_exist) return false;
ListCell* item = NULL;
foreach(item, parsetree_list) {
Node* parsetree = (Node *) lfirst(item);
if (nodeTag(parsetree) == T_CopyStmt) {
CopyStmt* stmt = (CopyStmt*)parsetree;
if (stmt->is_from || stmt->query) {
return false;
}
bool is_from_query = (stmt->is_from || stmt->query);
if (is_from_query) return false;
/* verify policies */
IPV6 ip;
get_remote_addr(&ip);
@ -432,10 +431,8 @@ bool verify_copy_command_is_reparsed(List* parsetree_list, const char* query_str
checkSecurityPolicyFilter_hook(filter_item, &policy_ids);
}
}
if (policy_ids.empty() && !check_audit_policy_filter(&filter_item, &policy_ids)) {
return false;
}
bool is_empty_filter = (policy_ids.empty() && !check_audit_policy_filter(&filter_item, &policy_ids));
if (is_empty_filter) return false;
gs_stl::gs_string replace_buffer("(select ");
char replace_name[POLICY_TMP_BUFF_LEN] = {0};
@ -558,7 +555,7 @@ static void gsaudit_next_PostParseAnalyze_hook(ParseState *pstate, Query *query)
enable_dml_auditing = true;
}
if (u_sess->proc_cxt.IsInnerMaintenanceTools || (t_thrd.role != WORKER && t_thrd.role != THREADPOOL_WORKER) ||
if (u_sess->proc_cxt.IsNoMaskingInnerTools || (t_thrd.role != WORKER && t_thrd.role != THREADPOOL_WORKER) ||
(!enable_dml_auditing && !is_masking_policy_exist())) {
if (next_post_parse_analyze_hook) {
next_post_parse_analyze_hook(pstate, query);
@ -711,7 +708,14 @@ static inline bool get_prepare_command_object_name(Node *parsetree, RangeVar *&r
}
case T_UpdateStmt: {
UpdateStmt *_stmt = (UpdateStmt *)parsetree;
rel = _stmt->relation;
ListCell *l = NULL;
foreach (l, _stmt->relationClause) {
Node *n = (Node *)lfirst(l);
if (IsA(n, RangeVar)) {
rel = (RangeVar *)n;
return true;
}
}
return true;
}
case T_SelectStmt: {
@ -732,7 +736,7 @@ static inline bool get_prepare_command_object_name(Node *parsetree, RangeVar *&r
}
case T_DeleteStmt: {
DeleteStmt *_stmt = (DeleteStmt *)parsetree;
rel = _stmt->relation;
rel = (RangeVar*)linitial(_stmt->relations);
return true;
}
default:

View File

@ -48,7 +48,7 @@ declare
rd INTEGER;
size INTEGER := pg_catalog.length(col);
tmp text := col;
res text;
res text := '';
begin
while size > 0 loop
rd := pg_catalog.floor(pg_catalog.random() * pg_catalog.length(tmp) + 1);

View File

@ -1,5 +1,5 @@
# security_plugin extension
comment = 'provides security functionality'
default_version = '1.0'
module_pathname = '$libdir/security_plugin'
relocatable = true
# security_plugin extension
comment = 'provides security functionality'
default_version = '1.0'
module_pathname = '$libdir/security_plugin'
relocatable = true

View File

@ -36,7 +36,7 @@
<repoType>Generic</repoType>
<id>
<offering>DOPRA SSP</offering>
<version>DOPRA SSP V300R021C10SPC010B100</version>
<version>DOPRA SSP V300R021C10SPC120B500</version>
</id>
<copies>
<copy>
@ -50,7 +50,7 @@
<repoType>Generic</repoType>
<id>
<offering>BiSheng JDK Enterprise</offering>
<version>BiSheng JDK Enterprise 2.1.0.320.B001</version>
<version>BiSheng JDK Enterprise 2.1.0.330.B003</version>
</id>
<copies>
<copy>

View File

@ -233,6 +233,11 @@
<entry>security labels on database objects</entry>
</row>
<row>
<entry><link linkend="catalog-pg-set"><structname>pg_set</structname></link></entry>
<entry>set label and value definitions</entry>
</row>
<row>
<entry><link linkend="catalog-pg-shdepend"><structname>pg_shdepend</structname></link></entry>
<entry>dependencies on shared objects</entry>

View File

@ -1,16 +1,16 @@
<refentry id="SQL-ABORT">
<refmeta>
<refentrytitle>ABORT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ABORT</refname>
<refpurpose>abort the current transaction</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ABORT [ WORK | TRANSACTION ] ;
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ABORT">
<refmeta>
<refentrytitle>ABORT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ABORT</refname>
<refpurpose>abort the current transaction</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ABORT [ WORK | TRANSACTION ] ;
</synopsis>
</refsynopsisdiv>
</refentry>

View File

@ -1,17 +1,17 @@
<refentry id="SQL-ALTER_APP_WORKLOAD_GROUP_MAPPING">
<refmeta>
<refentrytitle>ALTER APP WORKLOAD GROUP MAPPING</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER APP WORKLOAD GROUP MAPPING</refname>
<refpurpose>modify app group relate with group mapping</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER APP WORKLOAD GROUP MAPPING app_name
WITH ( WORKLOAD_GPNAME = wg_name );
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_APP_WORKLOAD_GROUP_MAPPING">
<refmeta>
<refentrytitle>ALTER APP WORKLOAD GROUP MAPPING</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER APP WORKLOAD GROUP MAPPING</refname>
<refpurpose>modify app group relate with group mapping</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER APP WORKLOAD GROUP MAPPING app_name
WITH ( WORKLOAD_GPNAME = wg_name );
</synopsis>
</refsynopsisdiv>
</refentry>

View File

@ -1,35 +1,35 @@
<refentry id="SQL-ALTER_AUDIT_POLICY">
<refmeta>
<refentrytitle>ALTER AUDIT POLICY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER AUDIT POLICY</refname>
<refpurpose>change a audit policy</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER AUDIT POLICY [ IF EXISTS ] policy_name { ADD | REMOVE } { [ privilege_audit_clause ] [ access_audit_clause ] };
ALTER AUDIT POLICY [ IF EXISTS ] policy_name MODIFY ( filter_group_clause );
ALTER AUDIT POLICY [ IF EXISTS ] policy_name DROP FILTER;
ALTER AUDIT POLICY [ IF EXISTS ] policy_name COMMENTS policy_comments;
ALTER AUDIT POLICY [ IF EXISTS ] policy_name { ENABLE | DISABLE };
where privilege_audit_clause can be:
PRIVILEGES { DDL | ALL }
where access_audit_clause can be:
ACCESS { DML | ALL }
where filter_group_clause can be:
FILTER ON { ( FILTER_TYPE ( filter_value [, ... ] ) ) [, ... ] }
where DDL can be:
{ ( ALTER | ANALYZE | COMMENT | CREATE | DROP | GRANT | REVOKE | SET | SHOW | LOGIN_ACCESS | LOGIN_FAILURE | LOGOUT | LOGIN ) }
where DML can be:
{ ( COPY | DEALLOCATE | DELETE_P | EXECUTE | REINDEX | INSERT | PREPARE | SELECT | TRUNCATE | UPDATE ) }
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_AUDIT_POLICY">
<refmeta>
<refentrytitle>ALTER AUDIT POLICY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER AUDIT POLICY</refname>
<refpurpose>change a audit policy</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER AUDIT POLICY [ IF EXISTS ] policy_name { ADD | REMOVE } { [ privilege_audit_clause ] [ access_audit_clause ] };
ALTER AUDIT POLICY [ IF EXISTS ] policy_name MODIFY ( filter_group_clause );
ALTER AUDIT POLICY [ IF EXISTS ] policy_name DROP FILTER;
ALTER AUDIT POLICY [ IF EXISTS ] policy_name COMMENTS policy_comments;
ALTER AUDIT POLICY [ IF EXISTS ] policy_name { ENABLE | DISABLE };
where privilege_audit_clause can be:
PRIVILEGES { DDL | ALL }
where access_audit_clause can be:
ACCESS { DML | ALL }
where filter_group_clause can be:
FILTER ON { ( FILTER_TYPE ( filter_value [, ... ] ) ) [, ... ] }
where DDL can be:
{ ( ALTER | ANALYZE | COMMENT | CREATE | DROP | GRANT | REVOKE | SET | SHOW | LOGIN_ACCESS | LOGIN_FAILURE | LOGOUT | LOGIN ) }
where DML can be:
{ ( COPY | DEALLOCATE | DELETE_P | EXECUTE | REINDEX | INSERT | PREPARE | SELECT | TRUNCATE | UPDATE ) }
</synopsis>
</refsynopsisdiv>
</refentry>

View File

@ -1,24 +1,24 @@
<refentry id="SQL-ALTER_DATA_SOURCE">
<refmeta>
<refentrytitle>ALTER DATA SOURCE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DATA SOURCE</refname>
<refpurpose>alter the data source</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DATA SOURCE src_name
[TYPE 'type_str']
[VERSION {'version_str' | NULL}]
[OPTIONS ( { [ADD | SET | DROP] optname ['optvalue'] } [, ...] )];
ALTER DATA SOURCE src_name RENAME TO src_new_name;
ALTER DATA SOURCE src_name OWNER TO new_owner;
Valid optname are:
DSN, USERNAME, PASSWORD, ENCODING
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_DATA_SOURCE">
<refmeta>
<refentrytitle>ALTER DATA SOURCE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DATA SOURCE</refname>
<refpurpose>alter the data source</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DATA SOURCE src_name
[TYPE 'type_str']
[VERSION {'version_str' | NULL}]
[OPTIONS ( { [ADD | SET | DROP] optname ['optvalue'] } [, ...] )];
ALTER DATA SOURCE src_name RENAME TO src_new_name;
ALTER DATA SOURCE src_name OWNER TO new_owner;
Valid optname are:
DSN, USERNAME, PASSWORD, ENCODING
</synopsis>
</refsynopsisdiv>
</refentry>

View File

@ -1,29 +1,29 @@
<refentry id="SQL-ALTER_DATABASE">
<refmeta>
<refentrytitle>ALTER DATABASE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DATABASE</refname>
<refpurpose>change a database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DATABASE database_name
[ [ WITH ] CONNECTION LIMIT connlimit ];
ALTER DATABASE database_name
RENAME TO new_name;
ALTER DATABASE database_name
OWNER TO new_owner;
ALTER DATABASE database_name
SET TABLESPACE new_tablespace;
ALTER DATABASE database_name
SET configuration_parameter { { TO | = } { value | DEFAULT } | FROM CURRENT };
ALTER DATABASE database_name
RESET { configuration_parameter | ALL };
ALTER DATABASE database_name
[ WITH ] { ENABLE | DISABLE } PRIVATE OBJECT;
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_DATABASE">
<refmeta>
<refentrytitle>ALTER DATABASE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DATABASE</refname>
<refpurpose>change a database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DATABASE database_name
[ [ WITH ] CONNECTION LIMIT connlimit ];
ALTER DATABASE database_name
RENAME TO new_name;
ALTER DATABASE database_name
OWNER TO new_owner;
ALTER DATABASE database_name
SET TABLESPACE new_tablespace;
ALTER DATABASE database_name
SET configuration_parameter { { TO | = } { value | DEFAULT } | FROM CURRENT };
ALTER DATABASE database_name
RESET { configuration_parameter | ALL };
ALTER DATABASE database_name
[ WITH ] { ENABLE | DISABLE } PRIVATE OBJECT;
</synopsis>
</refsynopsisdiv>
</refentry>

View File

@ -1,103 +1,103 @@
<refentry id="SQL-ALTER_DEFAULT_PRIVILEGES">
<refmeta>
<refentrytitle>ALTER DEFAULT PRIVILEGES</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DEFAULT PRIVILEGES</refname>
<refpurpose>define default access privileges</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DEFAULT PRIVILEGES
[ FOR { ROLE | USER } target_role [, ...] ]
[ IN SCHEMA schema_name [, ...] ]
abbreviated_grant_or_revoke;
where abbreviated_grant_or_revoke can be:
grant_on_tables_clause
| grant_on_sequences_clause
| grant_on_functions_clause
| grant_on_types_clause
| grant_on_client_master_keys_clause
| grant_on_column_encryption_keys_clause
| revoke_on_tables_clause
| revoke_on_sequences_clause
| revoke_on_functions_clause
| revoke_on_types_clause
| revoke_on_client_master_keys_clause
| revoke_on_column_encryption_keys_clause
where grant_on_tables_clause can be:
GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES |
ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] }
ON TABLES
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_sequences_clause can be:
GRANT { { SELECT | UPDATE | USAGE | ALTER | DROP | COMMENT }
[, ...] | ALL [ PRIVILEGES ] }
ON SEQUENCES
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_functions_clause can be:
GRANT { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON FUNCTIONS
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_types_clause can be:
GRANT { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON TYPES
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_client_master_keys_clause can be:
GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON CLIENT_MASTER_KEYS
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_column_encryption_keys_clause can be:
GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON COLUMN_ENCRYPTION_KEYS
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where revoke_on_tables_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES |
ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] }
ON TABLES
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_sequences_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { SELECT | UPDATE | USAGE | ALTER | DROP | COMMENT }
[, ...] | ALL [ PRIVILEGES ] }
ON SEQUENCES
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_functions_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON FUNCTIONS
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_types_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON TYPES
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_client_master_keys_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON CLIENT_MASTER_KEYS
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_column_encryption_keys_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON COLUMN_ENCRYPTION_KEYS
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_DEFAULT_PRIVILEGES">
<refmeta>
<refentrytitle>ALTER DEFAULT PRIVILEGES</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DEFAULT PRIVILEGES</refname>
<refpurpose>define default access privileges</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DEFAULT PRIVILEGES
[ FOR { ROLE | USER } target_role [, ...] ]
[ IN SCHEMA schema_name [, ...] ]
abbreviated_grant_or_revoke;
where abbreviated_grant_or_revoke can be:
grant_on_tables_clause
| grant_on_sequences_clause
| grant_on_functions_clause
| grant_on_types_clause
| grant_on_client_master_keys_clause
| grant_on_column_encryption_keys_clause
| revoke_on_tables_clause
| revoke_on_sequences_clause
| revoke_on_functions_clause
| revoke_on_types_clause
| revoke_on_client_master_keys_clause
| revoke_on_column_encryption_keys_clause
where grant_on_tables_clause can be:
GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES |
ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] }
ON TABLES
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_sequences_clause can be:
GRANT { { SELECT | UPDATE | USAGE | ALTER | DROP | COMMENT }
[, ...] | ALL [ PRIVILEGES ] }
ON SEQUENCES
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_functions_clause can be:
GRANT { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON FUNCTIONS
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_types_clause can be:
GRANT { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON TYPES
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_client_master_keys_clause can be:
GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON CLIENT_MASTER_KEYS
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where grant_on_column_encryption_keys_clause can be:
GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON COLUMN_ENCRYPTION_KEYS
TO { [ GROUP ] role_name | PUBLIC } [, ...]
[ WITH GRANT OPTION ]
where revoke_on_tables_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES |
ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] }
ON TABLES
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_sequences_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { SELECT | UPDATE | USAGE | ALTER | DROP | COMMENT }
[, ...] | ALL [ PRIVILEGES ] }
ON SEQUENCES
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_functions_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON FUNCTIONS
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_types_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
ON TYPES
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_client_master_keys_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON CLIENT_MASTER_KEYS
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
where revoke_on_column_encryption_keys_clause can be:
REVOKE [ GRANT OPTION FOR ]
{ { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
ON COLUMN_ENCRYPTION_KEYS
FROM { [ GROUP ] role_name | PUBLIC } [, ...]
[ CASCADE | RESTRICT | CASCADE CONSTRAINTS ]
</synopsis>
</refsynopsisdiv>
</refentry>

View File

@ -1,17 +1,17 @@
<refentry id="SQL-ALTER_DIRECTORY">
<refmeta>
<refentrytitle>ALTER DIRECTORY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DIRECTORY</refname>
<refpurpose>change a directory</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DIRECTORY directory_name
OWNER TO new_owner;
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_DIRECTORY">
<refmeta>
<refentrytitle>ALTER DIRECTORY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER DIRECTORY</refname>
<refpurpose>change a directory</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER DIRECTORY directory_name
OWNER TO new_owner;
</synopsis>
</refsynopsisdiv>
</refentry>

View File

@ -27,19 +27,19 @@ ALTER EXTENSION <replaceable class="PARAMETER">name</replaceable> UPDATE [ TO <r
ALTER EXTENSION <replaceable class="PARAMETER">name</replaceable> SET SCHEMA <replaceable class="PARAMETER">new_schema</replaceable>;
ALTER EXTENSION <replaceable class="PARAMETER">name</replaceable> ADD <replaceable class="PARAMETER">member_object</replaceable>;
ALTER EXTENSION <replaceable class="PARAMETER">name</replaceable> DROP <replaceable class="PARAMETER">member_object</replaceable>;
<phrase>where <replaceable class="PARAMETER">member_object</replaceable> is:</phrase>
FOREIGN TABLE <replaceable class="PARAMETER">object_name</replaceable> |
FUNCTION <replaceable class="PARAMETER">function_name</replaceable> ( [ [ <replaceable class="parameter">argmode</replaceable> ] [ <replaceable class="parameter">argname</replaceable> ] <replaceable class="parameter">argtype</replaceable> [, ...] ] ) |
[ PROCEDURAL ] LANGUAGE <replaceable class="PARAMETER">object_name</replaceable> |
SCHEMA <replaceable class="PARAMETER">object_name</replaceable> |
SERVER <replaceable class="PARAMETER">object_name</replaceable> |
TABLE <replaceable class="PARAMETER">object_name</replaceable> |
TEXT SEARCH CONFIGURATION <replaceable class="PARAMETER">object_name</replaceable> |
TYPE <replaceable class="PARAMETER">object_name</replaceable> |
VIEW <replaceable class="PARAMETER">object_name</replaceable>
</synopsis>
<phrase>where <replaceable class="PARAMETER">member_object</replaceable> is:</phrase>
FOREIGN TABLE <replaceable class="PARAMETER">object_name</replaceable> |
FUNCTION <replaceable class="PARAMETER">function_name</replaceable> ( [ [ <replaceable class="parameter">argmode</replaceable> ] [ <replaceable class="parameter">argname</replaceable> ] <replaceable class="parameter">argtype</replaceable> [, ...] ] ) |
[ PROCEDURAL ] LANGUAGE <replaceable class="PARAMETER">object_name</replaceable> |
SCHEMA <replaceable class="PARAMETER">object_name</replaceable> |
SERVER <replaceable class="PARAMETER">object_name</replaceable> |
TABLE <replaceable class="PARAMETER">object_name</replaceable> |
TEXT SEARCH CONFIGURATION <replaceable class="PARAMETER">object_name</replaceable> |
TYPE <replaceable class="PARAMETER">object_name</replaceable> |
VIEW <replaceable class="PARAMETER">object_name</replaceable>
</synopsis>
</refsynopsisdiv>
<refsect1>

View File

@ -1,88 +1,88 @@
<refentry id="SQL-ALTER_FOREIGN_TABLE">
<refmeta>
<refentrytitle>ALTER FOREIGN TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER FOREIGN TABLE</refname>
<refpurpose>change the definition of a foreign table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
1. GDS:
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
2. HDFS:
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] );
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
ADD [CONSTRAINT constraint_name]
{PRIMARY KEY | UNIQUE} (column_name)
[NOT ENFORCED [ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION] | ENFORCED];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
DROP CONSTRAINT constraint_name ;
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS integer
| ALTER [ COLUMN ] column_name OPTIONS ( {[ ADD | SET | DROP ] option ['value'] } [, ... ])
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
3. OBS:
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] );
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
ADD [CONSTRAINT constraint_name]
{PRIMARY KEY | UNIQUE} (column_name)
[NOT ENFORCED [ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION] | ENFORCED];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
DROP CONSTRAINT constraint_name ;
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS integer
| ALTER [ COLUMN ] column_name OPTIONS ( {[ ADD | SET | DROP ] option ['value'] } [, ... ])
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
4. GC:
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OPTIONS ( {[ SET ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type [, ...] );
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| MODIFY column_name data_type
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_FOREIGN_TABLE">
<refmeta>
<refentrytitle>ALTER FOREIGN TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER FOREIGN TABLE</refname>
<refpurpose>change the definition of a foreign table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
1. GDS:
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
2. HDFS:
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] );
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
ADD [CONSTRAINT constraint_name]
{PRIMARY KEY | UNIQUE} (column_name)
[NOT ENFORCED [ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION] | ENFORCED];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
DROP CONSTRAINT constraint_name ;
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS integer
| ALTER [ COLUMN ] column_name OPTIONS ( {[ ADD | SET | DROP ] option ['value'] } [, ... ])
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
3. OBS:
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] );
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
ADD [CONSTRAINT constraint_name]
{PRIMARY KEY | UNIQUE} (column_name)
[NOT ENFORCED [ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION] | ENFORCED];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
DROP CONSTRAINT constraint_name ;
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS integer
| ALTER [ COLUMN ] column_name OPTIONS ( {[ ADD | SET | DROP ] option ['value'] } [, ... ])
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
4. GC:
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OPTIONS ( {[ SET ] option ['value']} [, ... ]);
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type } [, ...] );
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| MODIFY column_name data_type
</synopsis>
</refsynopsisdiv>
</refentry>
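As a quick illustration of the GDS form above (the foreign table ft_sales, the location value, and the role jack are hypothetical, not taken from the patch):
-- Change a server-side option, then transfer ownership of a GDS foreign table.
ALTER FOREIGN TABLE IF EXISTS ft_sales OPTIONS (SET location 'gsfs://192.168.0.90:5000/sales/');
ALTER FOREIGN TABLE IF EXISTS ft_sales OWNER TO jack;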

View File

@@ -1,30 +1,30 @@
<refentry id="SQL-ALTER_FOREIGN_TABLE_FOR_HDFS">
<refmeta>
<refentrytitle>ALTER FOREIGN TABLE FOR HDFS</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER FOREIGN TABLE FOR HDFS</refname>
<refpurpose>change the definition of a foreign table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
ADD [CONSTRAINT constraint_name]
{PRIMARY KEY | UNIQUE} (column_name)
[NOT ENFORCED [ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION] | ENFORCED];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
DROP CONSTRAINT constraint_name ;
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS [PERCENT] integer
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_FOREIGN_TABLE_FOR_HDFS">
<refmeta>
<refentrytitle>ALTER FOREIGN TABLE FOR HDFS</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER FOREIGN TABLE FOR HDFS</refname>
<refpurpose>change the definition of a foreign table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
OWNER TO new_owner;
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
action [, ... ];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
ADD [CONSTRAINT constraint_name]
{PRIMARY KEY | UNIQUE} (column_name)
[NOT ENFORCED [ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION] | ENFORCED];
ALTER FOREIGN TABLE [ IF EXISTS ] tablename
DROP CONSTRAINT constraint_name ;
where action can be:
ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS [PERCENT] integer
</synopsis>
</refsynopsisdiv>
</refentry>
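A minimal sketch of the column actions listed above; ft_logs and its msg column are illustrative names only.
-- Widen a text column and raise its statistics target on an HDFS foreign table.
ALTER FOREIGN TABLE IF EXISTS ft_logs ALTER COLUMN msg SET DATA TYPE varchar(1024);
ALTER FOREIGN TABLE IF EXISTS ft_logs ALTER COLUMN msg SET STATISTICS 100;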

View File

@@ -1,35 +1,35 @@
<refentry id="SQL-ALTER_FUNCTION">
<refmeta>
<refentrytitle>ALTER FUNCTION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER FUNCTION</refname>
<refpurpose>change the definition of a function</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER FUNCTION function_name ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
action [ ... ] [ RESTRICT ];
ALTER FUNCTION funname ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
RENAME TO new_name;
ALTER FUNCTION funname ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
OWNER TO new_owner;
ALTER FUNCTION funname ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
SET SCHEMA new_schema;
where action can be:
{CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT}
| {IMMUTABLE | STABLE | VOLATILE}
| {NOT FENCED | FENCED}
| [ NOT ] LEAKPROOF
| {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER}
| AUTHID { DEFINER | CURRENT_USER }
| COST execution_cost
| ROWS result_rows
| SET configuration_parameter {{ TO | = } { value | DEFAULT }| FROM CURRENT}
| RESET {configuration_parameter| ALL}
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_FUNCTION">
<refmeta>
<refentrytitle>ALTER FUNCTION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER FUNCTION</refname>
<refpurpose>change the definition of a function</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER FUNCTION function_name ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
action [ ... ] [ RESTRICT ];
ALTER FUNCTION funname ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
RENAME TO new_name;
ALTER FUNCTION funname ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
OWNER TO new_owner;
ALTER FUNCTION funname ( [ {[ argmode ] [ argname ] argtype} [, ...] ] )
SET SCHEMA new_schema;
where action can be:
{CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT}
| {IMMUTABLE | STABLE | VOLATILE}
| {NOT FENCED | FENCED}
| [ NOT ] LEAKPROOF
| {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER}
| AUTHID { DEFINER | CURRENT_USER }
| COST execution_cost
| ROWS result_rows
| SET configuration_parameter {{ TO | = } { value | DEFAULT }| FROM CURRENT}
| RESET {configuration_parameter| ALL}
</synopsis>
</refsynopsisdiv>
</refentry>
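A short example of the rename and volatility actions above; the function calc_bonus(integer) is assumed to exist for the sketch.
-- Rename a function, then mark the renamed function as STABLE.
ALTER FUNCTION public.calc_bonus(integer) RENAME TO calc_bonus_v2;
ALTER FUNCTION public.calc_bonus_v2(integer) STABLE;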

View File

@@ -1,21 +1,21 @@
<refentry id="SQL-ALTER_GROUP">
<refmeta>
<refentrytitle>ALTER GROUP</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER GROUP</refname>
<refpurpose>change role name or membership</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER GROUP group_name
ADD USER user_name [, ... ];
ALTER GROUP group_name
DROP USER user_name [, ... ];
ALTER GROUP group_name
RENAME TO new_name;
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_GROUP">
<refmeta>
<refentrytitle>ALTER GROUP</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER GROUP</refname>
<refpurpose>change role name or membership</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER GROUP group_name
ADD USER user_name [, ... ];
ALTER GROUP group_name
DROP USER user_name [, ... ];
ALTER GROUP group_name
RENAME TO new_name;
</synopsis>
</refsynopsisdiv>
</refentry>
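For example (the group and user names are hypothetical):
-- Add two members to a group, then rename it.
ALTER GROUP dev_group ADD USER alice, bob;
ALTER GROUP dev_group RENAME TO dev_team;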

View File

@@ -1,31 +1,31 @@
<refentry id="SQL-ALTER_INDEX">
<refmeta>
<refentrytitle>ALTER INDEX</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER INDEX</refname>
<refpurpose>change the definition of an index</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER INDEX [ IF EXISTS ] index_name
RENAME TO new_name;
ALTER INDEX [ IF EXISTS ] index_name
SET TABLESPACE tablespace_name;
ALTER INDEX [ IF EXISTS ] index_name
SET ( {storage_parameter = value} [, ... ] );
ALTER INDEX [ IF EXISTS ] index_name
RESET ( storage_parameter [, ... ] ) ;
ALTER INDEX [ IF EXISTS ] index_name
[ MODIFY PARTITION partition_name ] UNUSABLE;
ALTER INDEX index_name
REBUILD [ PARTITION partition_name ];
ALTER INDEX [ IF EXISTS ] index_name
RENAME PARTITION partition_name TO new_partition_name;
ALTER INDEX [ IF EXISTS ] index_name
MOVE PARTITION index_partition_name TABLESPACE new_tablespace;
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_INDEX">
<refmeta>
<refentrytitle>ALTER INDEX</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER INDEX</refname>
<refpurpose>change the definition of an index</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER INDEX [ IF EXISTS ] index_name
RENAME TO new_name;
ALTER INDEX [ IF EXISTS ] index_name
SET TABLESPACE tablespace_name;
ALTER INDEX [ IF EXISTS ] index_name
SET ( {storage_parameter = value} [, ... ] );
ALTER INDEX [ IF EXISTS ] index_name
RESET ( storage_parameter [, ... ] ) ;
ALTER INDEX [ IF EXISTS ] index_name
[ MODIFY PARTITION partition_name ] UNUSABLE;
ALTER INDEX index_name
REBUILD [ PARTITION partition_name ];
ALTER INDEX [ IF EXISTS ] index_name
RENAME PARTITION partition_name TO new_partition_name;
ALTER INDEX [ IF EXISTS ] index_name
MOVE PARTITION index_partition_name TABLESPACE new_tablespace;
</synopsis>
</refsynopsisdiv>
</refentry>
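A brief usage sketch; the index names are invented for illustration.
-- Rename an index, then rebuild it to refresh its physical storage.
ALTER INDEX IF EXISTS idx_orders_date RENAME TO idx_orders_created;
ALTER INDEX idx_orders_created REBUILD;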

View File

@@ -1,17 +1,17 @@
<refentry id="SQL-ALTER_LARGE_OBJECT">
<refmeta>
<refentrytitle>ALTER LARGE OBJECT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER LARGE OBJECT</refname>
<refpurpose>change the definition of a large object</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER LARGE OBJECT large_object_oid
OWNER TO new_owner;
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_LARGE_OBJECT">
<refmeta>
<refentrytitle>ALTER LARGE OBJECT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER LARGE OBJECT</refname>
<refpurpose>change the definition of a large object</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER LARGE OBJECT large_object_oid
OWNER TO new_owner;
</synopsis>
</refsynopsisdiv>
</refentry>
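For example (the OID 16493 and the role archive_admin are placeholders):
-- Reassign ownership of a large object identified by its OID.
ALTER LARGE OBJECT 16493 OWNER TO archive_admin;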

View File

@@ -1,31 +1,31 @@
<!--
doc/src/sgml/ref/alter_materialized_view.sgml
PostgreSQL documentation
-->
<refentry id="SQL-ALTERMATERIALIZEDVIEW">
<refmeta>
<refentrytitle>ALTER MATERIALIZED VIEW</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER MATERIALIZED VIEW</refname>
<refpurpose>change the definition of a materialized view</refpurpose>
</refnamediv>
<indexterm zone="sql-altermaterializedview">
<primary>ALTER MATERIALIZED VIEW</primary>
</indexterm>
<refsynopsisdiv>
<synopsis>
ALTER MATERIALIZED VIEW [ IF EXISTS ] mv_name
OWNER TO new_owner;
ALTER MATERIALIZED VIEW [ IF EXISTS ] mv_name
RENAME [COLUMN] column_name to new_column_name;
ALTER MATERIALIZED VIEW [ IF EXISTS ] mv_name
RENAME TO new_name;
</synopsis>
</refsynopsisdiv>
</refentry>
<!--
doc/src/sgml/ref/alter_materialized_view.sgml
PostgreSQL documentation
-->
<refentry id="SQL-ALTERMATERIALIZEDVIEW">
<refmeta>
<refentrytitle>ALTER MATERIALIZED VIEW</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER MATERIALIZED VIEW</refname>
<refpurpose>change the definition of a materialized view</refpurpose>
</refnamediv>
<indexterm zone="sql-altermaterializedview">
<primary>ALTER MATERIALIZED VIEW</primary>
</indexterm>
<refsynopsisdiv>
<synopsis>
ALTER MATERIALIZED VIEW [ IF EXISTS ] mv_name
OWNER TO new_owner;
ALTER MATERIALIZED VIEW [ IF EXISTS ] mv_name
RENAME [COLUMN] column_name to new_column_name;
ALTER MATERIALIZED VIEW [ IF EXISTS ] mv_name
RENAME TO new_name;
</synopsis>
</refsynopsisdiv>
</refentry>
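A minimal sketch of the supported actions; the view and role names are hypothetical.
-- Rename a materialized view and give it a new owner.
ALTER MATERIALIZED VIEW IF EXISTS mv_daily_sales RENAME TO mv_sales_by_day;
ALTER MATERIALIZED VIEW IF EXISTS mv_sales_by_day OWNER TO report_owner;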

View File

@@ -1,32 +1,32 @@
<refentry id="SQL-ALTER_NODE">
<refmeta>
<refentrytitle>ALTER NODE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER NODE</refname>
<refpurpose>alter a cluster node</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER NODE nodename WITH
(
[ TYPE = nodetype,]
[ HOST = hostname,]
[ PORT = portnum,]
[ HOST1 = 'hostname',]
[ PORT1 = portnum,]
[ HOSTPRIMARY [ = boolean ],]
[ PRIMARY [ = boolean ],]
[ PREFERRED [ = boolean ],]
[ SCTP_PORT = portnum,]
[ CONTROL_PORT = portnum,]
[ SCTP_PORT1 = portnum,]
[ CONTROL_PORT1 = portnum, ]
[ NODEIS_CENTRAL [= boolean], ]
[ NODEIS_ACTIVE [= boolean] ]
);
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_NODE">
<refmeta>
<refentrytitle>ALTER NODE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER NODE</refname>
<refpurpose>alter a cluster node</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER NODE nodename WITH
(
[ TYPE = nodetype,]
[ HOST = hostname,]
[ PORT = portnum,]
[ HOST1 = 'hostname',]
[ PORT1 = portnum,]
[ HOSTPRIMARY [ = boolean ],]
[ PRIMARY [ = boolean ],]
[ PREFERRED [ = boolean ],]
[ SCTP_PORT = portnum,]
[ CONTROL_PORT = portnum,]
[ SCTP_PORT1 = portnum,]
[ CONTROL_PORT1 = portnum, ]
[ NODEIS_CENTRAL [= boolean], ]
[ NODEIS_ACTIVE [= boolean] ]
);
</synopsis>
</refsynopsisdiv>
</refentry>
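An illustrative sketch only; the node name, address, and port are assumptions, and the quoting of the host value follows common usage rather than anything in this patch.
-- Point an existing node definition at a new host and port.
ALTER NODE dn_6001 WITH (HOST = '10.0.0.12', PORT = 40050);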

View File

@@ -1,17 +1,17 @@
<refentry id="SQL-ALTER_RESOURCE_POOL">
<refmeta>
<refentrytitle>ALTER RESOURCE POOL</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER RESOURCE POOL</refname>
<refpurpose>change the resource pool</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER RESOURCE POOL pool_name
WITH ({MEM_PERCENT=pct | CONTROL_GROUP="group_name" | ACTIVE_STATEMENTS=stmt | MAX_DOP = dop | MEMORY_LIMIT='memory_size' | io_limits=io_limits | io_priority='priority' | nodegroup='nodegroup_name' }[, ... ]);
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_RESOURCE_POOL">
<refmeta>
<refentrytitle>ALTER RESOURCE POOL</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER RESOURCE POOL</refname>
<refpurpose>change the resource pool</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER RESOURCE POOL pool_name
WITH ({MEM_PERCENT=pct | CONTROL_GROUP="group_name" | ACTIVE_STATEMENTS=stmt | MAX_DOP = dop | MEMORY_LIMIT='memory_size' | io_limits=io_limits | io_priority='priority' | nodegroup='nodegroup_name' }[, ... ]);
</synopsis>
</refsynopsisdiv>
</refentry>
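For instance (the pool name and limits are made up):
-- Cap concurrent statements and the memory share of a resource pool.
ALTER RESOURCE POOL batch_pool WITH (ACTIVE_STATEMENTS=10, MEM_PERCENT=20);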

View File

@@ -1,51 +1,51 @@
<refentry id="SQL-ALTER_ROLE">
<refmeta>
<refentrytitle>ALTER ROLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER ROLE</refname>
<refpurpose>change a database role</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER ROLE role_name [ [ WITH ] option [ ... ] ];
ALTER ROLE role_name
RENAME TO new_name;
ALTER ROLE role_name [ IN DATABASE database_name ]
SET configuration_parameter {{ TO | = } { value | DEFAULT }|FROM CURRENT};
ALTER ROLE role_name
[ IN DATABASE database_name ] RESET {configuration_parameter|ALL};
where option can be:
{CREATEDB | NOCREATEDB}
| {CREATEROLE | NOCREATEROLE}
| {INHERIT | NOINHERIT}
| {AUDITADMIN | NOAUDITADMIN}
| {SYSADMIN | NOSYSADMIN}
| {MONADMIN | NOMONADMIN}
| {OPRADMIN | NOOPRADMIN}
| {POLADMIN | NOPOLADMIN}
| {USEFT | NOUSEFT}
| {LOGIN | NOLOGIN}
| {REPLICATION | NOREPLICATION}
| {INDEPENDENT | NOINDEPENDENT}
| {VCADMIN | NOVCADMIN}
| {PERSISTENCE | NOPERSISTENCE}
| CONNECTION LIMIT connlimit
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD { 'password' [ EXPIRED ] | DISABLE | EXPIRED }
| [ ENCRYPTED | UNENCRYPTED ] IDENTIFIED BY { 'password' [ REPLACE 'old_password' | EXPIRED ] | DISABLE }
| VALID BEGIN 'timestamp'
| VALID UNTIL 'timestamp'
| RESOURCE POOL 'respool'
| USER GROUP 'groupuser'
| PERM SPACE 'spacelimit'
| TEMP SPACE 'tmpspacelimit'
| SPILL SPACE 'spillspacelimit'
| NODE GROUP logic_cluster_name
| ACCOUNT { LOCK | UNLOCK }
| PGUSER
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_ROLE">
<refmeta>
<refentrytitle>ALTER ROLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER ROLE</refname>
<refpurpose>change a database role</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER ROLE role_name [ [ WITH ] option [ ... ] ];
ALTER ROLE role_name
RENAME TO new_name;
ALTER ROLE role_name [ IN DATABASE database_name ]
SET configuration_parameter {{ TO | = } { value | DEFAULT }|FROM CURRENT};
ALTER ROLE role_name
[ IN DATABASE database_name ] RESET {configuration_parameter|ALL};
where option can be:
{CREATEDB | NOCREATEDB}
| {CREATEROLE | NOCREATEROLE}
| {INHERIT | NOINHERIT}
| {AUDITADMIN | NOAUDITADMIN}
| {SYSADMIN | NOSYSADMIN}
| {MONADMIN | NOMONADMIN}
| {OPRADMIN | NOOPRADMIN}
| {POLADMIN | NOPOLADMIN}
| {USEFT | NOUSEFT}
| {LOGIN | NOLOGIN}
| {REPLICATION | NOREPLICATION}
| {INDEPENDENT | NOINDEPENDENT}
| {VCADMIN | NOVCADMIN}
| {PERSISTENCE | NOPERSISTENCE}
| CONNECTION LIMIT connlimit
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD { 'password' [ EXPIRED ] | DISABLE | EXPIRED }
| [ ENCRYPTED | UNENCRYPTED ] IDENTIFIED BY { 'password' [ REPLACE 'old_password' | EXPIRED ] | DISABLE }
| VALID BEGIN 'timestamp'
| VALID UNTIL 'timestamp'
| RESOURCE POOL 'respool'
| USER GROUP 'groupuser'
| PERM SPACE 'spacelimit'
| TEMP SPACE 'tmpspacelimit'
| SPILL SPACE 'spillspacelimit'
| NODE GROUP logic_cluster_name
| ACCOUNT { LOCK | UNLOCK }
| PGUSER
</synopsis>
</refsynopsisdiv>
</refentry>
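A short sketch of common options; the role analyst and database salesdb are hypothetical.
-- Limit connections for a role, then set a per-database session parameter for it.
ALTER ROLE analyst WITH CONNECTION LIMIT 10;
ALTER ROLE analyst IN DATABASE salesdb SET statement_timeout TO 60000;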

View File

@@ -1,20 +1,20 @@
<refentry id="SQL-ALTER_SCHEMA">
<refmeta>
<refentrytitle>ALTER SCHEMA</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SCHEMA</refname>
<refpurpose>change the definition of a schema</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SCHEMA schema_name
RENAME TO new_name;
ALTER SCHEMA schema_name
OWNER TO new_owner;
ALTER SCHEMA schema_name {WITH | WITHOUT} BLOCKCHAIN;
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_SCHEMA">
<refmeta>
<refentrytitle>ALTER SCHEMA</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SCHEMA</refname>
<refpurpose>change the definition of a schema</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SCHEMA schema_name
RENAME TO new_name;
ALTER SCHEMA schema_name
OWNER TO new_owner;
ALTER SCHEMA schema_name {WITH | WITHOUT} BLOCKCHAIN;
</synopsis>
</refsynopsisdiv>
</refentry>
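For example (the schema and owner names are illustrative):
-- Rename a schema and hand it to a different owner.
ALTER SCHEMA sales RENAME TO sales_archive;
ALTER SCHEMA sales_archive OWNER TO dw_admin;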

View File

@@ -1,21 +1,21 @@
<refentry id="SQL-ALTER_SERVER">
<refmeta>
<refentrytitle>ALTER SERVER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SERVER</refname>
<refpurpose>change the definition of a foreign server</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SERVER server_name [ VERSION 'new_version' ]
[ OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ] ) ];
ALTER SERVER server_name
OWNER TO new_owner;
ALTER SERVER server_name
RENAME TO new_name;
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_SERVER">
<refmeta>
<refentrytitle>ALTER SERVER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SERVER</refname>
<refpurpose>change the definition of a foreign server</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SERVER server_name [ VERSION 'new_version' ]
[ OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ] ) ];
ALTER SERVER server_name
OWNER TO new_owner;
ALTER SERVER server_name
RENAME TO new_name;
</synopsis>
</refsynopsisdiv>
</refentry>
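A hedged sketch: the server obs_srv and the address option are illustrative, since the valid option names depend on the foreign data wrapper in use.
-- Update a server option and change the server's owner.
ALTER SERVER obs_srv OPTIONS (SET address 'obs.example.com');
ALTER SERVER obs_srv OWNER TO etl_owner;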

View File

@@ -1,27 +1,27 @@
<refentry id="SQL-ALTER_SESSION">
<refmeta>
<refentrytitle>ALTER SESSION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SESSION</refname>
<refpurpose>set or modify the conditions and parameters that affect the current session</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SESSION SET
{{config_parameter { { TO | = } { value | DEFAULT }
| FROM CURRENT }} | CURRENT_SCHEMA [ TO | = ] { schema | DEFAULT }
| TIME ZONE time_zone
| SCHEMA schema
| NAMES encoding_name
| ROLE role_name PASSWORD 'password'
| SESSION AUTHORIZATION { role_name PASSWORD 'password' | DEFAULT }
| XML OPTION { DOCUMENT | CONTENT }
} ;
ALTER SESSION SET [ SESSION CHARACTERISTICS AS ] TRANSACTION
{ ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED } | { READ ONLY | READ WRITE } } [, ...] ;
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_SESSION">
<refmeta>
<refentrytitle>ALTER SESSION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SESSION</refname>
<refpurpose>set or modify the conditions and parameters that affect the current session</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SESSION SET
{{config_parameter { { TO | = } { value | DEFAULT }
| FROM CURRENT }} | CURRENT_SCHEMA [ TO | = ] { schema | DEFAULT }
| TIME ZONE time_zone
| SCHEMA schema
| NAMES encoding_name
| ROLE role_name PASSWORD 'password'
| SESSION AUTHORIZATION { role_name PASSWORD 'password' | DEFAULT }
| XML OPTION { DOCUMENT | CONTENT }
} ;
ALTER SESSION SET [ SESSION CHARACTERISTICS AS ] TRANSACTION
{ ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED } | { READ ONLY | READ WRITE } } [, ...] ;
</synopsis>
</refsynopsisdiv>
</refentry>
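For example (the schema sales is assumed to exist):
-- Switch the working schema and time zone for the current session only.
ALTER SESSION SET CURRENT_SCHEMA = sales;
ALTER SESSION SET TIME ZONE 'UTC';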

View File

@@ -1,16 +1,16 @@
<refentry id="SQL-ALTER_SYSTEM_KILL_SESSION">
<refmeta>
<refentrytitle>ALTER SYSTEM KILL SESSION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SYSTEM KILL SESSION</refname>
<refpurpose>terminate a system session</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SYSTEM KILL SESSION 'session_sid, serial' [ IMMEDIATE ];
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_SYSTEM_KILL_SESSION">
<refmeta>
<refentrytitle>ALTER SYSTEM KILL SESSION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SYSTEM KILL SESSION</refname>
<refpurpose>terminate a system session</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SYSTEM KILL SESSION 'session_sid, serial' [ IMMEDIATE ];
</synopsis>
</refsynopsisdiv>
</refentry>
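An illustrative call; the sid and serial values are placeholders that would normally come from a session view.
-- Terminate a specific session immediately.
ALTER SYSTEM KILL SESSION '140131075880720, 0' IMMEDIATE;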

View File

@@ -1,16 +1,16 @@
<refentry id="SQL-ALTER_SYSTEM_SET">
<refmeta>
<refentrytitle>ALTER SYSTEM SET</refentrytitle>
<manvolnum>6</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SYSTEM SET</refname>
<refpurpose>set a high-level (postmaster, sighup, and backend) GUC</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SYSTEM SET { GUC_name } TO { GUC_value };
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_SYSTEM_SET">
<refmeta>
<refentrytitle>ALTER SYSTEM SET</refentrytitle>
<manvolnum>6</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER SYSTEM SET</refname>
<refpurpose>set a high-level (postmaster, sighup, and backend) GUC</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER SYSTEM SET { GUC_name } TO { GUC_value };
</synopsis>
</refsynopsisdiv>
</refentry>
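For instance (assuming the sighup-level GUC shown is the one to persist):
-- Persist a configuration change across the instance.
ALTER SYSTEM SET autovacuum TO on;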

View File

@@ -1,102 +1,111 @@
<refentry id="SQL-ALTER_TABLE">
<refmeta>
<refentrytitle>ALTER TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TABLE</refname>
<refpurpose>change the definition of a table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
action [, ... ];
ALTER TABLE [ IF EXISTS ] table_name
ADD ( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]} [, ...] );
ALTER TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] );
ALTER TABLE [ IF EXISTS ] table_name
RENAME TO new_table_name;
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
RENAME [ COLUMN ] column_name TO new_column_name;
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
RENAME CONSTRAINT constraint_name TO new_constraint_name;
ALTER TABLE [ IF EXISTS ] table_name
SET SCHEMA new_schema;
where action can be:
column_clause
| ADD table_constraint [ NOT VALID ]
| ADD table_constraint_using_index
| VALIDATE CONSTRAINT constraint_name
| DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ]
| CLUSTER ON index_name
| SET WITHOUT CLUSTER
| SET ( {storage_parameter = value} [, ... ] )
| RESET ( storage_parameter [, ... ] )
| OWNER TO new_owner
| SET TABLESPACE new_tablespace
| SET {COMPRESS|NOCOMPRESS}
| TO { GROUP groupname | NODE ( nodename [, ... ] ) }
| ADD NODE ( nodename [, ... ] )
| DELETE NODE ( nodename [, ... ] )
| UPDATE SLICE LIKE table_name
| DISABLE TRIGGER [ trigger_name | ALL | USER ]
| ENABLE TRIGGER [ trigger_name | ALL | USER ]
| ENABLE REPLICA TRIGGER trigger_name
| ENABLE ALWAYS TRIGGER trigger_name
| ENABLE ROW LEVEL SECURITY
| DISABLE ROW LEVEL SECURITY
| FORCE ROW LEVEL SECURITY
| NO FORCE ROW LEVEL SECURITY
| ENCRYPTION KEY ROTATION
where column_clause can be:
ADD [ COLUMN ] column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
| DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ]
| ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ]
| ALTER [ COLUMN ] column_name { SET DEFAULT expression | DROP DEFAULT }
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS [PERCENT] integer
| ADD STATISTICS (( column_1_name, column_2_name [, ...] ))
| DELETE STATISTICS (( column_1_name, column_2_name [, ...] ))
| ALTER [ COLUMN ] column_name SET ( {attribute_option = value} [, ... ] )
| ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] )
| ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
GENERATED ALWAYS AS ( generation_expr ) STORED |
UNIQUE index_parameters |
PRIMARY KEY index_parameters |
ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where compress_mode can be:
{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
where table_constraint can be:
[ CONSTRAINT constraint_name ]
{ CHECK ( expression ) |
UNIQUE ( column_name [, ... ] ) index_parameters |
PRIMARY KEY ( column_name [, ... ] ) index_parameters |
PARTIAL CLUSTER KEY ( column_name [, ... ] ) |
FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
[ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where index_parameters can be:
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
where table_constraint_using_index can be:
[ CONSTRAINT constraint_name ]
{ UNIQUE | PRIMARY KEY } USING INDEX index_name
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_TABLE">
<refmeta>
<refentrytitle>ALTER TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TABLE</refname>
<refpurpose>change the definition of a table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
action [, ... ];
ALTER TABLE [ IF EXISTS ] table_name
ADD ( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]} [, ...] );
ALTER TABLE [ IF EXISTS ] table_name
MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] );
ALTER TABLE [ IF EXISTS ] table_name
RENAME TO new_table_name;
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
RENAME [ COLUMN ] column_name TO new_column_name;
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
RENAME CONSTRAINT constraint_name TO new_constraint_name;
ALTER TABLE [ IF EXISTS ] table_name
SET SCHEMA new_schema;
where action can be:
column_clause
| ADD table_constraint [ NOT VALID ]
| ADD table_constraint_using_index
| VALIDATE CONSTRAINT constraint_name
| DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ]
| CLUSTER ON index_name
| SET WITHOUT CLUSTER
| SET ( {storage_parameter = value} [, ... ] )
| RESET ( storage_parameter [, ... ] )
| OWNER TO new_owner
| SET TABLESPACE new_tablespace
| SET {COMPRESS|NOCOMPRESS}
| TO { GROUP groupname | NODE ( nodename [, ... ] ) }
| ADD NODE ( nodename [, ... ] )
| DELETE NODE ( nodename [, ... ] )
| UPDATE SLICE LIKE table_name
| DISABLE TRIGGER [ trigger_name | ALL | USER ]
| ENABLE TRIGGER [ trigger_name | ALL | USER ]
| ENABLE REPLICA TRIGGER trigger_name
| ENABLE ALWAYS TRIGGER trigger_name
| ENABLE ROW LEVEL SECURITY
| DISABLE ROW LEVEL SECURITY
| FORCE ROW LEVEL SECURITY
| NO FORCE ROW LEVEL SECURITY
| ENCRYPTION KEY ROTATION
| AUTO_INCREMENT [ = ] value
where column_clause can be:
ADD [ COLUMN ] column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
| DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ]
| ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ]
| ALTER [ COLUMN ] column_name { SET DEFAULT expression | DROP DEFAULT }
| ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
| ALTER [ COLUMN ] column_name SET STATISTICS [PERCENT] integer
| ADD STATISTICS (( column_1_name, column_2_name [, ...] ))
| DELETE STATISTICS (( column_1_name, column_2_name [, ...] ))
| ALTER [ COLUMN ] column_name SET ( {attribute_option = value} [, ... ] )
| ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] )
| ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
GENERATED ALWAYS AS ( generation_expr ) STORED |
AUTO_INCREMENT |
UNIQUE index_parameters |
PRIMARY KEY index_parameters |
ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where compress_mode can be:
{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
where table_constraint can be:
[ CONSTRAINT [ constraint_name ] ]
{ CHECK ( expression ) |
UNIQUE [ idx_name ] [ USING method ] ( { { column_name | ( expression ) } [ ASC | DESC ] } [, ... ] ) index_parameters |
PRIMARY KEY [ USING method ] ( { column_name [ ASC | DESC ] }[, ... ] ) index_parameters |
PARTIAL CLUSTER KEY ( column_name [, ... ] ) |
FOREIGN KEY [ idx_name ] ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
[ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where index_parameters can be:
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
where table_constraint_using_index can be:
[ CONSTRAINT constraint_name ]
{ UNIQUE | PRIMARY KEY } USING INDEX index_name
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database; it is mandatory in other scenarios.
NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
NOTICE: '( expression )' in the 'UNIQUE' clause of table_constraint is only available in CENTRALIZED mode and B-format database!
NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
</synopsis>
</refsynopsisdiv>
</refentry>
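A compact sketch of the column and constraint clauses above; the table orders and its columns are hypothetical.
-- Add a column, give it a default, and add a primary key constraint.
ALTER TABLE IF EXISTS orders ADD COLUMN note varchar(200);
ALTER TABLE orders ALTER COLUMN note SET DEFAULT '';
ALTER TABLE orders ADD CONSTRAINT orders_pk PRIMARY KEY (order_id);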

View File

@@ -1,25 +1,25 @@
<refentry id="SQL-ALTER_TABLESPACE">
<refmeta>
<refentrytitle>ALTER TABLESPACE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TABLESPACE</refname>
<refpurpose>change the definition of a tablespace</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TABLESPACE tablespace_name
RENAME TO new_tablespace_name;
ALTER TABLESPACE tablespace_name
OWNER TO new_owner;
ALTER TABLESPACE tablespace_name
SET ( {tablespace_option = value} [, ... ] );
ALTER TABLESPACE tablespace_name
RESET ( tablespace_option [, ... ] );
ALTER TABLESPACE tablespace_name
RESIZE MAXSIZE { UNLIMITED | 'space_size' };
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_TABLESPACE">
<refmeta>
<refentrytitle>ALTER TABLESPACE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TABLESPACE</refname>
<refpurpose>change the definition of a tablespace</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TABLESPACE tablespace_name
RENAME TO new_tablespace_name;
ALTER TABLESPACE tablespace_name
OWNER TO new_owner;
ALTER TABLESPACE tablespace_name
SET ( {tablespace_option = value} [, ... ] );
ALTER TABLESPACE tablespace_name
RESET ( tablespace_option [, ... ] );
ALTER TABLESPACE tablespace_name
RESIZE MAXSIZE { UNLIMITED | 'space_size' };
</synopsis>
</refsynopsisdiv>
</refentry>
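For example (the names and the size limit are illustrative):
-- Rename a tablespace and raise its size ceiling.
ALTER TABLESPACE fast_ts RENAME TO ssd_ts;
ALTER TABLESPACE ssd_ts RESIZE MAXSIZE '50G';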

View File

@@ -1,29 +1,29 @@
<refentry id="SQL-ALTER_TEXT_SEARCH_CONFIGURATION">
<refmeta>
<refentrytitle>ALTER TEXT SEARCH CONFIGURATION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TEXT SEARCH CONFIGURATION</refname>
<refpurpose>change the definition of a text search configuration</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TEXT SEARCH CONFIGURATION name
ADD MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ];
ALTER TEXT SEARCH CONFIGURATION name
ALTER MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ];
ALTER TEXT SEARCH CONFIGURATION name
ALTER MAPPING FOR token_type [, ... ] REPLACE old_dictionary WITH new_dictionary;
ALTER TEXT SEARCH CONFIGURATION name
ALTER MAPPING REPLACE old_dictionary WITH new_dictionary;
ALTER TEXT SEARCH CONFIGURATION name
DROP MAPPING [ IF EXISTS ] FOR token_type [, ... ];
ALTER TEXT SEARCH CONFIGURATION name RENAME TO new_name;
ALTER TEXT SEARCH CONFIGURATION name OWNER TO new_owner;
ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA new_schema;
ALTER TEXT SEARCH CONFIGURATION name SET ( {configuration_option = value} [, ...] );
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_TEXT_SEARCH_CONFIGURATION">
<refmeta>
<refentrytitle>ALTER TEXT SEARCH CONFIGURATION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TEXT SEARCH CONFIGURATION</refname>
<refpurpose>change the definition of a text search configuration</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TEXT SEARCH CONFIGURATION name
ADD MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ];
ALTER TEXT SEARCH CONFIGURATION name
ALTER MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ];
ALTER TEXT SEARCH CONFIGURATION name
ALTER MAPPING FOR token_type [, ... ] REPLACE old_dictionary WITH new_dictionary;
ALTER TEXT SEARCH CONFIGURATION name
ALTER MAPPING REPLACE old_dictionary WITH new_dictionary;
ALTER TEXT SEARCH CONFIGURATION name
DROP MAPPING [ IF EXISTS ] FOR token_type [, ... ];
ALTER TEXT SEARCH CONFIGURATION name RENAME TO new_name;
ALTER TEXT SEARCH CONFIGURATION name OWNER TO new_owner;
ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA new_schema;
ALTER TEXT SEARCH CONFIGURATION name SET ( {configuration_option = value} [, ...] );
</synopsis>
</refsynopsisdiv>
</refentry>
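A brief sketch; my_cfg is a hypothetical configuration, and english_stem stands in for a typical dictionary name.
-- Map plain word tokens to a stemming dictionary, then change the owner.
ALTER TEXT SEARCH CONFIGURATION my_cfg ADD MAPPING FOR word WITH english_stem;
ALTER TEXT SEARCH CONFIGURATION my_cfg OWNER TO doc_admin;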

View File

@@ -1,51 +1,51 @@
<refentry id="SQL-ALTER_USER">
<refmeta>
<refentrytitle>ALTER USER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER USER</refname>
<refpurpose>change a database role</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER USER [ IF EXISTS ] user_name [ [ WITH ] option [ ... ] ];
ALTER USER user_name
RENAME TO new_name;
ALTER USER user_name [ IN DATABASE database_name ]
SET configuration_parameter {{ TO | = } { value | DEFAULT }|FROM CURRENT};
ALTER USER user_name
[ IN DATABASE database_name ] RESET {configuration_parameter|ALL};
where option can be:
{CREATEDB | NOCREATEDB}
| {CREATEROLE | NOCREATEROLE}
| {INHERIT | NOINHERIT}
| {AUDITADMIN | NOAUDITADMIN}
| {SYSADMIN | NOSYSADMIN}
| {MONADMIN | NOMONADMIN}
| {OPRADMIN | NOOPRADMIN}
| {POLADMIN | NOPOLADMIN}
| {USEFT | NOUSEFT}
| {LOGIN | NOLOGIN}
| {REPLICATION | NOREPLICATION}
| {INDEPENDENT | NOINDEPENDENT}
| {VCADMIN | NOVCADMIN}
| {PERSISTENCE | NOPERSISTENCE}
| CONNECTION LIMIT connlimit
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD { 'password' [ EXPIRED ] | DISABLE | EXPIRED }
| [ ENCRYPTED | UNENCRYPTED ] IDENTIFIED BY { 'password' [ REPLACE 'old_password' | EXPIRED ] | DISABLE }
| VALID BEGIN 'timestamp'
| VALID UNTIL 'timestamp'
| RESOURCE POOL 'respool'
| USER GROUP 'groupuser'
| PERM SPACE 'spacelimit'
| TEMP SPACE 'tmpspacelimit'
| SPILL SPACE 'spillspacelimit'
| NODE GROUP logic_cluster_name
| ACCOUNT { LOCK | UNLOCK }
| PGUSER
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ALTER_USER">
<refmeta>
<refentrytitle>ALTER USER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER USER</refname>
<refpurpose>change a database role</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER USER [ IF EXISTS ] user_name [ [ WITH ] option [ ... ] ];
ALTER USER user_name
RENAME TO new_name;
ALTER USER user_name [ IN DATABASE database_name ]
SET configuration_parameter {{ TO | = } { value | DEFAULT }|FROM CURRENT};
ALTER USER user_name
[ IN DATABASE database_name ] RESET {configuration_parameter|ALL};
where option can be:
{CREATEDB | NOCREATEDB}
| {CREATEROLE | NOCREATEROLE}
| {INHERIT | NOINHERIT}
| {AUDITADMIN | NOAUDITADMIN}
| {SYSADMIN | NOSYSADMIN}
| {MONADMIN | NOMONADMIN}
| {OPRADMIN | NOOPRADMIN}
| {POLADMIN | NOPOLADMIN}
| {USEFT | NOUSEFT}
| {LOGIN | NOLOGIN}
| {REPLICATION | NOREPLICATION}
| {INDEPENDENT | NOINDEPENDENT}
| {VCADMIN | NOVCADMIN}
| {PERSISTENCE | NOPERSISTENCE}
| CONNECTION LIMIT connlimit
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD { 'password' [ EXPIRED ] | DISABLE | EXPIRED }
| [ ENCRYPTED | UNENCRYPTED ] IDENTIFIED BY { 'password' [ REPLACE 'old_password' | EXPIRED ] | DISABLE }
| VALID BEGIN 'timestamp'
| VALID UNTIL 'timestamp'
| RESOURCE POOL 'respool'
| USER GROUP 'groupuser'
| PERM SPACE 'spacelimit'
| TEMP SPACE 'tmpspacelimit'
| SPILL SPACE 'spillspacelimit'
| NODE GROUP logic_cluster_name
| ACCOUNT { LOCK | UNLOCK }
| PGUSER
</synopsis>
</refsynopsisdiv>
</refentry>
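For example (the user jack is hypothetical):
-- Lock an account and cap its concurrent connections.
ALTER USER IF EXISTS jack ACCOUNT LOCK;
ALTER USER jack CONNECTION LIMIT 5;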

View File

@@ -1,31 +1,31 @@
<refentry id="SQL-ALTER_VIEW">
<refmeta>
<refentrytitle>ALTER VIEW</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER VIEW</refname>
<refpurpose>change the definition of a view</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER VIEW [ IF EXISTS ] view_name
ALTER [ COLUMN ] column_name SET DEFAULT expression;
ALTER VIEW [ IF EXISTS ] view_name
ALTER [ COLUMN ] column_name DROP DEFAULT;
ALTER VIEW [ IF EXISTS ] view_name
OWNER TO new_owner;
ALTER VIEW [ IF EXISTS ] view_name
RENAME TO new_name;
ALTER VIEW [ IF EXISTS ] view_name
SET SCHEMA new_schema;
ALTER VIEW [ IF EXISTS ] view_name
SET ( {view_option_name [= view_option_value]} [, ... ] );
ALTER VIEW [ IF EXISTS ] view_name
RESET ( view_option_name [, ... ] );
ALTER [DEFINER = user] VIEW view_name [ ( column_name [, ...] ) ]
AS query;
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_VIEW">
<refmeta>
<refentrytitle>ALTER VIEW</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER VIEW</refname>
<refpurpose>change the definition of a view</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER VIEW [ IF EXISTS ] view_name
ALTER [ COLUMN ] column_name SET DEFAULT expression;
ALTER VIEW [ IF EXISTS ] view_name
ALTER [ COLUMN ] column_name DROP DEFAULT;
ALTER VIEW [ IF EXISTS ] view_name
OWNER TO new_owner;
ALTER VIEW [ IF EXISTS ] view_name
RENAME TO new_name;
ALTER VIEW [ IF EXISTS ] view_name
SET SCHEMA new_schema;
ALTER VIEW [ IF EXISTS ] view_name
SET ( {view_option_name [= view_option_value]} [, ... ] );
ALTER VIEW [ IF EXISTS ] view_name
RESET ( view_option_name [, ... ] );
ALTER [DEFINER = user] VIEW view_name [ ( column_name [, ...] ) ]
AS query;
</synopsis>
</refsynopsisdiv>
</refentry>
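A minimal sketch (the view and role names are invented):
-- Rename a view and transfer it to a reporting role.
ALTER VIEW IF EXISTS v_orders RENAME TO v_orders_all;
ALTER VIEW IF EXISTS v_orders_all OWNER TO report_owner;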

View File

@@ -1,17 +1,17 @@
<refentry id="SQL-ALTER_WORKLOAD_GROUP">
<refmeta>
<refentrytitle>ALTER WORKLOAD GROUP</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER WORKLOAD GROUP</refname>
<refpurpose>change the definition of a workload group</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER WORKLOAD GROUP wg_name
USING RESOURCE POOL pool_name [ WITH ( ACT_STATEMENTS = count ) ];
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_WORKLOAD_GROUP">
<refmeta>
<refentrytitle>ALTER WORKLOAD GROUP</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER WORKLOAD GROUP</refname>
<refpurpose>change the definition of a workload group</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER WORKLOAD GROUP wg_name
USING RESOURCE POOL pool_name [ WITH ( ACT_STATEMENTS = count ) ];
</synopsis>
</refsynopsisdiv>
</refentry>
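For instance (the group and pool names are placeholders):
-- Rebind a workload group to a different resource pool with a statement cap.
ALTER WORKLOAD GROUP wg_batch USING RESOURCE POOL batch_pool WITH (ACT_STATEMENTS = 8);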

View File

@@ -1,29 +1,29 @@
<refentry id="SQL-ANALYSE">
<refmeta>
<refentrytitle>ANALYSE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ANALYSE</refname>
<refpurpose>collect statistics about a database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
{ANALYZE | ANALYSE} [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ];
{ANALYZE | ANALYSE} [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ]
PARTITION partition_name;
{ANALYZE | ANALYSE} [ VERBOSE ]
{ foreign_table_name | FOREIGN TABLES };
{ANALYZE | ANALYSE} [ VERBOSE ]
table_name (( column_1_name, column_2_name [, ...] ));
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE};
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE}
table_name|index_name [CASCADE];
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE}
table_name PARTITION (partition_name) [CASCADE];
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ANALYSE">
<refmeta>
<refentrytitle>ANALYSE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ANALYSE</refname>
<refpurpose>collect statistics about a database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
{ANALYZE | ANALYSE} [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ];
{ANALYZE | ANALYSE} [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ]
PARTITION partition_name;
{ANALYZE | ANALYSE} [ VERBOSE ]
{ foreign_table_name | FOREIGN TABLES };
{ANALYZE | ANALYSE} [ VERBOSE ]
table_name (( column_1_name, column_2_name [, ...] ));
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE};
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE}
table_name|index_name [CASCADE];
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE}
table_name PARTITION (partition_name) [CASCADE];
</synopsis>
</refsynopsisdiv>
</refentry>
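For example (the table orders and partition p_2022 are hypothetical):
-- Collect statistics for one table, then for a single partition of it.
ANALYSE VERBOSE orders;
ANALYSE orders PARTITION p_2022;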

View File

@@ -1,28 +1,28 @@
<refentry id="SQL-ANALYZE">
<refmeta>
<refentrytitle>ANALYZE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ANALYZE</refname>
<refpurpose>collect statistics about a database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
{ ANALYZE | ANALYSE } [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ];
{ ANALYZE | ANALYSE } [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ]
PARTITION partition_name;
{ ANALYZE | ANALYSE } [ VERBOSE ]
{ foreign_table_name | FOREIGN TABLES };
{ ANALYZE | ANALYSE } [ VERBOSE ]
table_name (( column_1_name, column_2_name [, ...] ));
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE };
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE } table_name | index_name [ CASCADE ];
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE }
table_name PARTITION (partition_name) [ CASCADE ];
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-ANALYZE">
<refmeta>
<refentrytitle>ANALYZE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ANALYZE</refname>
<refpurpose>collect statistics about a database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
{ ANALYZE | ANALYSE } [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ];
{ ANALYZE | ANALYSE } [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ]
PARTITION partition_name;
{ ANALYZE | ANALYSE } [ VERBOSE ]
{ foreign_table_name | FOREIGN TABLES };
{ ANALYZE | ANALYSE } [ VERBOSE ]
table_name (( column_1_name, column_2_name [, ...] ));
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE };
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE } table_name | index_name [ CASCADE ];
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE }
table_name PARTITION (partition_name) [ CASCADE ];
</synopsis>
</refsynopsisdiv>
</refentry>
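A short sketch of the extended-statistics and verification forms (the names are illustrative):
-- Gather multi-column statistics, then quickly verify the table's storage.
ANALYZE orders ((region, channel));
ANALYZE VERIFY FAST orders CASCADE;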

View File

@@ -1,31 +1,31 @@
<refentry id="SQL-BEGIN">
<refmeta>
<refentrytitle>BEGIN</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>BEGIN</refname>
<refpurpose>1. start an anonymous block. 2. start a transaction.</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
start an anonymous block:
[DECLARE [declare_statements]]
BEGIN
execution_statements
END;
/
start a transaction:
BEGIN [ WORK | TRANSACTION ]
[
{
ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED | SERIALIZABLE | REPEATABLE READ }
| { READ WRITE | READ ONLY }
} [, ...]
];
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-BEGIN">
<refmeta>
<refentrytitle>BEGIN</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>BEGIN</refname>
<refpurpose>1. start an anonymous block. 2. start a transaction.</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
start an anonymous block:
[DECLARE [declare_statements]]
BEGIN
execution_statements
END;
/
start a transaction:
BEGIN [ WORK | TRANSACTION ]
[
{
ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED | SERIALIZABLE | REPEATABLE READ }
| { READ WRITE | READ ONLY }
} [, ...]
];
</synopsis>
</refsynopsisdiv>
</refentry>
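Two brief sketches, one per meaning above; the orders table and the v_total variable are illustrative.
-- Anonymous block: count rows into a local variable.
DECLARE
    v_total integer;
BEGIN
    SELECT count(*) INTO v_total FROM orders;
END;
/
-- Transaction: start a read-only transaction at READ COMMITTED.
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;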

View File

@@ -1,16 +1,16 @@
<refentry id="SQL-CALL">
<refmeta>
<refentrytitle>CALL</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CALL</refname>
<refpurpose>call a defined function</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CALL [ schema. ] func_name ( param_expr );
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-CALL">
<refmeta>
<refentrytitle>CALL</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CALL</refname>
<refpurpose>call a defined function</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CALL [ schema. ] func_name ( param_expr );
</synopsis>
</refsynopsisdiv>
</refentry>
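For example (the function refresh_report and its argument are assumptions):
-- Invoke a defined function with one parameter expression.
CALL public.refresh_report(20220901);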

View File

@@ -1,19 +1,19 @@
<refentry id="SQL-CLEAN_CONNECTION">
<refmeta>
<refentrytitle>CLEAN CONNECTION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CLEAN CONNECTION</refname>
<refpurpose>clean up pooler connections in a cluster</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CLEAN CONNECTION
TO { COORDINATOR ( nodename [, ... ] ) | NODE ( nodename [, ... ] ) | ALL [ CHECK ] [ FORCE ] }
[ FOR DATABASE dbname ]
[ TO USER username ];
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-CLEAN_CONNECTION">
<refmeta>
<refentrytitle>CLEAN CONNECTION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CLEAN CONNECTION</refname>
<refpurpose>clean up pooler connections in a cluster</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CLEAN CONNECTION
TO { COORDINATOR ( nodename [, ... ] ) | NODE ( nodename [, ... ] ) | ALL [ CHECK ] [ FORCE ] }
[ FOR DATABASE dbname ]
[ TO USER username ];
</synopsis>
</refsynopsisdiv>
</refentry>
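An illustrative invocation; the database and user names are placeholders.
-- Forcibly drop pooled connections for one database and user across all nodes.
CLEAN CONNECTION TO ALL FORCE FOR DATABASE salesdb TO USER jack;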

View File

@@ -1,16 +1,16 @@
<refentry id="SQL-CLOSE">
<refmeta>
<refentrytitle>CLOSE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CLOSE</refname>
<refpurpose>close a cursor</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CLOSE { cursor_name | ALL };
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-CLOSE">
<refmeta>
<refentrytitle>CLOSE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CLOSE</refname>
<refpurpose>close a cursor</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CLOSE { cursor_name | ALL };
</synopsis>
</refsynopsisdiv>
</refentry>
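For example (my_cursor is a cursor assumed to be open):
-- Close one named cursor, then every open cursor in the session.
CLOSE my_cursor;
CLOSE ALL;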

View File

@@ -1,18 +1,18 @@
<refentry id="SQL-CLUSTER">
<refmeta>
<refentrytitle>CLUSTER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CLUSTER</refname>
<refpurpose>cluster a table according to an index</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CLUSTER [ VERBOSE ] table_name [ USING index_name ];
CLUSTER [ VERBOSE ] table_name PARTITION ( partition_name ) [ USING index_name ];
CLUSTER [ VERBOSE ];
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-CLUSTER">
<refmeta>
<refentrytitle>CLUSTER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CLUSTER</refname>
<refpurpose>cluster a table according to an index</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CLUSTER [ VERBOSE ] table_name [ USING index_name ];
CLUSTER [ VERBOSE ] table_name PARTITION ( partition_name ) [ USING index_name ];
CLUSTER [ VERBOSE ];
</synopsis>
</refsynopsisdiv>
</refentry>
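A quick sketch (the table and index names are hypothetical):
-- Physically reorder a table by an index, then recluster everything previously clustered.
CLUSTER VERBOSE orders USING idx_orders_created;
CLUSTER;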

View File

@@ -1,49 +1,49 @@
<refentry id="SQL-COMMENT">
<refmeta>
<refentrytitle>COMMENT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>COMMENT</refname>
<refpurpose>define or change the comment of an object</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
COMMENT ON
{
AGGREGATE agg_name (agg_type [, ...] ) |
CAST (source_type AS target_type) |
COLLATION object_name |
COLUMN { table_name.column_name | view_name.column_name } |
CONSTRAINT constraint_name ON table_name |
CONVERSION object_name |
DATABASE object_name |
DOMAIN object_name |
EXTENSION object_name |
FOREIGN DATA WRAPPER object_name |
FOREIGN TABLE object_name |
FUNCTION function_name ( [ {[ argmode ] [ argname ] argtype} [, ...] ] ) |
INDEX object_name |
LARGE OBJECT large_object_oid |
OPERATOR operator_name (left_type, right_type) |
OPERATOR CLASS object_name USING index_method |
OPERATOR FAMILY object_name USING index_method |
[ PROCEDURAL ] LANGUAGE object_name |
ROLE object_name |
RULE rule_name ON table_name |
SCHEMA object_name |
SERVER object_name |
TABLE object_name |
TABLESPACE object_name |
TEXT SEARCH CONFIGURATION object_name |
TEXT SEARCH DICTIONARY object_name |
TEXT SEARCH PARSER object_name |
TEXT SEARCH TEMPLATE object_name |
TYPE object_name |
VIEW object_name
}
IS 'text';
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-COMMENT">
<refmeta>
<refentrytitle>COMMENT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>COMMENT</refname>
<refpurpose>define or change the comment of an object</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
COMMENT ON
{
AGGREGATE agg_name (agg_type [, ...] ) |
CAST (source_type AS target_type) |
COLLATION object_name |
COLUMN { table_name.column_name | view_name.column_name } |
CONSTRAINT constraint_name ON table_name |
CONVERSION object_name |
DATABASE object_name |
DOMAIN object_name |
EXTENSION object_name |
FOREIGN DATA WRAPPER object_name |
FOREIGN TABLE object_name |
FUNCTION function_name ( [ {[ argmode ] [ argname ] argtype} [, ...] ] ) |
INDEX object_name |
LARGE OBJECT large_object_oid |
OPERATOR operator_name (left_type, right_type) |
OPERATOR CLASS object_name USING index_method |
OPERATOR FAMILY object_name USING index_method |
[ PROCEDURAL ] LANGUAGE object_name |
ROLE object_name |
RULE rule_name ON table_name |
SCHEMA object_name |
SERVER object_name |
TABLE object_name |
TABLESPACE object_name |
TEXT SEARCH CONFIGURATION object_name |
TEXT SEARCH DICTIONARY object_name |
TEXT SEARCH PARSER object_name |
TEXT SEARCH TEMPLATE object_name |
TYPE object_name |
VIEW object_name
}
IS 'text';
</synopsis>
</refsynopsisdiv>
</refentry>
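For example (the table orders is illustrative):
-- Attach descriptive comments to a table and one of its columns.
COMMENT ON TABLE orders IS 'Raw order facts, loaded nightly';
COMMENT ON COLUMN orders.order_date IS 'Order date in local time';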

View File

@@ -1,16 +1,16 @@
<refentry id="SQL-COMMIT">
<refmeta>
<refentrytitle>COMMIT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>COMMIT</refname>
<refpurpose>commit the current transaction</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
{ COMMIT | END } [ WORK | TRANSACTION ];
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-COMMIT">
<refmeta>
<refentrytitle>COMMIT</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>COMMIT</refname>
<refpurpose>commit the current transaction</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
{ COMMIT | END } [ WORK | TRANSACTION ];
</synopsis>
</refsynopsisdiv>
</refentry>
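For example (the orders update is illustrative):
-- End the current transaction, making its changes durable.
BEGIN;
UPDATE orders SET status = 'shipped' WHERE order_id = 42;
COMMIT WORK;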

View File

@@ -1,16 +1,16 @@
<refentry id="SQL-COMMIT_PREPARED">
<refmeta>
<refentrytitle>COMMIT PREPARED</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>COMMIT PREPARED</refname>
<refpurpose>commit a transaction that was earlier prepared for two-phase commit</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
COMMIT PREPARED transaction_id;
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-COMMIT_PREPARED">
<refmeta>
<refentrytitle>COMMIT PREPARED</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>COMMIT PREPARED</refname>
<refpurpose>commit a transaction that was earlier prepared for two-phase commit</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
COMMIT PREPARED transaction_id;
</synopsis>
</refsynopsisdiv>
</refentry>
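A hedged sketch of the two-phase flow; the transaction identifier is an illustrative placeholder and must match the one supplied when the transaction was prepared:

    PREPARE TRANSACTION 'batch_tx_42';
    -- later, possibly from another session:
    COMMIT PREPARED 'batch_tx_42';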

@ -1,17 +1,17 @@
<refentry id="SQL-CREATE_APP_WORKLOAD_GROUP_MAPPING">
<refmeta>
<refentrytitle>CREATE APP WORKLOAD GROUP MAPPING</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE APP WORKLOAD GROUP MAPPING</refname>
<refpurpose>create a workload group mapping</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE APP WORKLOAD GROUP MAPPING app_name
[ WITH ( WORKLOAD_GPNAME = workload_gpname ) ];
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_APP_WORKLOAD_GROUP_MAPPING">
<refmeta>
<refentrytitle>CREATE APP WORKLOAD GROUP MAPPING</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE APP WORKLOAD GROUP MAPPING</refname>
<refpurpose>create a workload group mapping</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE APP WORKLOAD GROUP MAPPING app_name
[ WITH ( WORKLOAD_GPNAME = workload_gpname ) ];
</synopsis>
</refsynopsisdiv>
</refentry>
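A minimal sketch following the grammar above; the application name and workload group name are assumed placeholders, and the workload group must already exist:

    CREATE APP WORKLOAD GROUP MAPPING batch_loader
        WITH ( WORKLOAD_GPNAME = wg_batch );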

@ -1,34 +1,34 @@
<refentry id="SQL-CREATE_AUDIT_POLICY">
<refmeta>
<refentrytitle>CREATE AUDIT POLICY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE AUDIT POLICY</refname>
<refpurpose>define a new audit policy</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE AUDIT POLICY [ IF NOT EXISTS ] policy_name { { privilege_audit_clause | access_audit_clause } [ filter_group_clause ] [ ENABLED | DISABLED ] };
where privilege_audit_clause can be:
PRIVILEGES { DDL | ALL } [ ON LABEL ( resource_label_name [, ... ] ) ]
where access_audit_clause can be:
ACCESS { DML | ALL } [ ON LABEL ( resource_label_name [, ... ] ) ]
where filter_group_clause can be:
FILTER ON { ( FILTER_TYPE ( filter_value [, ... ] ) ) [, ... ] }
where DDL can be:
{ ( ALTER | ANALYZE | COMMENT | CREATE | DROP | GRANT | REVOKE | SET | SHOW | LOGIN_ACCESS | LOGIN_FAILURE | LOGOUT | LOGIN ) }
where DML can be:
{ ( COPY | DEALLOCATE | DELETE_P | EXECUTE | REINDEX | INSERT | PREPARE | SELECT | TRUNCATE | UPDATE ) }
where FILTER_TYPE can be:
{ APP | ROLES | IP }
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_AUDIT_POLICY">
<refmeta>
<refentrytitle>CREATE AUDIT POLICY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE AUDIT POLICY</refname>
<refpurpose>define a new audit policy</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE AUDIT POLICY [ IF NOT EXISTS ] policy_name { { privilege_audit_clause | access_audit_clause } [ filter_group_clause ] [ ENABLED | DISABLED ] };
where privilege_audit_clause can be:
PRIVILEGES { DDL | ALL } [ ON LABEL ( resource_label_name [, ... ] ) ]
where access_audit_clause can be:
ACCESS { DML | ALL } [ ON LABEL ( resource_label_name [, ... ] ) ]
where filter_group_clause can be:
FILTER ON { ( FILTER_TYPE ( filter_value [, ... ] ) ) [, ... ] }
where DDL can be:
{ ( ALTER | ANALYZE | COMMENT | CREATE | DROP | GRANT | REVOKE | SET | SHOW | LOGIN_ACCESS | LOGIN_FAILURE | LOGOUT | LOGIN ) }
where DML can be:
{ ( COPY | DEALLOCATE | DELETE_P | EXECUTE | REINDEX | INSERT | PREPARE | SELECT | TRUNCATE | UPDATE ) }
where FILTER_TYPE can be:
{ APP | ROLES | IP }
</synopsis>
</refsynopsisdiv>
</refentry>
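A minimal sketch assembled from the clauses above; the policy name and the role name in the filter are illustrative:

    CREATE AUDIT POLICY IF NOT EXISTS adt_ddl_by_admin
        PRIVILEGES DDL
        FILTER ON ( ROLES ( sec_admin ) )
        ENABLED;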

@ -1,16 +1,16 @@
<refentry id="SQL-CREATE_BARRIER">
<refmeta>
<refentrytitle>CREATE BARRIER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE BARRIER</refname>
<refpurpose>create a new barrier</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE BARRIER [ barrier_name ];
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_BARRIER">
<refmeta>
<refentrytitle>CREATE BARRIER</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE BARRIER</refname>
<refpurpose>create a new barrier</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE BARRIER [ barrier_name ];
</synopsis>
</refsynopsisdiv>
</refentry>
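A minimal sketch; the barrier name is an illustrative placeholder, written here as a string literal, which is the form commonly shown for this statement:

    CREATE BARRIER 'nightly_sync_point';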

@ -1,17 +1,17 @@
<refentry id="SQL-CREATE_CLIENT_MASTER_KEY">
<refmeta>
<refentrytitle>CREATE CLIENT MASTER KEY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE CLIENT MASTER KEY</refname>
<refpurpose>create client master key</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE CLIENT MASTER KEY client_master_key_name
[WITH] ( ['KEY_STORE' , 'KEY_PATH' , 'ALGORITHM'] );
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_CLIENT_MASTER_KEY">
<refmeta>
<refentrytitle>CREATE CLIENT MASTER KEY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE CLIENT MASTER KEY</refname>
<refpurpose>create client master key</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE CLIENT MASTER KEY client_master_key_name
[WITH] ( ['KEY_STORE' , 'KEY_PATH' , 'ALGORITHM'] );
</synopsis>
</refsynopsisdiv>
</refentry>
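A hedged sketch of the grammar above; the key name, key store, key path, and algorithm values are assumed placeholders, and the values actually accepted depend on the key-management backend configured for the deployment:

    CREATE CLIENT MASTER KEY my_cmk
        WITH ( KEY_STORE = localkms, KEY_PATH = "my_cmk_path", ALGORITHM = RSA_2048 );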

@ -1,17 +1,17 @@
<refentry id="SQL-CREATE_COLUMN_ENCRYPTION_KEY">
<refmeta>
<refentrytitle>CREATE COLUMN ENCRYPTION KEY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE COLUMN ENCRYPTION KEY</refname>
<refpurpose>create column encryption key</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE COLUMN ENCRYPTION KEY column_encryption_key_name
WITH VALUES ( ['CLIENT_MASTER_KEY' , 'ALGORITHM'] );
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_COLUMN_ENCRYPTION_KEY">
<refmeta>
<refentrytitle>CREATE COLUMN ENCRYPTION KEY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE COLUMN ENCRYPTION KEY</refname>
<refpurpose>create column encryption key</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE COLUMN ENCRYPTION KEY column_encryption_key_name
WITH VALUES ( ['CLIENT_MASTER_KEY' , 'ALGORITHM'] );
</synopsis>
</refsynopsisdiv>
</refentry>
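A hedged sketch; my_cmk refers to a previously created client master key, and the key name and algorithm value are assumed placeholders:

    CREATE COLUMN ENCRYPTION KEY my_cek
        WITH VALUES ( CLIENT_MASTER_KEY = my_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256 );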

@ -1,22 +1,22 @@
<refentry id="SQL-CREATE_DATA_SOURCE">
<refmeta>
<refentrytitle>CREATE DATA SOURCE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE DATA SOURCE</refname>
<refpurpose>define a data source</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE DATA SOURCE src_name
[TYPE 'type_str']
[VERSION {'version_str' | NULL}]
[OPTIONS (optname 'optvalue' [, ...])];
Valid values for optname are:
DSN, USERNAME, PASSWORD, ENCODING
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_DATA_SOURCE">
<refmeta>
<refentrytitle>CREATE DATA SOURCE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE DATA SOURCE</refname>
<refpurpose>define a data source</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE DATA SOURCE src_name
[TYPE 'type_str']
[VERSION {'version_str' | NULL}]
[OPTIONS (optname 'optvalue' [, ...])];
Valid values for optname are:
DSN, USERNAME, PASSWORD, ENCODING
</synopsis>
</refsynopsisdiv>
</refentry>
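A minimal sketch using the options listed above; the source name, type string, version, and option values are all illustrative:

    CREATE DATA SOURCE ds_remote
        TYPE 'ODBC'
        VERSION '1.0'
        OPTIONS ( DSN 'remote_dsn', USERNAME 'ds_user', PASSWORD 'xxxxxxxx', ENCODING 'utf8' );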

@ -1,24 +1,24 @@
<refentry id="SQL-CREATE_DATABASE">
<refmeta>
<refentrytitle>CREATE DATABASE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE DATABASE</refname>
<refpurpose>create a new database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE DATABASE [ IF NOT EXISTS ] database_name
[ [ WITH ] {[ OWNER [=] user_name ]|
[ TEMPLATE [=] template ]|
[ ENCODING [=] encoding ]|
[ LC_COLLATE [=] lc_collate ]|
[ LC_CTYPE [=] lc_ctype ]|
[ DBCOMPATIBILITY [=] compatibility_type ]|
[ TABLESPACE [=] tablespace_name ]|
[ CONNECTION LIMIT [=] connlimit ]}[...] ];
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_DATABASE">
<refmeta>
<refentrytitle>CREATE DATABASE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE DATABASE</refname>
<refpurpose>create a new database</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE DATABASE [ IF NOT EXISTS ] database_name
[ [ WITH ] {[ OWNER [=] user_name ]|
[ TEMPLATE [=] template ]|
[ ENCODING [=] encoding ]|
[ LC_COLLATE [=] lc_collate ]|
[ LC_CTYPE [=] lc_ctype ]|
[ DBCOMPATIBILITY [=] compatibility_type ]|
[ TABLESPACE [=] tablespace_name ]|
[ CONNECTION LIMIT [=] connlimit ]}[...] ];
</synopsis>
</refsynopsisdiv>
</refentry>
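A minimal sketch; the database name and owner are illustrative, and DBCOMPATIBILITY = 'B' is an assumed example value for the compatibility type:

    CREATE DATABASE IF NOT EXISTS sales_db
        WITH OWNER = sales_admin
             DBCOMPATIBILITY = 'B'
             CONNECTION LIMIT = 20;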

@ -1,17 +1,17 @@
<refentry id="SQL-CREATE_DIRECTORY">
<refmeta>
<refentrytitle>CREATE DIRECTORY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE DIRECTORY</refname>
<refpurpose>create a new directory</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE [OR REPLACE] DIRECTORY directory_name
AS 'path_name';
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_DIRECTORY">
<refmeta>
<refentrytitle>CREATE DIRECTORY</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE DIRECTORY</refname>
<refpurpose>create a new directory</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE [OR REPLACE] DIRECTORY directory_name
AS 'path_name';
</synopsis>
</refsynopsisdiv>
</refentry>
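A minimal sketch; the directory object name and filesystem path are illustrative:

    CREATE OR REPLACE DIRECTORY dump_dir AS '/home/omm/dump_files';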

@ -1,52 +1,52 @@
<refentry id="SQL-CREATE_FOREIGN_TABLE">
<refmeta>
<refentrytitle>CREATE FOREIGN TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE FOREIGN TABLE</refname>
<refpurpose>define a new foreign table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name
( { column_name type_name POSITION ( offset, length ) [column_constraint ]
| LIKE source_table | table_constraint } [, ...] )
SERVER gsmpp_server
OPTIONS ( { option_name ' value ' } [, ...] )
[ { WRITE ONLY | READ ONLY }]
[ WITH error_table_name | LOG INTO error_table_name ]
[ REMOTE LOG 'name' ]
[PER NODE REJECT LIMIT 'value']
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ];
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name
( { column_name type_name
[ { [CONSTRAINT constraint_name] NULL |
[CONSTRAINT constraint_name] NOT NULL |
column_constraint [...]} ] |
table_constraint} [, ...] )
SERVER server_name
OPTIONS ( { option_name ' value ' } [, ...] )
DISTRIBUTE BY {ROUNDROBIN | REPLICATION}
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ]
[ PARTITION BY ( column_name ) [AUTOMAPPED]] ;
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name
( [ { column_name type_name | LIKE source_table } [, ...] ] )
SERVER server_name
OPTIONS ( { option_name ' value ' } [, ...] )
[ READ ONLY ]
[ DISTRIBUTE BY {ROUNDROBIN} ]
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ];
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ PRIMARY KEY | UNIQUE }
[ NOT ENFORCED [ ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION ] | ENFORCED ]
where table_constraint can be:
[ CONSTRAINT constraint_name ]
{ PRIMARY KEY | UNIQUE } ( column_name )
[ NOT ENFORCED [ ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION ] | ENFORCED ]
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_FOREIGN_TABLE">
<refmeta>
<refentrytitle>CREATE FOREIGN TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE FOREIGN TABLE</refname>
<refpurpose>define a new foreign table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name
( { column_name type_name POSITION ( offset, length ) [column_constraint ]
| LIKE source_table | table_constraint } [, ...] )
SERVER gsmpp_server
OPTIONS ( { option_name ' value ' } [, ...] )
[ { WRITE ONLY | READ ONLY }]
[ WITH error_table_name | LOG INTO error_table_name ]
[ REMOTE LOG 'name' ]
[PER NODE REJECT LIMIT 'value']
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ];
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name
( { column_name type_name
[ { [CONSTRAINT constraint_name] NULL |
[CONSTRAINT constraint_name] NOT NULL |
column_constraint [...]} ] |
table_constraint} [, ...] )
SERVER server_name
OPTIONS ( { option_name ' value ' } [, ...] )
DISTRIBUTE BY {ROUNDROBIN | REPLICATION}
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ]
[ PARTITION BY ( column_name ) [AUTOMAPPED]] ;
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name
( [ { column_name type_name | LIKE source_table } [, ...] ] )
SERVER server_name
OPTIONS ( { option_name ' value ' } [, ...] )
[ READ ONLY ]
[ DISTRIBUTE BY {ROUNDROBIN} ]
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ];
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ PRIMARY KEY | UNIQUE }
[ NOT ENFORCED [ ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION ] | ENFORCED ]
where table_constraint can be:
[ CONSTRAINT constraint_name ]
{ PRIMARY KEY | UNIQUE } ( column_name )
[ NOT ENFORCED [ ENABLE QUERY OPTIMIZATION | DISABLE QUERY OPTIMIZATION ] | ENFORCED ]
</synopsis>
</refsynopsisdiv>
</refentry>
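A hedged sketch of the first (gsmpp_server) form above; the columns, location URL, and format options are assumed placeholders, and the POSITION clause in the grammar is omitted here because it applies to fixed-width layouts:

    CREATE FOREIGN TABLE IF NOT EXISTS ft_customer
        ( cust_id integer, cust_name varchar(64) )
        SERVER gsmpp_server
        OPTIONS ( location 'gsfs://192.168.0.90:5000/customer*', format 'text', delimiter '|' )
        READ ONLY;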
