sync code

LiHeng 2022-03-04 23:22:16 +08:00
parent d26ec83e7b
commit de223dd152
2618 changed files with 382415 additions and 163216 deletions

.gitignore
View File

@ -1,7 +1,7 @@
/.gitee/
/.vscode/
/.idea/
/cmake-build-debug/
*.a
*.o
*.so
@ -21,4 +21,4 @@ objfiles.txt
/config.status
/ereport.txt
/build/script/version.cfg
/build/script/version.cfg

View File

@ -66,6 +66,7 @@ install:
+@echo "openGauss installation complete."
else
ifeq ($(enable_privategauss), yes)
ifneq ($(enable_lite_mode), yes)
install:
$(MAKE) install_mysql_fdw
$(MAKE) install_oracle_fdw
@ -76,6 +77,16 @@ install:
$(MAKE) -C $(root_builddir)/contrib/gsredistribute $@
+@echo "openGauss installation complete."
else
install:
$(MAKE) install_mysql_fdw
$(MAKE) install_oracle_fdw
$(MAKE) install_pldebugger
$(MAKE) -C contrib/postgres_fdw $@
$(MAKE) -C contrib/hstore $@
$(MAKE) -C $(root_builddir)/privategauss/kernel/extension/packages $@
+@echo "openGauss installation complete."
endif
else
install:
$(MAKE) install_mysql_fdw
$(MAKE) install_oracle_fdw
@ -131,6 +142,8 @@ qunitcheck: all
fastcheck_single: all
upgradecheck_single: all
fastcheck_single_comm_proxy: all
redocheck: all

README.md

File diff suppressed because it is too large.

View File

@ -1,62 +0,0 @@
[overwrite]
CMakeLists.txt
License
GNUmakefile.in
Makefile
README.en.md
README.md
Third_Party_Open_Source_Software_Notice
aclocal.m4
build
build.sh
cmake
config
configure
contrib
doc
docker
escan.txt
package
simpleInstall
src/DEVELOPERS
src/Makefile
src/Makefile.global.in
src/Makefile.shlib
src/bcc32.mak
src/bin
src/common
src/gausskernel
src/get_PlatForm_str.sh
src/include
src/lib
src/makefiles
src/manager
src/mtlocal.pl
src/nls-global.mk
src/test
src/tools
src/win32.mak
Tools/memory_check
[delete]
third_party
contrib/secbox
contrib/carbondata
contrib/gtmtester
src/bin/gds
src/bin/pg_redis
src/include/ssl/openssl_etcd.cnf
src/test/regress/jar
src/test/regress/krbclient
src/test/regress/obstools
src/tools/casedb
build/script/mpp_release_list_centos
build/script/mpp_release_list_centos_aarch64
build/script/mpp_release_list_centos_single
build/script/mpp_release_list_euleros
build/script/mpp_release_list_euleros_aarch64
build/script/mpp_release_list_euleros_aarch64_single
build/script/mpp_release_list_euleros_single
build/script/mpp_release_list_linux_x86_64
build/script/mpp_release_list_openeuler_aarch64
build/script/mpp_release_list_openeuler_aarch64_single
build/script/mpp_release_list_kylin_aarch64

View File

@ -1,116 +0,0 @@
#!/usr/bin/perl
use strict;
use warnings;
use File::Basename;
use File::Path qw(make_path remove_tree);
use Cwd;
my $gausskernel_dir = $ARGV[0];
my $opengauss_dir = $ARGV[1];
sub usage
{
print " usage:\n";
print " perl port_openGauss.pl GaussDBKernel-server-directory openGauss-server-directory\n";
print " \n";
}
sub valid_line
{
my ($l) = @_;
$l =~ s/^\s+//g;
$l =~ s/\s+$//g;
return 1 if $l;
return 0;
}
sub prepare_parentdir
{
my $dir = $_[0];
$dir =~ s/\/*$//g;
die "there is no such a directory $dir" unless $dir;
my $parentdir = dirname($dir);
make_path $parentdir unless -d $parentdir;
}
if ( !$opengauss_dir || !$gausskernel_dir || $gausskernel_dir eq "-h" || $gausskernel_dir eq "--help" ) {
usage();
exit(-1);
}
if (! -d $opengauss_dir || ! -d $gausskernel_dir ) {
print "ERROR: $opengauss_dir or $gausskernel_dir does not exist!";
}
$opengauss_dir =~ s{/+$}{}g;
$gausskernel_dir =~ s{/+$}{}g;
my $open_assist_dir = dirname(__FILE__);
if ($open_assist_dir !~ m/^\//) {
$open_assist_dir = cwd() . '/' . $open_assist_dir;
}
$open_assist_dir =~ s/\/\.$//;
my $opengauss_fileset = "$open_assist_dir/opengauss_fileset";
my @overwrite_fileset;
my @delete_fileset;
open my $fset, "<", $opengauss_fileset or die "cannot open $opengauss_fileset: $!\n";
my $file_type = "none";
while(my $line=<$fset>) {
chomp $line;
if ($line =~ /\[overwrite\]/) {
$file_type = "overwrite";
next;
}
elsif ($line =~ /\[delete\]/) {
$file_type = "delete";
next;
}
if ($file_type eq "overwrite") {
push @overwrite_fileset, $line;
}
elsif ($file_type eq "delete") {
push @delete_fileset, $line;
}
}
print "[" . localtime() . "] synchronizing directories and files.\n";
foreach my $d(qw/src contrib/) {
if ( -d "$opengauss_dir/$d" ) {
remove_tree("$opengauss_dir/$d");
print "removed $opengauss_dir/$d\n";
}
make_path("$opengauss_dir/$d");
print "created $opengauss_dir/$d\n";
}
foreach my $f(@overwrite_fileset) {
next unless valid_line($f);
if ( -d "$gausskernel_dir/$f") {
prepare_parentdir("$opengauss_dir/$f");
remove_tree("$opengauss_dir/$f") if -d "$opengauss_dir/$f";
system("cp -fr $gausskernel_dir/$f $opengauss_dir/$f") == 0 or print "ERROR: copy $gausskernel_dir/$f failed\n";
print "copied $opengauss_dir/$f\n";
}
elsif ( -f "$gausskernel_dir/$f") {
system("cp -f $gausskernel_dir/$f $opengauss_dir/$f") == 0 or print "ERROR: copy $gausskernel_dir/$f failed\n";
print "copied $opengauss_dir/$f\n";
}
}
foreach my $f(@delete_fileset) {
next unless valid_line($f);
if ( -d "$opengauss_dir/$f") {
remove_tree("$opengauss_dir/$f");
print "deleted $opengauss_dir/$f\n";
}
elsif ( -f "$opengauss_dir/$f") {
unlink "$opengauss_dir/$f";
print "deleted $opengauss_dir/$f\n";
}
}
print "[" . localtime() . "] synchronized directories and files.\n";

View File

@ -1,5 +1,12 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
# description: Compile and pack openGauss
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2020-08-08
#######################################################################
declare build_version_mode='release'
declare build_binarylib_dir='None'
declare wrap_binaries='NO'
@ -74,10 +81,10 @@ ROOT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
echo "ROOT_DIR : $ROOT_DIR"
cd build/script
chmod a+x build_opengauss.sh
sh build_opengauss.sh -m ${build_version_mode} -3rd ${build_binarylib_dir} ${not_optimized} -pkg server -mc off
./build_opengauss.sh -m ${build_version_mode} -3rd ${build_binarylib_dir} ${not_optimized} -pkg server
if [ "${wrap_binaries}"X = "YES"X ]
then
chmod a+x build_opengauss.sh
sh package_opengauss.sh -3rd ${build_binarylib_dir} -m ${build_version_mode} -f ${config_file}
chmod a+x package_opengauss.sh
./package_opengauss.sh -3rd ${build_binarylib_dir} -m ${build_version_mode} -f ${config_file}
fi
exit 0
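
With this change the wrapper calls the build and packaging scripts directly instead of through sh. The commands it ends up running look roughly like the following (the binarylibs path is a placeholder; ${config_file} is the wrapper's own variable):

    cd build/script
    ./build_opengauss.sh -m release -3rd /path/to/binarylibs -pkg server
    # and, only when wrap_binaries=YES:
    ./package_opengauss.sh -3rd /path/to/binarylibs -m release -f ${config_file}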

View File

@ -2,47 +2,20 @@
./bin/gsql
./bin/gaussdb
./bin/gstrace
./bin/gs_basebackup
./bin/gs_probackup
./bin/gs_tar
./bin/gs_encrypt
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_ctl
./bin/gs_initdb
./bin/gs_guc
./bin/encrypt
./bin/openssl
./bin/gs_restore
./bin/gs_cgroup
./bin/openssl
./bin/pg_config
./bin/pg_controldata
./bin/pg_format_cu
./bin/pg_resetxlog
./bin/pg_recvlogical
./bin/alarmItem.conf
./bin/retry_errcodes.conf
./bin/cluster_guc.conf
./bin/bind_net_irq.sh
./bin/setArmOptimization.sh
./bin/krb5kdc
./bin/klist
./bin/kinit
./bin/kdestroy
./bin/kdb5_util
./bin/kadmin.local
./bin/lz4
./bin/kadmind
./bin/dbmind
./bin/server.key.cipher
./bin/server.key.rand
./bin/gs_plan_simulator.sh
./etc/kerberos/kadm5.acl
./etc/kerberos/kdc.conf
./etc/kerberos/krb5.conf
./etc/kerberos/mppdb-site.xml
./share/postgresql/tmp/udstools.py
./share/postgresql/db4ai
./share/postgresql/snowball_create.sql
./share/postgresql/pg_hba.conf.sample
@ -54,7 +27,6 @@
./share/postgresql/pg_ident.conf.sample
./share/postgresql/postgres.description
./share/postgresql/postgresql.conf.sample
./share/postgresql/mot.conf.sample
./share/postgresql/extension/plpgsql--1.0.sql
./share/postgresql/extension/hstore.control
./share/postgresql/extension/security_plugin.control
@ -72,8 +44,6 @@
./share/postgresql/extension/hdfs_fdw.control
./share/postgresql/extension/log_fdw--1.0.sql
./share/postgresql/extension/log_fdw.control
./share/postgresql/extension/mot_fdw--1.0.sql
./share/postgresql/extension/mot_fdw.control
./share/postgresql/extension/postgres_fdw--1.0.sql
./share/postgresql/extension/postgres_fdw.control
./share/postgresql/timezone/GB-Eire
@ -282,7 +252,6 @@
./share/postgresql/timezone/Canada/Newfoundland
./share/postgresql/timezone/Canada/Saskatchewan
./share/postgresql/timezone/Canada/Pacific
./share/postgresql/timezone/Canada/East-Saskatchewan
./share/postgresql/timezone/Canada/Mountain
./share/postgresql/timezone/Canada/Central
./share/postgresql/timezone/CST6CDT
@ -664,7 +633,6 @@
./share/postgresql/timezone/Navajo
./share/postgresql/timezone/GMT
./share/postgresql/system_views.sql
./share/postgresql/private_system_views.sql
./share/postgresql/performance_views.sql
./share/postgresql/sql_features.txt
./share/postgresql/pg_cast_oid.txt
@ -703,11 +671,45 @@
./share/postgresql/timezonesets/Default
./share/postgresql/timezonesets/Etc.txt
./share/postgresql/postgres.bki
./share/llvmir/GaussDB_expr.ir
./share/sslcert/gsql/openssl.cnf
./share/sslcert/grpc/openssl.cnf
./share/sslcert/om/openssl.cnf
./lib/libsimsearch/
./lib/libnuma.so
./lib/libnuma.so.1
./lib/libnuma.so.1.0.0
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.13
./lib/libcjson_utils.so
./lib/libcjson_utils.so.1
./lib/libcjson_utils.so.1.7.13
./lib/libstdc++.so.6
./lib/libgcc_s.so.1
./lib/libgomp.so
./lib/libgomp.so.1
./lib/libgomp.so.1.0.0
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.5.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.6.0
./lib/libxgboost.so
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/euc_kr_and_mic.so
@ -716,12 +718,6 @@
./lib/postgresql/cyrillic_and_mic.so
./lib/postgresql/utf8_and_johab.so
./lib/postgresql/utf8_and_gb18030.so
./lib/postgresql/pgxs/src/makefiles/pgxs.mk
./lib/postgresql/pgxs/src/Makefile.shlib
./lib/postgresql/pgxs/src/Makefile.port
./lib/postgresql/pgxs/src/nls-global.mk
./lib/postgresql/pgxs/src/Makefile.global
./lib/postgresql/pgxs/src/get_PlatForm_str.sh
./lib/postgresql/pgxs/config/install-sh
./lib/postgresql/euc_cn_and_mic.so
./lib/postgresql/latin_and_mic.so
@ -747,142 +743,9 @@
./lib/postgresql/pg_plugin
./lib/postgresql/proc_srclib
./lib/postgresql/security_plugin.so
./lib/postgresql/pg_upgrade_support.so
./lib/postgresql/java/pljava.jar
./lib/postgresql/postgres_fdw.so
./lib/postgresql/pgoutput.so
./lib/libpljava.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libmasstree.so
./lib/libupb.so
./lib/libupb.so.9
./lib/libupb.so.9.0.0
./lib/libabsl_str_format_internal.so
./lib/libabsl_strings.so
./lib/libabsl_throw_delegate.so
./lib/libabsl_strings_internal.so
./lib/libabsl_base.so
./lib/libabsl_dynamic_annotations.so
./lib/libabsl_spinlock_wait.so
./lib/libabsl_int128.so
./lib/libabsl_bad_optional_access.so
./lib/libabsl_raw_logging_internal.so
./lib/libabsl_log_severity.so
./lib/libaddress_sorting.so
./lib/libaddress_sorting.so.9
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkadm5clnt.so
./lib/libkadm5clnt_mit.so
./lib/libkadm5clnt_mit.so.11
./lib/libkadm5clnt_mit.so.11.0
./lib/libkadm5clnt_mit.so.12
./lib/libkadm5clnt_mit.so.12.0
./lib/libkadm5srv.so
./lib/libkadm5srv_mit.so
./lib/libkadm5srv_mit.so.11
./lib/libkadm5srv_mit.so.11.0
./lib/libkadm5srv_mit.so.12
./lib/libkadm5srv_mit.so.12.0
./lib/libkdb5.so
./lib/libkdb5.so.9
./lib/libkdb5.so.9.0
./lib/libkdb5.so.10
./lib/libkdb5.so.10.0
./lib/libkrad.so
./lib/libkrad.so.0
./lib/libkrad.so.0.0
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/krb5/plugins/kdb/db2.so
./lib/libverto.so
./lib/libverto.so.0
./lib/libverto.so.0.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.6.0
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libgcc_s.so.1
./lib/libstdc++.so.6
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.13
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./share/llvmir/GaussDB_expr.ir
./lib/libeSDKLogAPI.so
./lib/libeSDKOBS.so
./lib/liblog4cpp.so
./lib/liblog4cpp.so.5
./lib/liblog4cpp.so.5.0.6
./lib/libcharset.so
./lib/libcharset.so.1
./lib/libcharset.so.1.0.0
./lib/libiconv.so
./lib/libiconv.so.2
./lib/libiconv.so.2.6.1
./lib/libnghttp2.so
./lib/libnghttp2.so.14
./lib/libnghttp2.so.14.20.0
./lib/libpcre.so
./lib/libpcre.so.1
./lib/libpcre.so.1.2.12
./lib/libsecurec.so
./lib/libxml2.so
./lib/libxml2.so.2
./lib/libxml2.so.2.9.9
./lib/libparquet.so
./lib/libparquet.so.14
./lib/libparquet.so.14.1.0
./lib/libarrow.so
./lib/libarrow.so.14
./lib/libarrow.so.14.1.0
./lib/OBS.ini
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.4.4
./lib/libxgboost.so
./include/postgresql/server/postgres_ext.h
./include/postgresql/server/pg_config_os.h
./include/postgresql/server/pgtime.h
@ -1012,6 +875,7 @@
./include/postgresql/server/storage/backendid.h
./include/postgresql/server/storage/lock/lock.h
./include/postgresql/server/storage/lock/lwlock.h
./include/postgresql/server/storage/lwlocknames.h
./include/postgresql/server/storage/barrier.h
./include/postgresql/server/storage/shmem.h
./include/postgresql/server/pg_config.h
@ -1035,418 +899,14 @@
./include/postgresql/server/lib/ilist.h
./include/postgresql/server/pgxc/locator.h
./include/postgresql/server/gstrace/gstrace_infra.h
./include/postgresql/server/extension_dependency.h
./include/postgresql/server/libpq/libpq-fe.h
./include/postgresql/server/access/clog.h
./include/postgresql/server/storage/proc.h
./include/postgresql/server/access/xlog.h
./include/postgresql/server/storage/lwlocknames.h
./include/postgresql/server/access/xloginsert.h
./include/postgresql/server/catalog/pg_control.h
./include/postgresql/server/access/parallel_recovery/redo_item.h
./include/postgresql/server/access/parallel_recovery/posix_semaphore.h
./include/postgresql/server/replication/replicainternal.h
./include/postgresql/server/knl/knl_instance.h
./include/postgresql/server/knl/knl_guc.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_sql.h
./include/postgresql/server/knl/knl_guc/knl_guc_common.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_sql.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_storage.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_security.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_security.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_network.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_network.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_memory.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_memory.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_resource.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_resource.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_common.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_common.h
./include/postgresql/server/lib/circularqueue.h
./include/postgresql/server/access/double_write_basic.h
./include/postgresql/server/knl/knl_thread.h
./include/postgresql/server/access/sdir.h
./include/postgresql/server/gssignal/gs_signal.h
./include/postgresql/server/knl/knl_session.h
./include/postgresql/server/libpq/pqcomm.h
./include/postgresql/server/cipher.h
./include/postgresql/server/portability/instr_time.h
./include/postgresql/server/utils/memgroup.h
./include/postgresql/server/storage/latch.h
./include/postgresql/server/workload/qnode.h
./include/postgresql/server/streaming/init.h
./include/postgresql/server/streaming/launcher.h
./include/postgresql/server/pgxc/barrier.h
./include/postgresql/server/libcomm/libcomm.h
./include/postgresql/server/hotpatch/hotpatch.h
./include/postgresql/server/hotpatch/hotpatch_backend.h
./include/postgresql/server/postmaster/bgwriter.h
./include/postgresql/server/postmaster/pagewriter.h
./include/postgresql/server/replication/heartbeat.h
./include/postgresql/server/access/multi_redo_settings.h
./include/postgresql/server/access/redo_statistic_msg.h
./include/postgresql/server/replication/rto_statistic.h
./include/postgresql/server/replication/walprotocol.h
./include/postgresql/server/storage/mot/jit_def.h
./include/postgresql/server/threadpool/threadpool.h
./include/postgresql/server/threadpool/threadpool_controler.h
./include/postgresql/server/threadpool/threadpool_group.h
./include/postgresql/server/knl/knl_variable.h
./include/postgresql/server/threadpool/threadpool_listener.h
./include/postgresql/server/threadpool/threadpool_sessctl.h
./include/postgresql/server/storage/procsignal.h
./include/postgresql/server/threadpool/threadpool_worker.h
./include/postgresql/server/threadpool/threadpool_scheduler.h
./include/postgresql/server/threadpool/threadpool_stream.h
./include/postgresql/server/replication/dataqueuedefs.h
./include/postgresql/server/gtm/gtm_c.h
./include/postgresql/server/cm/etcdapi.h
./include/postgresql/server/alarm/alarm.h
./include/postgresql/server/access/xact.h
./include/postgresql/server/access/cstore_am.h
./include/postgresql/server/access/cstore_roughcheck_func.h
./include/postgresql/server/access/cstoreskey.h
./include/postgresql/server/storage/cu.h
./include/postgresql/server/vecexecutor/vectorbatch.h
./include/postgresql/server/cstore.h
./include/postgresql/server/storage/cstore/cstore_mem_alloc.h
./include/postgresql/server/access/cstore_minmax_func.h
./include/postgresql/server/storage/custorage.h
./include/postgresql/server/storage/fd.h
./include/postgresql/server/postmaster/aiocompleter.h
./include/postgresql/server/storage/buf/bufmgr.h
./include/postgresql/server/storage/buf/buf_internals.h
./include/postgresql/server/storage/smgr.h
./include/postgresql/server/catalog/pg_am.h
./include/postgresql/server/catalog/pg_class.h
./include/postgresql/server/catalog/pg_index.h
./include/postgresql/server/rewrite/prs2lock.h
./include/postgresql/server/tcop/stmt_retry.h
./include/postgresql/server/catalog/pg_hashbucket_fn.h
./include/postgresql/server/utils/rel_gs.h
./include/postgresql/server/catalog/pg_partition.h
./include/postgresql/server/catalog/pg_hashbucket.h
./include/postgresql/server/catalog/catalog.h
./include/postgresql/server/catalog/catversion.h
./include/postgresql/server/catalog/pg_namespace.h
./include/postgresql/server/utils/partitionmap_gs.h
./include/postgresql/server/access/heapam.h
./include/postgresql/server/storage/pagecompress.h
./include/postgresql/server/replication/bcm.h
./include/postgresql/server/storage/cstore/cstorealloc.h
./include/postgresql/server/storage/cucache_mgr.h
./include/postgresql/server/storage/cache_mgr.h
./include/postgresql/server/nodes/plannodes.h
./include/postgresql/server/foreign/foreign.h
./include/postgresql/server/access/obs/obs_am.h
./include/postgresql/server/storage/buf/buffile.h
./include/postgresql/server/replication/slot.h
./include/postgresql/server/access/obs/eSDKOBS.h
./include/postgresql/server/commands/defrem.h
./include/postgresql/server/optimizer/pruning.h
./include/postgresql/server/nodes/relation.h
./include/postgresql/server/optimizer/bucketinfo.h
./include/postgresql/server/pgxc/nodemgr.h
./include/postgresql/server/bulkload/dist_fdw.h
./include/postgresql/server/bulkload/importerror.h
./include/postgresql/server/commands/gds_stream.h
./include/postgresql/server/bulkload/utils.h
./include/postgresql/server/cjson/cJSON.h
./include/postgresql/server/ssl/gs_openssl_client.h
./include/postgresql/server/funcapi.h
./include/postgresql/server/executor/executor.h
./include/postgresql/server/executor/execdesc.h
./include/postgresql/server/nodes/execnodes.h
./include/postgresql/server/access/genam.h
./include/postgresql/server/nodes/tidbitmap.h
./include/postgresql/server/access/relscan.h
./include/postgresql/server/access/itup.h
./include/postgresql/server/executor/instrument.h
./include/postgresql/server/miscadmin.h
./include/postgresql/server/libpq/libpq-be.h
./include/postgresql/server/libpq/hba.h
./include/postgresql/server/libpq/sha2.h
./include/postgresql/server/utils/anls_opt.h
./include/postgresql/server/pgxc/pgxc.h
./include/postgresql/server/catalog/namespace.h
./include/postgresql/server/commands/trigger.h
./include/postgresql/server/executor/spi.h
./include/postgresql/server/access/ustore/undo/knl_uundotype.h
./include/postgresql/server/access/ustore/knl_uheap.h
./include/postgresql/server/access/ustore/knl_utuple.h
./include/postgresql/server/access/ustore/knl_utype.h
./include/postgresql/server/access/ustore/knl_upage.h
./include/postgresql/server/access/ustore/knl_uredo.h
./include/postgresql/server/access/ustore/knl_uundovec.h
./include/postgresql/server/access/ustore/knl_uundorecord.h
./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h
./include/postgresql/server/access/ustore/undo/knl_uundotxn.h
./include/postgresql/server/access/ustore/undo/knl_uundozone.h
./include/postgresql/server/access/ustore/undo/knl_uundospace.h
./include/postgresql/server/communication/commproxy_basic.h
./include/postgresql/server/access/parallel_recovery/page_redo.h
./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h
./include/postgresql/server/executor/exec/execdesc.h
./include/postgresql/server/db4ai/matrix.h
./include/postgresql/server/db4ai/scores.h
./jre/ASSEMBLY_EXCEPTION
./jre/bin/java
./jre/bin/jjs
./jre/bin/keytool
./jre/bin/orbd
./jre/bin/pack200
./jre/bin/policytool
./jre/bin/rmid
./jre/bin/rmiregistry
./jre/bin/servertool
./jre/bin/tnameserv
./jre/bin/unpack200
./jre/lib/amd64/jli/libjli.so
./jre/lib/amd64/jvm.cfg
./jre/lib/amd64/libattach.so
./jre/lib/amd64/libavplugin-ffmpeg-58.so
./jre/lib/amd64/libawt_headless.so
./jre/lib/amd64/libawt.so
./jre/lib/amd64/libawt_xawt.so
./jre/lib/amd64/libdecora_sse.so
./jre/lib/amd64/libdt_socket.so
./jre/lib/amd64/libfontmanager.so
./jre/lib/amd64/libfxplugins.so
./jre/lib/amd64/libglassgtk2.so
./jre/lib/amd64/libglassgtk3.so
./jre/lib/amd64/libglass.so
./jre/lib/amd64/libgstreamer-lite.so
./jre/lib/amd64/libhprof.so
./jre/lib/amd64/libinstrument.so
./jre/lib/amd64/libj2gss.so
./jre/lib/amd64/libj2pcsc.so
./jre/lib/amd64/libj2pkcs11.so
./jre/lib/amd64/libjaas_unix.so
./jre/lib/amd64/libjava_crw_demo.so
./jre/lib/amd64/libjavafx_font_freetype.so
./jre/lib/amd64/libjavafx_font_pango.so
./jre/lib/amd64/libjavafx_font.so
./jre/lib/amd64/libjavafx_iio.so
./jre/lib/amd64/libjava.so
./jre/lib/amd64/libjawt.so
./jre/lib/amd64/libjdwp.so
./jre/lib/amd64/libjfxmedia.so
./jre/lib/amd64/libjfxwebkit.so
./jre/lib/amd64/libjpeg.so
./jre/lib/amd64/libjsdt.so
./jre/lib/amd64/libjsig.so
./jre/lib/amd64/libjsoundalsa.so
./jre/lib/amd64/libjsound.so
./jre/lib/amd64/liblcms.so
./jre/lib/amd64/libmanagement.so
./jre/lib/amd64/libmlib_image.so
./jre/lib/amd64/libnet.so
./jre/lib/amd64/libnio.so
./jre/lib/amd64/libnpt.so
./jre/lib/amd64/libprism_common.so
./jre/lib/amd64/libprism_es2.so
./jre/lib/amd64/libprism_sw.so
./jre/lib/amd64/libsaproc.so
./jre/lib/amd64/libsctp.so
./jre/lib/amd64/libsplashscreen.so
./jre/lib/amd64/libsunec.so
./jre/lib/amd64/libunpack.so
./jre/lib/amd64/libverify.so
./jre/lib/amd64/libzip.so
./jre/lib/amd64/server/libjvm.so
./jre/lib/amd64/server/Xusage.txt
./jre/lib/calendars.properties
./jre/lib/charsets.jar
./jre/lib/classlist
./jre/lib/cmm/CIEXYZ.pf
./jre/lib/cmm/GRAY.pf
./jre/lib/cmm/LINEAR_RGB.pf
./jre/lib/cmm/PYCC.pf
./jre/lib/cmm/sRGB.pf
./jre/lib/content-types.properties
./jre/lib/currency.data
./jre/lib/ext/cldrdata.jar
./jre/lib/ext/dnsns.jar
./jre/lib/ext/jaccess.jar
./jre/lib/ext/jfxrt.jar
./jre/lib/ext/localedata.jar
./jre/lib/ext/meta-index
./jre/lib/ext/nashorn.jar
./jre/lib/ext/sunec.jar
./jre/lib/ext/sunjce_provider.jar
./jre/lib/ext/sunpkcs11.jar
./jre/lib/ext/zipfs.jar
./jre/lib/flavormap.properties
./jre/lib/fontconfig.Euler.properties
./jre/lib/fontconfig.properties
./jre/lib/fontconfig.Ubuntu.properties
./jre/lib/fonts/Roboto-Regular.ttf
./jre/lib/hijrah-config-umalqura.properties
./jre/lib/images/cursors/cursors.properties
./jre/lib/images/cursors/invalid32x32.gif
./jre/lib/images/cursors/motif_CopyDrop32x32.gif
./jre/lib/images/cursors/motif_CopyNoDrop32x32.gif
./jre/lib/images/cursors/motif_LinkDrop32x32.gif
./jre/lib/images/cursors/motif_LinkNoDrop32x32.gif
./jre/lib/images/cursors/motif_MoveDrop32x32.gif
./jre/lib/images/cursors/motif_MoveNoDrop32x32.gif
./jre/lib/javafx-mx.jar
./jre/lib/javafx.properties
./jre/lib/jce.jar
./jre/lib/jexec
./jre/lib/jfr/default.jfc
./jre/lib/jfr.jar
./jre/lib/jfr/profile.jfc
./jre/lib/jfxswt.jar
./jre/lib/jsse.jar
./jre/lib/jvm.hprof.txt
./jre/lib/logging.properties
./jre/lib/management-agent.jar
./jre/lib/management/jmxremote.access
./jre/lib/management/jmxremote.password.template
./jre/lib/management/management.properties
./jre/lib/management/snmp.acl.template
./jre/lib/meta-index
./jre/lib/net.properties
./jre/lib/psfontj2d.properties
./jre/lib/psfont.properties.ja
./jre/lib/resources.jar
./jre/lib/rt.jar
./jre/lib/security/blacklisted.certs
./jre/lib/security/cacerts
./jre/lib/security/java.policy
./jre/lib/security/java.security
./jre/lib/security/policy/limited/local_policy.jar
./jre/lib/security/policy/limited/US_export_policy.jar
./jre/lib/security/policy/unlimited/local_policy.jar
./jre/lib/security/policy/unlimited/US_export_policy.jar
./jre/lib/sound.properties
./jre/lib/tzdb.dat
./jre/LICENSE
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
./bin/gs_basebackup
./bin/gs_probackup
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/euc_kr_and_mic.so
./lib/postgresql/utf8_and_uhc.so
./lib/postgresql/euc_tw_and_big5.so
./lib/postgresql/cyrillic_and_mic.so
./lib/postgresql/utf8_and_johab.so
./lib/postgresql/utf8_and_gb18030.so
./lib/postgresql/pgxs/src/makefiles/pgxs.mk
./lib/postgresql/pgxs/src/Makefile.shlib
./lib/postgresql/pgxs/src/Makefile.port
./lib/postgresql/pgxs/src/nls-global.mk
./lib/postgresql/pgxs/src/Makefile.global
./lib/postgresql/pgxs/config/install-sh
./lib/postgresql/euc_cn_and_mic.so
./lib/postgresql/latin_and_mic.so
./lib/postgresql/utf8_and_sjis2004.so
./lib/postgresql/utf8_and_euc_jp.so
./lib/postgresql/utf8_and_sjis.so
./lib/postgresql/utf8_and_cyrillic.so
./lib/postgresql/utf8_and_euc_kr.so
./lib/postgresql/ascii_and_mic.so
./lib/postgresql/utf8_and_iso8859_1.so
./lib/postgresql/euc_jp_and_sjis.so
./lib/postgresql/dict_snowball.so
./lib/postgresql/utf8_and_ascii.so
./lib/postgresql/utf8_and_euc_tw.so
./lib/postgresql/utf8_and_iso8859.so
./lib/postgresql/utf8_and_win.so
./lib/postgresql/utf8_and_euc_cn.so
./lib/postgresql/utf8_and_gbk.so
./lib/postgresql/utf8_and_euc2004.so
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
[libpq]
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./include/gs_thread.h
./include/gs_threadlocal.h
./include/postgres_ext.h
./include/libpq-fe.h
./include/libpq-events.h
./include/libpq/libpq-fs.h
[version]
V500R002C00
[header]
./include/libpq-fe.h
./include/postgres_ext.h
@ -1455,14 +915,8 @@ V500R002C00
./include/pg_config.h
./include/pg_config_manual.h
./include/pg_config_os.h
./include/cm_config.h
./include/c.h
./include/port.h
./include/cm_msg.h
./include/cm_c.h
./include/cm_misc.h
./include/libpq-int.h
./include/pqcomm.h
./include/pqexpbuffer.h
./include/xlogdefs.h
./include/cm-libpq-fe.h
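
The bracketed headings in this list ([client], [libpq], [version], [header]) mark sections that the packaging scripts later in this diff slice out with grep/sed/awk. A standalone sketch of that lookup, with placeholder file and section names (the real logic lives in read_mpp_version and make_package below):

    listfile=opengauss_release_list   # placeholder file name
    section=libpq                     # placeholder section name
    head=$(grep -n "^\[$section\]" "$listfile" | awk -F: '{print $1}')
    tail=$(sed "1,$head d" "$listfile" | grep -n "^\[" | sed -n '1p' | awk -F: '{print $1}')
    [ -z "$tail" ] && tail=$(( $(wc -l < "$listfile") + 1 - head ))
    awk "NR==$head+1, NR==$tail+$head-1" "$listfile"   # prints every entry of the section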

View File

@ -35,6 +35,7 @@
./bin/lz4
./bin/kadmind
./bin/dbmind
./bin/gs_dbmind
./bin/server.key.cipher
./bin/server.key.rand
./bin/gs_plan_simulator.sh
@ -824,9 +825,7 @@
./lib/libverto.so
./lib/libverto.so.0
./lib/libverto.so.0.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.6.0
./lib/libcurl.so*
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
@ -839,9 +838,7 @@
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.13
./lib/libcjson.so*
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libpgport_tool.so
@ -849,25 +846,13 @@
./share/llvmir/GaussDB_expr.ir
./lib/libeSDKLogAPI.so
./lib/libeSDKOBS.so
./lib/liblog4cpp.so
./lib/liblog4cpp.so.5
./lib/liblog4cpp.so.5.0.6
./lib/libcharset.so
./lib/libcharset.so.1
./lib/libcharset.so.1.0.0
./lib/libiconv.so
./lib/libiconv.so.2
./lib/libiconv.so.2.6.1
./lib/libnghttp2.so
./lib/libnghttp2.so.14
./lib/libnghttp2.so.14.20.0
./lib/libpcre.so
./lib/libpcre.so.1
./lib/libpcre.so.1.2.12
./lib/liblog4cpp.so*
./lib/libcharset.so*
./lib/libiconv.so*
./lib/libnghttp2.so*
./lib/libpcre.so*
./lib/libsecurec.so
./lib/libxml2.so
./lib/libxml2.so.2
./lib/libxml2.so.2.9.9
./lib/libxml2.so*
./lib/libparquet.so
./lib/libparquet.so.14
./lib/libparquet.so.14.1.0
@ -880,7 +865,8 @@
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.4.4
./lib/libzstd.so.1.5.0
./lib/libxgboost.so
./include/postgresql/server/postgres_ext.h
./include/postgresql/server/pg_config_os.h
@ -1011,6 +997,7 @@
./include/postgresql/server/storage/backendid.h
./include/postgresql/server/storage/lock/lock.h
./include/postgresql/server/storage/lock/lwlock.h
./include/postgresql/server/storage/lwlocknames.h
./include/postgresql/server/storage/barrier.h
./include/postgresql/server/storage/shmem.h
./include/postgresql/server/pg_config.h
@ -1444,8 +1431,6 @@
./include/libpq-fe.h
./include/libpq-events.h
./include/libpq/libpq-fs.h
[version]
V500R002C00
[header]
./include/libpq-fe.h
./include/postgres_ext.h
@ -1457,9 +1442,6 @@ V500R002C00
./include/cm_config.h
./include/c.h
./include/port.h
./include/cm_msg.h
./include/cm_c.h
./include/cm_misc.h
./include/libpq-int.h
./include/pqcomm.h
./include/pqexpbuffer.h
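
Several entries above were collapsed into shell globs such as ./lib/libcurl.so* and ./lib/libcjson.so*. The copy loop in target_file_copy (later in this diff) leaves each entry unquoted, so the shell expands the glob against the install tree before tar picks the files up. A minimal illustration with placeholder paths:

    cd "$BUILD_DIR"                      # install tree produced by the build
    dest_dir=/tmp/pkg_temp               # placeholder destination directory
    for file in ./lib/libcurl.so*; do    # expands to libcurl.so, libcurl.so.4, libcurl.so.4.6.0, ...
        tar -cpf - "$file" | ( cd "$dest_dir" && tar -xpf - )
    done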

View File

@ -12,33 +12,158 @@
# Example: ./build_opengauss.sh -3rd /path/to/your/third_party_binarylibs/
# change it to "N" if you want to build with the original build system based solely on Makefiles
CMAKE_PKG="N"
declare CMAKE_PKG="Y"
declare SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
declare ROOT_DIR=$(dirname "${SCRIPT_DIR}")
declare ROOT_DIR=$(dirname "${ROOT_DIR}")
declare package_type='server'
declare product_mode='opengauss'
declare version_mode='release'
declare binarylib_dir='None'
declare make_check='off'
declare separate_symbol='on'
#(0) pre-check
if [ ! -f opengauss.spec ] || [ ! -f package_internal.sh ]; then
echo "ERROR: there is no opengauss.spec/mpp_package.sh"
function print_help()
{
echo "Usage: $0 [OPTION]
-h|--help show help information.
-V|--version show version information.
-3rd|--binarylib_dir the directory of third party binarylibs.
-pkg|--package provide the type of installation package; the valid value is server.
-m|--version_mode valid values are debug, release and memcheck; the default is release.
-pm product mode; the valid value is opengauss.
-mc|--make_check valid values are on or off; the default is on.
-s|--symbol_mode whether to separate symbols in debug mode; the default is on.
-co|--cmake_opt more cmake options
"
}
function print_version()
{
echo $(cat ${SCRIPT_DIR}/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
}
if [ $# = 0 ] ; then
echo "missing option"
print_help
exit 1
fi
#(1) prepare
cp opengauss.spec gauss.spec
#########################################################################
##read command line parameters
#######################################################################
while [ $# -gt 0 ]; do
case "$1" in
-h|--help)
print_help
exit 1
;;
-V|--version)
print_version
exit 1
;;
-3rd|--binarylib_dir)
if [ "$2"X = X ]; then
echo "no given binarylib directory values"
exit 1
fi
binarylib_dir=$2
shift 2
;;
-pkg)
if [ "$2"X = X ]; then
echo "no given package type name"
exit 1
fi
package_type=$2
shift 2
;;
-m|--version_mode)
if [ "$2"X = X ]; then
echo "no given version number values"
exit 1
fi
version_mode=$2
shift 2
;;
-pm)
if [ "$2"X = X ]; then
echo "no given product mode"
exit 1
fi
product_mode=$2
shift 2
;;
-mc|--make_check)
if [ "$2"X = X ]; then
echo "no given make check values"
exit 1
fi
make_check=$2
shift 2
;;
-s|--symbol_mode)
if [ "$2"X = X ]; then
echo "no given symbol parameter"
exit 1
fi
separate_symbol=$2
shift 2
;;
--cmake_opt)
if [ "$2"X = X ]; then
echo "no extra configure options provided"
exit 1
fi
extra_cmake_opt=$2
shift 2
;;
--config_opt)
if [ "$2"X = X ]; then
echo "no extra configure options provided"
exit 1
fi
extra_config_opt=$2
shift 2
;;
*)
echo "Internal Error: option processing error: $1" 1>&2
echo "please input right paramtenter, the following command may help you"
echo "${0} --help or ${0} -h"
exit 1
esac
done
#(2) invoke package_internal.sh
if [ "$CMAKE_PKG" == "N" ]; then
chmod a+x package_internal.sh
echo "package_internal.sh $@ -nopkg -pm opengauss"
./package_internal.sh $@ -nopkg -pm opengauss
if [ $? != "0" ]; then
echo "failed in build opengauss"
fi
if [ -e "$SCRIPT_DIR/utils/common.sh" ];then
source $SCRIPT_DIR/utils/common.sh
else
chmod a+x cmake_package_internal.sh
echo "cmake_package_internal.sh $@ -nopkg -pm opengauss"
./cmake_package_internal.sh $@ -nopkg -pm opengauss
if [ $? != "0" ]; then
echo "failed in build opengauss"
fi
exit 1
fi
#(3) remove files which are not necessary
BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
#(1) invoke package_internal.sh
if [ "$CMAKE_PKG" == "N" ]; then
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
source $SCRIPT_DIR/utils/make_compile.sh || exit 1
else
echo "begin config cmake options:" >> "$LOG_FILE" 2>&1
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
declare CMAKE_BUILD_DIR=${ROOT_DIR}/tmp_build
declare CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_MOT=ON ${extra_cmake_opt}"
echo "[cmake options] cmake options is:${CMAKE_OPT}" >> "$LOG_FILE" 2>&1
source $SCRIPT_DIR/utils/cmake_compile.sh || exit 1
fi
function main()
{
echo "[makegaussdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
echo "[makegaussdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}"
read_gaussdb_version
read_gaussdb_number
gaussdb_pkg_pre_clean
gaussdb_build
}
main
echo "now, all build has finished!"
exit 0
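
Typical invocations of this entry script, following its own help text and the example in its header comment (the binarylibs path is a placeholder):

    ./build_opengauss.sh -3rd /path/to/your/third_party_binarylibs/ -m release -pkg server
    ./build_opengauss.sh -3rd /path/to/your/third_party_binarylibs/ -m debug -s off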

File diff suppressed because it is too large.

View File

@ -0,0 +1,811 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd.
# description: Compile and pack MPPDB
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2021-12-12
#######################################################################
##default package type is server
declare package_type='server'
declare install_package_format='tar'
##default version mode is release
declare version_mode='release'
declare binarylib_dir='None'
declare separate_symbol='on'
#detect platform information.
PLATFORM=32
bit=$(getconf LONG_BIT)
if [ "$bit" -eq 64 ]; then
PLATFORM=64
fi
#get OS distributed version.
kernel=""
version=""
ext_version=""
if [ -f "/etc/euleros-release" ]; then
kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
ext_version=$version
elif [ -f "/etc/openEuler-release" ]; then
kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
elif [ -f "/etc/centos-release" ]; then
kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
else
kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z)
version=$(lsb_release -r | awk -F ' ' '{print $2}')
fi
if [ X"$kernel" == X"euleros" ]; then
dist_version="EULER"
elif [ X"$kernel" == X"centos" ]; then
dist_version="CENTOS"
elif [ X"$kernel" == X"openeuler" ]; then
dist_version="OPENEULER"
else
echo "Only support EulerOS platform."
echo "Kernel is $kernel"
exit 1
fi
show_package=false
gcc_version="7.3.0"
##add platform architecture information
cpus_num=$(grep -w processor /proc/cpuinfo|wc -l)
PLATFORM_ARCH=$(uname -p)
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
ARCHITECTURE_EXTRA_FLAG=_euleros2.0_${ext_version}_$PLATFORM_ARCH
release_file_list="opengauss_release_list_${kernel}_${PLATFORM_ARCH}_mini_single"
else
ARCHITECTURE_EXTRA_FLAG=_euleros2.0_sp5_${PLATFORM_ARCH}
release_file_list="opengauss_release_list_${kernel}_mini_single"
fi
##default install version storage path
declare mppdb_version='GaussDB Kernel'
declare mppdb_name_for_package="$(echo ${mppdb_version} | sed 's/ /-/g')"
declare package_path='./'
declare version_number=''
declare make_check='off'
declare zip_package='on'
declare extra_config_opt=''
#######################################################################
##print the version of mppdb
#######################################################################
function print_version()
{
echo "$version_number"
}
#######################################################################
## print help information
#######################################################################
function print_help()
{
echo "Usage: $0 [OPTION]
-h|--help show help information.
-V|--version show version information.
-f|--file provide the release file list.
-3rd|--binarylib_dir the directory of third party binarylibs.
-pkg|--package provide the type of installation package; valid values are all, server, jdbc, odbc and agent.
-pm product mode; valid values are single, multiple or opengauss, the default is multiple.
-p|--path the path where generated packages are stored.
-t packaging format; valid values are tar or rpm, the default is tar.
-m|--version_mode valid values are debug, release and memcheck; the default is release.
-mc|--make_check valid values are on or off; the default is on.
-s|--symbol_mode whether to separate symbols in debug mode; the default is on.
-cv|--gcc_version gcc-version option: 7.3.0.
-nopkg|--no_package don't zip binaries into packages
-co|--config_opt more config options
-S|--show_pkg show the server package name and bin name based on the current configuration.
"
}
if [ $# = 0 ] ; then
echo "missing option"
print_help
exit 1
fi
SCRIPT_PATH=${0}
FIRST_CHAR=$(expr substr "$SCRIPT_PATH" 1 1)
if [ "$FIRST_CHAR" = "/" ]; then
SCRIPT_PATH=${0}
else
SCRIPT_PATH="$(pwd)/${SCRIPT_PATH}"
fi
SCRIPT_NAME=$(basename $SCRIPT_PATH)
SCRIPT_DIR=$(dirname "${SCRIPT_PATH}")
SCRIPT_DIR=$(dirname "$SCRIPT_DIR")
if [ ! -f "$SCRIPT_DIR/$SCRIPT_NAME" ] ; then
SCRIPT_DIR=$SCRIPT_DIR/script
fi
package_path=$SCRIPT_DIR
#######################################################################
##read version from $release_file_list
#######################################################################
function read_mpp_version()
{
cd $SCRIPT_DIR
local head=$(cat $release_file_list | grep "\[version\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
echo "error: no find version in the $release_file_list file "
exit 1
fi
local tail=$(cat $release_file_list | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
local all=$(cat $release_file_list | wc -l)
let tail=$all+1-$head
fi
version_number=$(cat $release_file_list | awk "NR==$head+1,NR==$tail+$head-1")
echo "${mppdb_name_for_package}-${version_number}">version.cfg
#the number is read automatically from the kernel's globals.cpp; no need to change it here
}
#########################################################################
##read command line parameters
#######################################################################
while [ $# -gt 0 ]; do
case "$1" in
-h|--help)
print_help
exit 1
;;
-V|--version)
print_version
exit 1
;;
-f|--file)
if [ "$2"X = X ]; then
echo "no given file name"
exit 1
fi
release_file_list=$2
shift 2
;;
-3rd|--binarylib_dir)
if [ "$2"X = X ]; then
echo "no given binarylib directory values"
exit 1
fi
binarylib_dir=$2
shift 2
;;
-p|--path)
if [ "$2"X = X ]; then
echo "no given generration package path"
exit 1
fi
package_path=$2
if [ ! -d "$package_path" ]; then
mkdir -p $package_path
fi
shift 2
;;
-pkg)
if [ "$2"X = X ]; then
echo "no given package type name"
exit 1
fi
package_type=$2
shift 2
;;
-s|--symbol_mode)
if [ "$2"X = X ]; then
echo "no given symbol parameter"
exit 1
fi
separate_symbol=$2
shift 2
;;
-t)
if [ "$2"X = X ]; then
echo "no given installation package format values"
exit 1
fi
if [ "$2" = rpm ]; then
echo "error: do not suport rpm package now!"
exit 1
fi
install_package_format=$2
shift 1
;;
-m|--version_mode)
if [ "$2"X = X ]; then
echo "no given version number values"
exit 1
fi
version_mode=$2
shift 2
;;
-mc|--make_check)
if [ "$2"X = X ]; then
echo "no given make check values"
exit 1
fi
make_check=$2
shift 2
;;
-cv|--gcc_version)
if [ "$2"X = X ]; then
echo "no given gcc version"
exit 1
fi
gcc_version=$2
shift 2
;;
-nopkg|--no_package)
zip_package='off'
shift 1
;;
-co|--config_opt)
if [ "$2"X = X ]; then
echo "no extra configure options provided"
exit 1
fi
extra_config_opt=$2
shift 2
;;
-S|--show_pkg)
show_package=true
shift
;;
*)
echo "Internal Error: option processing error: $1" 1>&2
echo "please input right paramtenter, the following command may help you"
echo "./cmake_package_internal.sh --help or ./cmake_package_internal.sh -h"
exit 1
esac
done
read_mpp_version
if [ "$gcc_version" = "7.3.0" ]; then
gcc_version=${gcc_version:0:3}
else
echo "Unknown gcc version $gcc_version"
exit 1
fi
#######################################################################
## declare all package name
#######################################################################
declare version_string="${mppdb_name_for_package}-${version_number}"
declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit"
declare server_package_name="${package_pre_name}.${install_package_format}.gz"
declare libpq_package_name="${package_pre_name}-Libpq.${install_package_format}.gz"
declare symbol_package_name="${package_pre_name}-symbol.${install_package_format}.gz"
echo "[makemppdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
ROOT_DIR=$(dirname "$SCRIPT_DIR")
ROOT_DIR=$(dirname "$ROOT_DIR")
PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh")
if [ "${PLAT_FORM_STR}"x == "Failed"x ]
then
echo "Only support EulerOS openEuler platform."
exit 1
fi
CMAKE_BUILD_DIR=${ROOT_DIR}/tmp_build
declare LOG_FILE="${ROOT_DIR}/build/script/makemppdb_pkg.log"
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
declare ERR_MKGS_FAILED=1
declare MKGS_OK=0
if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then
BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}"
PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${binarylib_dir}/dependency"
else
BUILD_TOOLS_PATH="${ROOT_DIR}/buildtools/${PLAT_FORM_STR}"
PLATFORM_PATH="${ROOT_DIR}/platform/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${ROOT_DIR}/binarylibs"
fi
declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"
export CC="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc"
export CXX="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++"
export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH
jdkpath=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk
if [ ! -d "${jdkpath}" ]; then
jdkpath=${binarylib_dir}/platform/openjdk8/${PLATFORM_ARCH}/jdk
fi
export JAVA_HOME=${jdkpath}
declare p7zpath="${BUILD_TOOLS_PATH}/p7z/bin"
###################################
# build parameter about enable-llt
##################################
echo "[makemppdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}"
###################################
# get version number from globals.cpp
##################################
function read_mpp_number()
{
global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
version_name="GRAND_VERSION_NUM"
version_num=""
line=$(cat $global_kernal | grep ^const* | grep $version_name)
version_num1=${line#*=}
#remove the trailing ';'
version_num=$(echo $version_num1 | tr -d ";")
#strip the surrounding whitespace
version_num=$(echo $version_num)
if echo $version_num | grep -qE '^92[0-9]+$'
then
# get the last three number
latter=${version_num:2}
echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
else
echo "Cannot get the version number from globals.cpp."
exit 1
fi
}
read_mpp_number
#######################################################################
# Print log.
#######################################################################
log()
{
echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@"
echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1
}
#######################################################################
# print log and exit.
#######################################################################
die()
{
log "$@"
echo "$@"
exit $ERR_MKGS_FAILED
}
#######################################################################
## Check the installation package production environment
#######################################################################
function mpp_pkg_pre_check()
{
if [ -d "$BUILD_DIR" ]; then
rm -rf $BUILD_DIR
fi
if [ -d "$LOG_FILE" ]; then
rm -rf $LOG_FILE
fi
}
#######################################################################
# Install all SQL files from distribute/include/catalog/upgrade_sql
# to INSTALL_DIR/bin/script/upgrade_sql.
# Package all SQL files and then verify them with SHA256.
#######################################################################
function package_upgrade_sql()
{
echo "Begin to install upgrade_sql files..."
UPGRADE_SQL_TAR="upgrade_sql.tar.gz"
UPGRADE_SQL_SHA256="upgrade_sql.sha256"
MULTIP_IGNORE_VERSION=(289 294 296)
cp -r "${UPGRADE_SQL_DIR}" .
[ $? -ne 0 ] && die "Failed to cp upgrade_sql files"
tar -czf ${UPGRADE_SQL_TAR} upgrade_sql
[ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}"
rm -rf ./upgrade_sql > /dev/null 2>&1
sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}"
[ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}"
chmod 600 ${UPGRADE_SQL_TAR}
chmod 600 ${UPGRADE_SQL_SHA256}
echo "Successfully packaged upgrade_sql files."
}
#######################################################################
##install the gaussdb database and other components
##select what to install according to the package_type variable
#######################################################################
function mpp_pkg_bld()
{
install_gaussdb
}
#######################################################################
##install the gaussdb database, including server, client and libpq
#######################################################################
function install_gaussdb()
{
# Generate the license control file, and set md5sum string to the code.
echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1
echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1
cd "$ROOT_DIR/"
if [ $? -ne 0 ]; then
die "change dir to $SRC_DIR failed."
fi
if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then
echo "WARNING: do not separate symbol in debug mode!"
fi
binarylibs_path=${ROOT_DIR}/binarylibs
if [ "${binarylib_dir}"x != "None"x ]; then
binarylibs_path=${binarylib_dir}
fi
export BUILD_TUPLE=${PLATFORM_ARCH}
export THIRD_BIN_PATH="${binarylibs_path}"
export PREFIX_HOME="${BUILD_DIR}"
if [ "$version_mode"x == "release"x ]; then
CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
export DEBUG_TYPE=release
elif [ "$version_mode"x == "memcheck"x ]; then
CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
export DEBUG_TYPE=memcheck
else
CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
export DEBUG_TYPE=debug
fi
echo "Begin run cmake for gaussdb server" >> "$LOG_FILE" 2>&1
echo "CMake options: ${CMAKE_OPT}" >> "$LOG_FILE" 2>&1
echo "CMake release: ${DEBUG_TYPE}" >> "$LOG_FILE" 2>&1
export GAUSSHOME=${BUILD_DIR}
export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH}
cd ${ROOT_DIR}
[ -d "${CMAKE_BUILD_DIR}" ] && rm -rf ${CMAKE_BUILD_DIR}
[ -d "${BUILD_DIR}" ] && rm -rf ${BUILD_DIR}
mkdir -p ${CMAKE_BUILD_DIR}
cd ${CMAKE_BUILD_DIR}
cmake .. ${CMAKE_OPT}
echo "Begin make and install gaussdb server" >> "$LOG_FILE" 2>&1
make VERBOSE=1 -sj ${cpus_num}
if [ $? -ne 0 ]; then
die "make failed."
fi
make install -sj ${cpus_num}
if [ $? -ne 0 ]; then
die "make install failed."
fi
## check build specification
spec="gaussdbkernel"
if ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'GaussDB Kernel' >/dev/null 2>&1 ); then
spec="gaussdbkernel"
elif ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'openGauss' >/dev/null 2>&1 ); then
spec="opengauss"
fi
chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
#back to separate_debug_symbol.sh dir
cd $SCRIPT_DIR
if [ "$version_mode" = "release" -a "$separate_symbol" = "on" -a "$zip_package" = "on" ]; then
chmod +x ./separate_debug_information.sh
./separate_debug_information.sh
cd $SCRIPT_DIR
mv symbols.tar.gz $symbol_package_name
fi
#back to root dir
cd $ROOT_DIR
#insert the commitid to version.cfg as the upgrade app path specification
export PATH=${BUILD_DIR}:$PATH
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | cut -d ")" -f 1 | awk '{print $NF}')
echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/bin/iperf3 ${BUILD_DIR}/bin
if [ $? -ne 0 ]; then
die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/bin/iperf3 ${BUILD_DIR}/bin failed"
fi
cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/lib/libiperf.so.0 ${BUILD_DIR}/lib
if [ $? -ne 0 ]; then
die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/lib/libiperf.so.0 ${BUILD_DIR}/lib failed"
fi
cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/fio/comm/bin/fio ${BUILD_DIR}/bin
if [ $? -ne 0 ]; then
die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/fio/comm/bin/fio ${BUILD_DIR}/bin failed"
fi
}
#######################################################################
##select package type according to variable package_type
#######################################################################
function mpp_pkg_make()
{
case "$package_type" in
server)
echo "file list: $release_file_list"
make_package $release_file_list 'server'
make_package $release_file_list 'libpq'
;;
libpq)
make_package $release_file_list 'libpq'
;;
esac
}
declare package_command
#######################################################################
##select package command according to install_package_format
#######################################################################
function select_package_command()
{
case "$install_package_format" in
tar)
tar='tar'
option=' -zcvf'
package_command="$tar$option"
;;
rpm)
rpm='rpm'
option=' -i'
package_command="$rpm$option"
;;
esac
}
###############################################################
## client tools package
## Roach no
## sslcert no
## Data Studio no
## Database Manager no
## Migration Toolkit no
## Cluster Configuration Assistant (CCA) no
## CAT no
###############################################################
function target_file_copy_for_non_server()
{
for file in $(echo $1)
do
tar -cpf - $file | ( cd $2; tar -xpf - )
done
}
declare bin_name="${package_pre_name}.bin"
declare sha256_name=''
declare script_dir="${ROOT_DIR}/script"
#######################################################################
##copy target file into temporary directory temp
#######################################################################
function target_file_copy()
{
###################################################
# make bin package
###################################################
for file in $(echo $1)
do
tar -cpf - $file | ( cd $2; tar -xpf - )
done
cd $BUILD_DIR
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
# do nothing in current version
echo ""
else
sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
fi
if [ "$(ls -A /lib64/libaio.so*)" != "" ]
then
cp /lib64/libaio.so* $2/lib/
elif [ "$(ls -A /lib/libaio.so*)" != "" ]
then
cp /lib/libaio.so* $2/lib/
fi
if [ "$(ls -A /lib64/libnuma.so*)" != "" ]
then
cp /lib64/libnuma.so* $2/lib/
elif [ "$(ls -A /lib/libnuma.so*)" != "" ]
then
cp /lib/libnuma.so* $2/lib/
fi
#generate bin file
echo "Begin generate ${bin_name} bin file..." >> "$LOG_FILE" 2>&1
${p7zpath}/7z a -t7z -sfx "${bin_name}" "$2/*" >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
echo "Please check and makesure '7z' exist. "
die "generate ${bin_name} failed."
fi
echo "End generate ${bin_name} bin file" >> "$LOG_FILE" 2>&1
#generate sha256 file
sha256_name="${package_pre_name}.sha256"
echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
sha256sum "${bin_name}" | awk -F" " '{print $1}' > "$sha256_name"
if [ $? -ne 0 ]; then
die "generate sha256 file failed."
fi
echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1
cp $2/lib/libstdc++.so.6 ./
###################################################
# make server package
###################################################
if [ -d "${2}" ]; then
rm -rf ${2}
fi
mkdir -p ${2}
mkdir -p $2/dependency
cp libstdc++.so.6 $2/dependency
mv ${bin_name} ${sha256_name} $2
}
#######################################################################
##the make_package function has three steps
##1. parse the file given by the release_file_list variable
##2. copy the target files into a newly created temporary directory temp
##3. package all files in the temp directory and move the result to the destination package_path
#######################################################################
function make_package()
{
cd $SCRIPT_DIR
releasefile=$1
pkgname=$2
local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
die "error: ono find $pkgname in the $releasefile file "
fi
local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
local all=$(cat $releasefile | wc -l)
let tail=$all+1-$head
fi
dest=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
if [ "$pkgname"x = "libpq"x -a \( "$version_mode" = "debug" -o "$version_mode" = "release" \) ]; then
# copy include file
head=$(cat $releasefile | grep "\[header\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
die "error: ono find header in the $releasefile file "
fi
tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
all=$(cat $releasefile | wc -l)
let tail=$all+1-$head
fi
dest1=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
dest=$(echo "$dest";echo "$dest1")
fi
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}
rm -rf temp
mkdir temp
case "$pkgname" in
server)
mkdir -p ${BUILD_DIR}/temp/etc
target_file_copy "$dest" ${BUILD_DIR}/temp
;;
*)
target_file_copy_for_non_server "$dest" ${BUILD_DIR}/temp $pkgname
;;
esac
cd ${BUILD_DIR}/temp
select_package_command
case "$pkgname" in
server)
echo "packaging server..."
cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp
if [ $? -ne 0 ]; then
die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed"
fi
cp ${ROOT_DIR}/${open_gauss}/liteom/install.sh ./
if [ $? -ne 0 ]
then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/install.sh to ${BUILD_DIR}/temp failed"
fi
cp ${ROOT_DIR}/${open_gauss}/liteom/uninstall.sh ./
if [ $? -ne 0 ]
then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/uninstall.sh to ${BUILD_DIR}/temp failed"
fi
cp ${ROOT_DIR}/${open_gauss}/liteom/opengauss_lite.conf ./
if [ $? -ne 0 ]
then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/opengauss_lite.conf to ${BUILD_DIR}/temp failed"
fi
# pkg upgrade scripts:upgrade_GAUSSV5.sh, upgrade_common.sh, upgrade_config.sh, upgrade_errorcode.sh
for filename in upgrade_GAUSSV5.sh upgrade_common.sh upgrade_config.sh upgrade_errorcode.sh
do
if ! cp ${ROOT_DIR}/${open_gauss}/liteom/${filename} ./ ; then
die "copy ${ROOT_DIR}/${open_gauss}/liteom/${filename} to ${BUILD_DIR}/temp failed"
fi
done
# install upgrade_sql.* files.
package_upgrade_sql
$package_command "${server_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${server_package_name} failed"
fi
mv ${server_package_name} ${package_path}
echo "install $pkgname tools is ${server_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
;;
libpq)
echo "packaging libpq..."
$package_command "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${libpq_package_name} failed"
fi
mv ${libpq_package_name} ${package_path}
echo "install $pkgname tools is ${libpq_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
;;
esac
}
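# Illustrative usage (hypothetical values, not from the original script): with
# a release list that contains [server] and [libpq] sections, the function is
# expected to be invoked as
#   make_package "$release_file_list" 'server'
#   make_package "$release_file_list" 'libpq'
# producing ${server_package_name} and ${libpq_package_name} under ${package_path}.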
#############################################################
# show package for hotpatch sdv.
#############################################################
if [ "$show_package" = true ]; then
echo "package: "$server_package_name
echo "bin: "$bin_name
exit 0
fi
#############################################################
# main function
#############################################################
# 1. clean install path and log file
mpp_pkg_pre_check
# 2. choose action
mpp_pkg_bld
if [ "$zip_package" = "off" ]; then
echo "The option 'nopkg' is on, no package will be zipped."
exit 0
fi
# 3. make package
mpp_pkg_make
#clean mpp_install directory
echo "clean enviroment"
echo "[makemppdb] $(date +%y-%m-%d' '%T): remove ${BUILD_DIR}" >>"$LOG_FILE" 2>&1
mkdir ${ROOT_DIR}/output
mv ${ROOT_DIR}/build/script/*.tar.gz ${ROOT_DIR}/output/
echo "now, all packages has finished!"
exit 0

View File

@ -1,2 +0,0 @@
PRODUCT=GaussDB Kernel
VERSION=V500R002C00

View File

@ -1,2 +1,2 @@
PRODUCT=openGauss
VERSION=2.1.0
VERSION=2.1.0

View File

@ -1,80 +0,0 @@
#!/bin/bash
#-----------------------------------------------------
#Copyright (c): 2020, Huawei Tech. Co., Ltd.
#FileName : gsql_env.sh
#Version : V500R001C10
#Date : 2020-08-06
#Description : This file is to configure environment variables of gsql
#-----------------------------------------------------
#find the absolute path of this script
LOCAL_PATH=${0}
if [ x${LOCAL_PATH:0:1} = "x-" ] || [ "x${LOCAL_PATH}" = "x/bin/bash" ] || [ "x${LOCAL_PATH}" = "x/bin/sh" ]; then
LOCAL_PATH="$(pwd)"
elif [ x${LOCAL_PATH:0:1} != "x/" ]; then
LOCAL_PATH="$(pwd)/$(dirname ${LOCAL_PATH})";
fi
function logerr()
{
printf "ERROR: $* \n" >&2
}
function loghint()
{
printf "HINT: $* \n" >&2
}
function logwarning()
{
printf "WARNING: $* \n" >&2
}
function doing()
{
length_of_line=60
printf "$1 ";
for ((i=${#1};i<$length_of_line;i++)); do
printf '.';
done;
printf " "
}
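# Illustrative note: doing pads its message with dots up to 60 columns, so
#   doing 'Configuring PATH'
# prints something like "Configuring PATH ......................... "
# and the caller is expected to append its own "done"/error text afterwards.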
#------------------------------
# gsql things
#------------------------------
function config_gsql_and_gs_ktool()
{
doing 'Configuring LD_LIBRARY_PATH, PATH and GS_KTOOL_FILE_PATH for gsql and gs_ktool...'
LIB_PATH="${LOCAL_PATH}/lib"
BIN_PATH="${LOCAL_PATH}/bin"
GS_KT_FILE_PATH="${LOCAL_PATH}/gs_ktool_file"
if [ ! -f "${LOCAL_PATH}/bin/gsql" ]; then
logerr "failed to locate ./bin/gsql, please source this file at the path where it is. "
return 1;
fi;
if [ ! -f "${LOCAL_PATH}/bin/gs_ktool" ]; then
logerr "failed to locate ./bin/gs_ktool, please source this file at the path where it is. "
return 1;
fi;
if [ ! -f "${LOCAL_PATH}/gs_ktool_file/gs_ktool_conf.ini" ]; then
logerr "failed to locate ./gs_ktool_file/gs_ktool_con.ini, please source this file at the path where it is. "
return 1;
fi;
export LD_LIBRARY_PATH=${LIB_PATH}:${LD_LIBRARY_PATH}
export PATH=${BIN_PATH}:${PATH}
export GS_KTOOL_FILE_PATH=${GS_KT_FILE_PATH}
echo 'done'
return 0
}
if [ ! -z "$1" ]; then
echo "Usage:"
echo " source $0"
else
config_gsql_and_gs_ktool
if [ 0 -eq $? ]; then
echo 'All things done.'
fi
fi

View File

@ -1,5 +0,0 @@
V=100
R=003
C=00
OfficialVersion=
InternalVersion=B100

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,5 @@
#!/bin/bash
#############################################################################
#######################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
@ -14,65 +14,47 @@
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gs_backup is a utility to back up or restore binary files and parameter files.
#############################################################################
# descript: Compile and pack GaussDB
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2021-02-28
#######################################################################
declare SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
declare ROOT_DIR=$(dirname "${SCRIPT_DIR}")
declare ROOT_DIR=$(dirname "${ROOT_DIR}")
declare package_type='server'
declare product_mode='opengauss'
declare version_mode='release'
declare binarylib_dir='None'
declare config_file=''
declare om_dir='None'
declare cm_dir='None'
declare show_package='false'
declare install_package_format='tar'
#detect platform information.
PLATFORM=32
bit=$(getconf LONG_BIT)
if [ "$bit" -eq 64 ]; then
PLATFORM=64
fi
#get OS distributed version.
kernel=""
version=""
if [ -f "/etc/openEuler-release" ]
then
kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
elif [ -f "/etc/centos-release" ]
then
kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
elif [ -f "/etc/euleros-release" ]
then
kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
else
kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z)
version=$(lsb_release -r | awk -F ' ' '{print $2}')
fi
## handle the case where kernel is parsed as "name=openeuler"
if echo $kernel | grep -q 'openeuler'
then
kernel="openeuler"
fi
function print_help()
{
echo "Usage: $0 [OPTION]
-h|--help show help information.
-3rd|--binarylib_dir the directory of third party binarylibs.
-pkg|--package provide the type of installation package; the value is server.
-m|--version_mode the version mode: debug, release or memcheck; the default value is release.
-pm|--product_mode the product mode: opengauss or lite; the default value is opengauss.
"
}
if [ X"$kernel" == X"centos" ]; then
dist_version="CentOS"
elif [ X"$kernel" == X"openeuler" ]; then
dist_version="openEuler"
elif [ X"$kernel" == X"euleros" ]; then
dist_version="EulerOS"
elif [ X"$kernel" == X"kylin" ]; then
dist_version="Kylin"
elif [ X"$kernel" = X"ubuntu" ]; then
dist_version="Ubuntu"
else
echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) and Ubuntu(x86) platform."
echo "Kernel is $kernel"
if [ $# = 0 ] ; then
echo "missing option"
print_help
exit 1
fi
declare release_file_list="opengauss_release_list_${kernel}_single"
declare dest_list=""
#########################################################################
##read command line parameters
#######################################################################
@ -82,9 +64,29 @@ while [ $# -gt 0 ]; do
print_help
exit 1
;;
-v|--version)
print_version
exit 1
-3rd|--binarylib_dir)
if [ "$2"X = X ]; then
echo "no given binarylib directory values"
exit 1
fi
binarylib_dir=$2
shift 2
;;
-pkg)
if [ "$2"X = X ]; then
echo "no given package type name"
exit 1
fi
package_type=$2
shift 2
;;
-pm)
if [ "$2"X = X ]; then
echo "no given product mode"
exit 1
fi
product_mode=$2
shift 2
;;
-m|--version_mode)
if [ "$2"X = X ]; then
@ -94,22 +96,9 @@ while [ $# -gt 0 ]; do
version_mode=$2
shift 2
;;
-3rd|--binarylibs_dir)
if [ "$2"X = X ]; then
echo "no given binarylib directory values"
exit 1
fi
binarylib_dir=$2
shift 2
;;
-f|--config_file)
if [ "$2"X = X ]; then
echo "no given config file"
shift 1
else
config_file=$2
shift 2
fi
-S|--show_pkg)
show_package=true
shift
;;
*)
echo "Internal Error: option processing error: $1" 1>&2
@ -119,436 +108,37 @@ while [ $# -gt 0 ]; do
esac
done
##add platform architecture information
PLATFORM_ARCH=$(uname -p)
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
if [ "$dist_version" != "openEuler" ] && [ "$dist_version" != "EulerOS" ] && [ "$dist_version" != "Kylin" ] ; then
echo "We only support NUMA on openEuler(aarch64), EulerOS(aarch64), Kylin(aarch64) platform."
exit 1
fi
release_file_list="opengauss_release_list_${kernel}_aarch64_single"
fi
if [ "$version_mode" = "mini" ]; then
release_file_list="opengauss_release_list_mini"
fi
##default install version storage path
declare server_version='openGauss'
declare server_name_for_package="$(echo ${server_version} | sed 's/ /-/g')" # replace blank with '-' for package name.
declare version_number=''
#######################################################################
##putout the version of server
#######################################################################
function print_version()
{
echo "$version_number"
}
#######################################################################
## print help information
#######################################################################
function print_help()
{
echo "Usage: $0 [OPTION]
-h|--help show help information
-V|--version show version information
-m|--version_mode the version mode: debug, release or memcheck; the default value is release
-3rd|--binarylibs_dir the parent directory of binarylibs
"
}
#######################################################################
##version 2.0.0
#######################################################################
function read_srv_version()
{
cd $SCRIPT_DIR
version_number=$(grep 'VERSION' opengauss.spec | awk -F "=" '{print $2}')
echo "${server_name_for_package}-${version_number}">version.cfg
}
###################################
# get version number from globals.cpp
##################################
function read_srv_number()
{
global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
version_name="GRAND_VERSION_NUM"
version_num=""
line=$(cat $global_kernal | grep ^const* | grep $version_name)
version_num1=${line#*=}
#remove the symbol;
version_num=$(echo $version_num1 | tr -d ";")
#remove the blank
version_num=$(echo $version_num)
if echo $version_num | grep -qE '^92[0-9]+$'
then
# get the last three number
latter=${version_num:2}
echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
else
echo "Cannot get the version number from globals.cpp."
exit 1
fi
}
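# Illustrative note (hypothetical value, not from the original script): if
# globals.cpp contains
#   const uint32 GRAND_VERSION_NUM = 92308;
# the function strips the leading "92" marker and appends "92.308" to
# version.cfg, so the file would read roughly:
#   openGauss-2.1.0
#   92.308
# (the first line comes from read_srv_version above).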
SCRIPT_DIR=$(cd $(dirname $0) && pwd)
test -d ${SCRIPT_DIR}/../../output || mkdir -p ${SCRIPT_DIR}/../../output && rm -fr ${SCRIPT_DIR}/../../output/*
output_path=$(cd ${SCRIPT_DIR}/../../output && pwd)
read_srv_version
#######################################################################
## declare all package name
#######################################################################
declare version_string="${server_name_for_package}-${version_number}"
declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit"
declare libpq_package_name="${package_pre_name}-Libpq.tar.gz"
declare tools_package_name="${package_pre_name}-tools.tar.gz"
declare kernel_package_name="${package_pre_name}.tar.bz2"
declare kernel_symbol_package_name="${package_pre_name}-symbol.tar.gz"
declare sha256_name="${package_pre_name}.sha256"
echo "[make single db] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
ROOT_DIR=$(dirname "$SCRIPT_DIR")
ROOT_DIR=$(dirname "$ROOT_DIR")
PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh")
if [ "${PLAT_FORM_STR}"x == "Failed"x ]
then
echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) platform."
exit 1;
fi
PG_REG_TEST_ROOT="${ROOT_DIR}/"
PMK_SCHEMA="${ROOT_DIR}/script/pmk_schema.sql"
declare LOG_FILE="${SCRIPT_DIR}/make_package.log"
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
BUILD_TOOLS_PATH="${ROOT_DIR}/binarylibs/buildtools/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${ROOT_DIR}/binarylibs/dependency/${PLAT_FORM_STR}"
declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"
if [ "${binarylib_dir}"x != "None"x ]
then
echo "binarylib dir : ${binarylib_dir}"
BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${binarylib_dir}/dependency/${PLAT_FORM_STR}"
fi
gcc_version="7.3"
export CC=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc
export CXX=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++
export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH
read_srv_number
#######################################################################
# move pkgs to output directory
#######################################################################
function deploy_pkgs()
{
for pkg in $@; do
if [ -f $pkg ]; then
mv $pkg $output_path/
fi
done
}
#######################################################################
# Print log.
#######################################################################
log()
{
echo "[make single db] $(date +%y-%m-%d' '%T): $@"
echo "[make single db] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1
}
#######################################################################
# print log and exit.
#######################################################################
die()
{
log "$@"
echo "$@"
if [ -e "$SCRIPT_DIR/utils/common.sh" ];then
source $SCRIPT_DIR/utils/common.sh
else
exit 1
}
#######################################################################
##install gaussdb database contained server
#######################################################################
function install_gaussdb()
{
cd $SCRIPT_DIR
if [ "$version_mode" = "release" ] || [ "$version_mode" = "mini" ]; then
chmod +x ./separate_debug_information.sh
./separate_debug_information.sh
cd $SCRIPT_DIR
mv symbols.tar.gz $kernel_symbol_package_name
deploy_pkgs $kernel_symbol_package_name
fi
#insert the commitid to version.cfg as the upgrade app path specification
export PATH=${BUILD_DIR}:$PATH
export LD_LIBRARY_PATH=${BUILD_DIR}/lib:$LD_LIBRARY_PATH
commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | awk '{print $5}' | cut -d ")" -f 1)
if [ -z "$commitid" ]
then
commitid=$(date "+%Y%m%d%H%M%S")
commitid=${commitid:4:8}
fi
echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
}
#######################################################################
# copy directory's files list to $2
#######################################################################
function copy_files_list()
{
for file in $(echo $1)
do
test -e $file && tar -cpf - $file | ( cd $2; tar -xpf - )
done
}
#######################################################################
# set postgresql.conf.sample from config_file when packing
#######################################################################
function set_config_sample()
{
if [[ -f $config_file ]]
then
config_sample_file=${BUILD_DIR}/share/postgresql/postgresql.conf.sample
if [[ ! -f "$config_sample_file" ]]
then
echo "postgresql.conf.sample does not exist"
exit 1
else
echo "#------------------------------------------------------------------------------" >> $config_sample_file
echo "# USER SET CONFIG ON COMPILING TIME" >> $config_sample_file
echo "#------------------------------------------------------------------------------" >> $config_sample_file
while IFS= read -r line; do
SUBSTRING=$(echo $line | cut -d'=' -f 1)"= "
if grep -q "$SUBSTRING" $config_sample_file ; then
sed -i "/$SUBSTRING/c$line" $config_sample_file
else
echo $line >> $config_sample_file
fi
done < $config_file
fi
fi
}
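# Illustrative note (hypothetical config_file content): a line such as
#   max_connections = 1000
# is either substituted in place of the existing "max_connections = " entry in
# postgresql.conf.sample or, if no such entry exists, appended under the
# "USER SET CONFIG ON COMPILING TIME" banner added above.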
#######################################################################
##copy target file into temporary directory temp
#######################################################################
function target_file_copy()
{
cd ${BUILD_DIR}
set_config_sample
copy_files_list "$1" $2
cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp
cp -rf ${SCRIPT_DIR}/../../simpleInstall ${BUILD_DIR}/temp
if [ $? -ne 0 ]; then
die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed"
fi
sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
#generate tar file
echo "Begin generate ${kernel_package_name} tar file..." >> "$LOG_FILE" 2>&1
cd $2
tar -jcvpf "${kernel_package_name}" ./* >> "$LOG_FILE" 2>&1
cd '-'
mv $2/"${kernel_package_name}" ./
if [ $? -ne 0 ]; then
die "generate ${kernel_package_name} failed."
fi
echo "End generate ${kernel_package_name} tar file" >> "$LOG_FILE" 2>&1
#generate sha256 file
sha256_name="${package_pre_name}.sha256"
echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
sha256sum "${kernel_package_name}" | awk -F" " '{print $1}' > "$sha256_name"
if [ $? -ne 0 ]; then
die "generate sha256 file failed."
fi
echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1
###################################################
# make server package
###################################################
if [ -d "${2}" ]; then
rm -rf ${2}
fi
}
function target_file_copy_for_non_server()
{
cd ${BUILD_DIR}
copy_files_list "$1" $2
}
#######################################################################
##function prep_dest_list has two actions
##1.parse the file named by the release_file_list variable
##2.store the file list of the requested package section in the dest_list variable
#######################################################################
function prep_dest_list()
{
cd $SCRIPT_DIR
releasefile=$1
pkgname=$2
local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
die "error: ono find $pkgname in the $releasefile file "
fi
local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
local all=$(cat $releasefile | wc -l)
let tail=$all+1-$head
fi
dest_list=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
}
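# Illustrative note (assumed file layout): prep_dest_list expects the release
# list file to be organised in bracketed sections, for example:
#   [server]
#   ./bin/gaussdb
#   ./lib/libpq.so.5.5
#   [libpq]
#   ./lib/libpq.so.5.5
# and it stores everything between the "[<pkgname>]" header and the next
# "[...]" header in the global dest_list variable. The entries above are
# hypothetical examples.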
function make_package_srv()
{
echo "Begin package server"
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'server'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp/etc
target_file_copy "$dest_list" ${BUILD_DIR}/temp
deploy_pkgs ${sha256_name} ${kernel_package_name}
echo "make server(all) package success!"
}
#######################################################################
# Install all SQL files from src/distribute/include/catalog/upgrade_sql
# to INSTALL_DIR/bin/script/upgrade_sql.
# Package all SQL files and then verify them with SHA256.
#######################################################################
function make_package_upgrade_sql()
{
echo "Begin to install upgrade_sql files..."
UPGRADE_SQL_TAR="upgrade_sql.tar.gz"
UPGRADE_SQL_SHA256="upgrade_sql.sha256"
cd $SCRIPT_DIR
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}
rm -rf temp
mkdir temp
cd ${BUILD_DIR}/temp
cp -r "${UPGRADE_SQL_DIR}" ./upgrade_sql
[ $? -ne 0 ] && die "Failed to cp upgrade_sql files"
tar -czf ${UPGRADE_SQL_TAR} upgrade_sql
[ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}"
rm -rf ./upgrade_sql > /dev/null 2>&1
sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}"
[ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}"
chmod 600 ${UPGRADE_SQL_TAR}
chmod 600 ${UPGRADE_SQL_SHA256}
deploy_pkgs ${UPGRADE_SQL_TAR} ${UPGRADE_SQL_SHA256}
echo "Successfully packaged upgrade_sql files."
}
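# Illustrative note (a sketch only; the packaging itself does not perform this
# verification): the resulting pair of files can be checked on the target host
# before an upgrade, e.g.
#   test "$(sha256sum upgrade_sql.tar.gz | awk '{print $1}')" = "$(cat upgrade_sql.sha256)"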
function make_package_libpq()
{
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'libpq'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp
target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
cd ${BUILD_DIR}/temp
echo "packaging libpq..."
tar -zvcf "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${libpq_package_name} failed"
fi
deploy_pkgs ${libpq_package_name}
echo "install $pkgname tools is ${libpq_package_name} of ${output_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
}
function make_package_tools()
{
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'client'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp
cd ${BUILD_DIR}/
target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
cd ${BUILD_DIR}/temp
echo "packaging tools..."
tar -zvcf "${tools_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${tools_package_name} failed"
fi
deploy_pkgs ${tools_package_name}
echo "install $pkgname tools is ${tools_package_name} of ${output_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
}
function spec_prep()
{
cp opengauss.spec gauss.spec
}
#######################################################################
## Check the installation package production environment
#######################################################################
function srv_pkg_bld()
{
install_gaussdb
}
function srv_pkg_make()
{
echo "Start package opengauss."
make_package_srv
make_package_libpq
make_package_tools
make_package_upgrade_sql
echo "End package opengauss."
}
fi
#############################################################
# main function
# show package for hotpatch sdv.
#############################################################
# 0. prepare spec file
spec_prep
if [ "$show_package" = true ]; then
echo "package: "$server_package_name
echo "bin: "$bin_name
exit 0
fi
# 1. build server
srv_pkg_bld
declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
declare PKG_TMP_DIR="${BUILD_DIR}/temp"
# 2. make package
srv_pkg_make
if [ -e "$SCRIPT_DIR/utils/internal_packages.sh" ];then
source $SCRIPT_DIR/utils/internal_packages.sh
else
exit 1
fi
function main()
{
echo "[makegaussdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
echo "[makegaussdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}"
gaussdb_pkg
}
main
echo "now, all packages has finished!"
exit 0

View File

@ -1,265 +0,0 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
# descript: recompress package
# version: 2.0
# date: 2021-05-19
#######################################################################
declare server_package_path=""
declare agent_package_path=""
declare product_mode="multiple"
declare unpack_server="unpack_server"
declare unpack_agent="unpack_agent"
declare unpack_psycopg2="unpack_psycopg2"
declare compress_command="tar -zcf"
declare decompress_command="tar -zxf"
function print_help()
{
echo "Usage: $0 [OPTION]
-h|--help show help information.
-pm product mode; the value is single, multiple or opengauss, and the default is multiple.
--server-package the server package path.
--agent-package the agent package path; only needed when -pm is single or multiple.
"
}
function log() {
echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@"
}
function error() {
echo -e "\033[31m[makegaussdb] $(date +%y-%m-%d' '%T) Error: $@\033[0m"
}
while [ $# -gt 0 ]; do
case "$1" in
-h|--help)
print_help
exit 1
;;
-pm)
if [ X$2 == X"" ]; then
error "no given pm product mode."
exit 1
fi
product_mode=$2
shift 2
;;
--server-package)
if [ X$2 == X"" ]; then
error "no given server compress path"
exit 1
fi
server_package_path=$2
shift 2
;;
--agent-package)
if [ X$2 == X"" ]; then
error "no given agent compress path"
exit 1
fi
agent_package_path=$2
shift 2
;;
*)
echo "Internal Error: option processing error: $1" 1>&2
echo "please input right paramtenter, the following command may help you"
echo "sh reconstruct.sh --help or sh reconstruct.sh -h"
exit 1
esac
done
function standard_path() {
local package_path=$1
local first_char=$(expr substr "${package_path}" 1 1)
if [ "${first_char}" != "/" ]; then
package_path="$(pwd)/${package_path}"
fi
echo "${package_path}"
}
function check_path() {
local package_type=$1
local package_path=$2
if [ X${package_path} = X ]; then
error "the paramtenter --${package_type} can not be empty."
exit 1
fi
if [ ! -f "${package_path}" ]; then
error "the file ${package_path} not exist, please check."
exit 1
fi
}
function check_parameter() {
check_path "server-package" ${server_package_path}
server_package_path=$(standard_path ${server_package_path})
if [ X${product_mode} != X"opengauss" ]; then
check_path "agent-pacakge" ${agent_package_path}
agent_package_path=$(standard_path ${agent_package_path})
fi
}
function backup_compress() {
local compress_name=$1
local bak_package_name="${compress_name%%.*}_old.${compress_name#*.}"
if [ -d "${bak_package_name}" ]; then
rm -rf ${bak_package_name}
fi
cp ${compress_name} ${bak_package_name}
}
function delete_backup_package() {
local compress_name=$1
local bak_package_name="${compress_name%%.*}_old.${compress_name#*.}"
if [ -d "${bak_package_name}" ]; then
rm -rf ${bak_package_name}
fi
}
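# Illustrative note on the name mangling above (hypothetical input): for
# compress_name="openGauss-server.tar.gz", "${compress_name%%.*}" expands to
# "openGauss-server" and "${compress_name#*.}" to "tar.gz", so the backup copy
# is named "openGauss-server_old.tar.gz"; delete_backup_package removes that
# same backup once repacking succeeds.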
function final_compress() {
local compress_file=$1
if [ X"${compress_file##*.}" == X"zip" ]; then
zip -q -r ${compress_file} ./*
else
${compress_command} ${compress_file} ./*
fi
}
function begin_decompress() {
local decompress_file=$1
local decompress_dir=$2
if [ X"${decompress_file##*.}" == X"zip" ]; then
unzip -q ${decompress_file} -d ${decompress_dir}
else
${decompress_command} ${decompress_file} -C ${decompress_dir}
fi
}
function distribute_compress() {
server_dir=$(dirname "${server_package_path}")
server_name=$(basename "${server_package_path}")
agent_dir=$(dirname "${agent_package_path}")
agent_name=$(basename "${agent_package_path}")
log "server_name: ${server_name}, agent_name: ${agent_name}"
# decompress server package and copy psycopg2 to lib
cd ${server_dir}
backup_compress ${server_name}
if [ -e "${unpack_server}" ]; then
rm -rf ${unpack_server}
fi
mkdir ${unpack_server}
begin_decompress ${server_name} ${unpack_server}
cd ${unpack_server} && mkdir ${unpack_server} ${unpack_psycopg2}
euler_name=$(basename "$(find . -name "GaussDB-Kernel-V500R00*-64bit.tar.gz")")
psycopg2_name=$(basename "$(find . -name "GaussDB-Kernel-V500R00*-64bit-Python.tar.gz")")
log "euler_name: ${euler_name}, psycopg2_name: ${psycopg2_name}"
${decompress_command} ${euler_name} -C ${unpack_server}
${decompress_command} ${psycopg2_name} -C ${unpack_psycopg2}
chmod -R 700 ${unpack_psycopg2}/psycopg2
cp -r ${unpack_psycopg2}/psycopg2 ${unpack_server}/lib
cp -r ${unpack_psycopg2}/psycopg2 ${unpack_server}/script/gspylib/inspection/lib
log "complete copy psycopg2 to server package."
# decompress agent package and copy psycopg2 to lib, then compress
cd ${agent_dir}
backup_compress ${agent_name}
if [ -e "${unpack_agent}" ]; then
rm -rf ${unpack_agent}
fi
mkdir ${unpack_agent}
begin_decompress ${agent_name} ${unpack_agent}
cd ${unpack_agent} && mkdir ${unpack_agent}
agent_tar_name=$(basename "$(find . -name "GaussDB-Kernel-V500R00*-64bit-AGENT.tar.gz")")
${decompress_command} ${agent_tar_name} -C ${unpack_agent}
cd ${unpack_agent}
cp -r ${server_dir}/${unpack_server}/${unpack_psycopg2}/psycopg2 lib/
${compress_command} ${agent_tar_name} ./*
rm -rf ../${agent_tar_name} && mv ${agent_tar_name} ../ && cd ../ && rm -rf ${unpack_agent}
final_compress ${agent_name}
rm -rf ../${agent_name} && mv ${agent_name} ../ && cd ../ && rm -rf ${unpack_agent}
cd ${agent_dir}
delete_backup_package ${agent_name}
log "complete copy psycopg2 to agent package and compress agent package."
# compress server package
log "begin to compress server package ......"
cd ${server_dir}/${unpack_server}/${unpack_server}
${compress_command} ${euler_name} ./*
rm -rf ../${euler_name} && mv ${euler_name} ../ && cd ../ && rm -rf ${unpack_server}
if [ -d "${unpack_psycopg2}" ]; then
rm -rf ${unpack_psycopg2}
fi
final_compress ${server_name}
rm -rf ../${server_name} && mv ${server_name} ../ && cd ../ && rm -rf ${unpack_server}
cd ${server_dir}
delete_backup_package ${server_name}
log "complete compress server package."
}
function opengauss_compress() {
server_dir=$(dirname "${server_package_path}")
server_name=$(basename "${server_package_path}")
cd ${server_dir}
backup_compress ${server_name}
if [ -e "${unpack_server}" ]; then
rm -rf ${unpack_server}
fi
mkdir ${unpack_server}
${decompress_command} ${server_name} -C ${unpack_server}
cd ${unpack_server} && mkdir ${unpack_agent} ${unpack_psycopg2}
psycopg2_name=$(basename "$(find . -name "openGauss-*-Python.tar.gz")")
agent_name=$(basename "$(find . -name "openGauss-*-om.tar.gz")")
log "agent_name: ${agent_name}, psycopg2_name: ${psycopg2_name}"
${decompress_command} ${agent_name} -C ${unpack_agent}
${decompress_command} ${psycopg2_name} -C ${unpack_psycopg2}
chmod -R 700 ${unpack_psycopg2}/psycopg2
cp -r ${unpack_psycopg2}/psycopg2 ${unpack_agent}/lib
cp -r ${unpack_psycopg2}/psycopg2 ${unpack_agent}/script/gspylib/inspection/lib
log "complete copy psycopg2 to agent package."
# compress agent package
cd ${unpack_agent}
${compress_command} ${agent_name} ./*
rm -rf ../${agent_name} && mv ${agent_name} ../ && cd ../ && rm -rf ${unpack_agent}
log "complete compress agent package."
# recover om sha256
sha256_name="$(echo ${agent_name} | sed 's/\.tar\.gz//').sha256"
if [ -d "${sha256_name}" ]; then
rm -rf ${sha256_name}
fi
sha256sum "${agent_name}" | awk -F" " '{print $1}' > "${sha256_name}"
if [ $? -ne 0 ]; then
error "generate sha256 file failed."
exit 1
fi
if [ -d "${unpack_psycopg2}" ]; then
rm -rf ${unpack_psycopg2}
fi
${compress_command} ${server_name} ./*
rm -rf ../${server_name} && mv ${server_name} ../ && cd ../ && rm -rf ${unpack_server}
delete_backup_package ${server_name}
log "complete compress server package."
}
check_parameter
if [ X${product_mode} == X"opengauss" ]; then
opengauss_compress
else
distribute_compress
fi

View File

@ -76,11 +76,14 @@ separate_symbol()
echo "$x is a script, do not separate symbol"
elif [[ "$x" = *".dat" ]];then
echo "$x is a license file, do not separate symbol"
elif [[ "$x" = *".sh" ]];then
# The second condition below checks whether the file is a shell script that has no file-name suffix.
# An executable shell script usually starts with a shebang line that indicates which interpreter to use,
# e.g., "#!/usr/bin/env bash".
elif [[ "$x" = *".sh" ]] || [[ -f "$x" && -x "$x" && "$(head -c2 $x)" == '#!' ]]; then
echo "$x is a shell file, do not separate symbol"
elif [[ "$x" = *".la" ]];then
echo "$x is a la file, do not separate symbol"
elif [[ "$x" = *".crt" ]];then
elif [[ "$x" = *".crt" ]];then
echo "$x is a crt file, do not separate symbol"
elif [[ "$x" = *".ini" ]];then
echo "$x is a ini file, do not separate symbol"

View File

@ -0,0 +1,187 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd.
# descript: Compile and pack openGauss
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2020-08-08
#######################################################################
#######################################################################
## Check the installation package production environment
#######################################################################
function gaussdb_pkg_pre_clean()
{
if [ -d "$BUILD_DIR" ]; then
rm -rf $BUILD_DIR
fi
if [ -d "$LOG_FILE" ]; then
rm -rf $LOG_FILE
fi
}
###################################
#######################################################################
##read version from gaussdb.ver
#######################################################################
function read_gaussdb_version()
{
cd ${SCRIPT_DIR}
echo "${gaussdb_name_for_package}-${version_number}" > version.cfg
#the version number is read automatically from the kernel's globals.cpp, no need to change it here
}
###################################
# get version number from globals.cpp
##################################
function read_gaussdb_number()
{
global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
version_name="GRAND_VERSION_NUM"
version_num=""
line=$(cat $global_kernal | grep ^const* | grep $version_name)
version_num1=${line#*=}
#remove the symbol;
version_num=$(echo $version_num1 | tr -d ";")
#remove the blank
version_num=$(echo $version_num)
if echo $version_num | grep -qE '^92[0-9]+$'
then
# get the last three number
latter=${version_num:2}
echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
else
echo "Cannot get the version number from globals.cpp."
exit 1
fi
}
#######################################################################
##insert the commitid to version.cfg as the upgrade app path specification
#######################################################################
function get_kernel_commitid()
{
export PATH=${BUILD_DIR}:$PATH
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | awk '{print $5}' | cut -d ")" -f 1)
echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
}
#######################################################################
## generate the version file.
#######################################################################
function make_license_control()
{
python_exec=$(which python 2>/dev/null)
if [ -x "$python_exec" ]; then
$python_exec ${binarylib_dir}/buildtools/license_control/encrypted_version_file.py >> "$LOG_FILE" 2>&1
fi
if [ $? -ne 0 ]; then
die "create ${binarylib_dir}/buildtools/license_control license file failed."
fi
if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ]; then
# Get the sha256 checksums.
gaussdb_200_sha256sum=$(sha256sum $gaussdb_200_file | awk '{print $1}')
gaussdb_300_sha256sum=$(sha256sum $gaussdb_300_file | awk '{print $1}')
# Modify the source code.
sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*NULL[ \t]*,[ \t]*NULL[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {\"$gaussdb_200_sha256sum\", \"$gaussdb_300_sha256sum\"};/g" $gaussdb_version_file
fi
if [ $? -ne 0 ]; then
die "modify '$gaussdb_version_file' failed."
fi
}
function make_gaussdb_kernel()
{
export BUILD_TUPLE=${PLATFORM_ARCH}
export THIRD_BIN_PATH="${binarylib_dir}"
export PREFIX_HOME="${BUILD_DIR}"
export DEBUG_TYPE=${version_mode}
echo "Begin make install gaussdb server" >> "$LOG_FILE" 2>&1
export GAUSSHOME=${BUILD_DIR}
export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH}
[ -d "${CMAKE_BUILD_DIR}" ] && rm -rf ${CMAKE_BUILD_DIR}
[ -d "${BUILD_DIR}" ] && rm -rf ${BUILD_DIR}
mkdir -p ${CMAKE_BUILD_DIR}
cd ${CMAKE_BUILD_DIR}
cmake .. ${CMAKE_OPT}
if [ $? -ne 0 ]; then
die "cmake failed."
fi
cpus_num=$(grep -w processor /proc/cpuinfo|wc -l)
make -sj ${cpus_num}
if [ $? -ne 0 ]; then
die "make failed."
fi
make install -sj ${cpus_num}
if [ $? -ne 0 ]; then
die "make install failed."
fi
echo "End make install gaussdb server" >> "$LOG_FILE" 2>&1
}
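# Illustrative note (assumption): make_gaussdb_kernel is essentially a wrapper
# around an out-of-tree CMake build; done by hand it would look roughly like
#   mkdir -p "${CMAKE_BUILD_DIR}" && cd "${CMAKE_BUILD_DIR}"
#   cmake .. ${CMAKE_OPT}                       # real option set comes from CMAKE_OPT, defined elsewhere
#   make -sj "$(nproc)" && make install -sj "$(nproc)"
# with GAUSSHOME/PREFIX_HOME pointing at the install prefix (BUILD_DIR).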
#######################################################################
##install gaussdb database contained server,client and libpq
#######################################################################
function install_gaussdb()
{
# Generate the license control file, and set the sha256 strings in the code.
echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1
make_license_control
echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1
cd "$ROOT_DIR/"
if [ $? -ne 0 ]; then
die "change dir to $ROOT_DIR failed."
fi
if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then
echo "WARNING: do not separate symbol in debug mode!"
fi
if [ "$product_mode" != "opengauss" ]; then
die "the product mode can only be opengauss!"
fi
echo "build gaussdb kernel." >> "$LOG_FILE" 2>&1
make_gaussdb_kernel
echo "build gaussdb kernel success." >> "$LOG_FILE" 2>&1
chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
#insert the commitid to version.cfg as the upgrade app path specification
get_kernel_commitid
}
#######################################################################
##install gaussdb database and others
##select to install something according to variables package_type need
#######################################################################
function gaussdb_build()
{
case "$package_type" in
server)
install_gaussdb
;;
libpq)
install_gaussdb
;;
*)
echo "Internal Error: option processing error: $package_type"
echo "please input right paramenter values server or libpq "
exit 1
esac
}

View File

@ -0,0 +1,164 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
# descript: Compile and pack openGauss
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2020-08-08
#######################################################################
declare LOG_FILE="${SCRIPT_DIR}/makemppdb_pkg.log"
declare gaussdb_version='openGauss'
declare PLATFORM_ARCH=$(uname -p)
declare package_path=${ROOT_DIR}/output
declare install_package_format="tar"
declare PLATFORM=32
bit=$(getconf LONG_BIT)
if [ "$bit" -eq 64 ]; then
declare PLATFORM=64
fi
# Common helper functions
#######################################################################
##print the version of gaussdb
#######################################################################
function print_version()
{
echo "$version_number"
}
#######################################################################
# Print log.
#######################################################################
function log()
{
echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@"
echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1
}
#######################################################################
# print log and exit.
#######################################################################
function die()
{
log "$@"
echo "$@"
exit $ERR_MKGS_FAILED
}
#######################################################################
##select package command according to install_package_format
#######################################################################
function select_package_command()
{
case "$install_package_format" in
tar)
tar='tar'
option=' -zcvf'
package_command="$tar$option"
;;
esac
}
select_package_command
#######################################################################
##get os dist version
#######################################################################
export PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh")
if [ "${PLAT_FORM_STR}"x == "Failed"x -o "${PLAT_FORM_STR}"x == ""x ]
then
echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) platform."
exit 1;
fi
if [[ "$PLAT_FORM_STR" =~ "euleros" ]]; then
dist_version="EulerOS"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
elif [[ "$PLAT_FORM_STR" =~ "centos" ]]; then
dist_version="CentOS"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
elif [[ "$PLAT_FORM_STR" =~ "openeuler" ]]; then
dist_version="openEuler"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE"
fi
elif [[ "$PLAT_FORM_STR" =~ "kylin" ]]; then
dist_version="Kylin"
if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
fi
else
echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) platform."
echo "Kernel is $kernel"
exit 1
fi
##add platform architecture information
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
if [ "$dist_version" != "openEuler" ] && [ "$dist_version" != "EulerOS" ] && [ "$dist_version" != "Kylin" ] ; then
echo "We only support NUMA on openEuler(aarch64), EulerOS(aarch64), Kylin(aarch64) platform."
exit 1
fi
fi
if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then
BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}"
PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}"
BINARYLIBS_PATH="${binarylib_dir}/dependency"
else
die "${binarylib_dir} not exist"
fi
declare INSTALL_TOOLS_DIR=${BINARYLIBS_PATH}/install_tools_${PLAT_FORM_STR}
declare UNIX_ODBC="${BINARYLIBS_PATH}/${PLAT_FORM_STR}/unixodbc"
# Compilation-related settings
gcc_version="7.3"
ccache -V >/dev/null 2>&1 && USE_CCACHE="ccache " ENABLE_CCACHE="--enable-ccache"
export CC="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc"
export CXX="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++"
export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH
export JAVA_HOME=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk
declare ERR_MKGS_FAILED=1
declare MKGS_OK=0
gaussdb_200_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB200"
gaussdb_300_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB300"
gaussdb_200_standard_file="${binarylib_dir}/buildtools/license_control/gaussdb.license.GaussDB200_Standard"
gaussdb_version_file="${ROOT_DIR}/src/gausskernel/process/postmaster/gaussdb_version.cpp"
if [ -f "$SCRIPT_DIR/gaussdb.ver" ];then
declare version_number=$(cat ${SCRIPT_DIR}/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
else
echo "gaussdb.ver not found!"
exit 1
fi
declare release_file_list="${PLATFORM_ARCH}_${product_mode}_list"
#######################################################################
## declare all package name
#######################################################################
declare gaussdb_name_for_package="$(echo ${gaussdb_version} | sed 's/ /-/g')"
declare version_string="${gaussdb_name_for_package}-${version_number}"
declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit"
declare libpq_package_name="${package_pre_name}-Libpq.tar.gz"
declare tools_package_name="${package_pre_name}-tools.tar.gz"
declare kernel_package_name="${package_pre_name}.tar.bz2"
declare symbol_package_name="${package_pre_name}-symbol.tar.gz"
declare sha256_name="${package_pre_name}.sha256"
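# Illustrative note (hypothetical values): with gaussdb.ver declaring
# VERSION=2.1.0 on a 64-bit CentOS x86_64 host, the names above resolve to
#   kernel_package_name = openGauss-2.1.0-CentOS-64bit.tar.bz2
#   libpq_package_name  = openGauss-2.1.0-CentOS-64bit-Libpq.tar.gz
#   symbol_package_name = openGauss-2.1.0-CentOS-64bit-symbol.tar.gz
# which is the naming scheme the later packaging functions rely on.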

View File

@ -0,0 +1,239 @@
#!/bin/bash
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gs_backup is a utility to back up or restore binary files and parameter files.
#############################################################################
declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"
#######################################################################
# move pkgs to output directory
#######################################################################
function deploy_pkgs()
{
mkdir -p $package_path
for pkg in $@; do
if [ -f "$pkg" ]; then
mv $pkg $package_path/
fi
done
}
#######################################################################
# copy directory's files list to $2
#######################################################################
function copy_files_list()
{
for file in $(echo $1)
do
test -e $file && tar -cpf - $file | ( cd $2; tar -xpf - )
done
}
#######################################################################
##copy target file into temporary directory temp
#######################################################################
function target_file_copy()
{
cd ${BUILD_DIR}
copy_files_list "$1" $2
cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp
if [ $? -ne 0 ]; then
die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed"
fi
sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
#generate tar file
echo "Begin generate ${kernel_package_name} tar file..." >> "$LOG_FILE" 2>&1
cd $2
tar -jcvpf "${kernel_package_name}" ./* >> "$LOG_FILE" 2>&1
cd '-'
mv $2/"${kernel_package_name}" ./
if [ $? -ne 0 ]; then
die "generate ${kernel_package_name} failed."
fi
echo "End generate ${kernel_package_name} tar file" >> "$LOG_FILE" 2>&1
#generate sha256 file
sha256_name="${package_pre_name}.sha256"
echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
sha256sum "${kernel_package_name}" | awk -F" " '{print $1}' > "$sha256_name"
if [ $? -ne 0 ]; then
die "generate sha256 file failed."
fi
echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1
###################################################
# make server package
###################################################
if [ -d "${2}" ]; then
rm -rf ${2}
fi
}
function target_file_copy_for_non_server()
{
cd ${BUILD_DIR}
copy_files_list "$1" $2
}
#######################################################################
##function prep_dest_list has two actions
##1.parse the file named by the release_file_list variable
##2.store the file list of the requested package section in the dest_list variable
#######################################################################
function prep_dest_list()
{
cd $SCRIPT_DIR
releasefile=$1
pkgname=$2
local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}')
if [ ! -n "$head" ]; then
die "error: ono find $pkgname in the $releasefile file "
fi
local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
if [ ! -n "$tail" ]; then
local all=$(cat $releasefile | wc -l)
let tail=$all+1-$head
fi
dest_list=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
}
#######################################################################
##back to separate_debug_symbol.sh dir
#######################################################################
function separate_symbol()
{
cd $SCRIPT_DIR
if [ "$version_mode" = "release" ]; then
chmod +x ./separate_debug_information.sh
./separate_debug_information.sh
cd $SCRIPT_DIR
mv symbols.tar.gz $symbol_package_name
deploy_pkgs $symbol_package_name
fi
}
function make_package_srv()
{
echo "Begin package server"
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'server'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp/etc
target_file_copy "$dest_list" ${BUILD_DIR}/temp
deploy_pkgs ${sha256_name} ${kernel_package_name}
echo "make server(all) package success!"
}
#######################################################################
# Install all SQL files from src/distribute/include/catalog/upgrade_sql
# to INSTALL_DIR/bin/script/upgrade_sql.
# Package all SQL files and then verify them with SHA256.
#######################################################################
function make_package_upgrade_sql()
{
echo "Begin to install upgrade_sql files..."
UPGRADE_SQL_TAR="upgrade_sql.tar.gz"
UPGRADE_SQL_SHA256="upgrade_sql.sha256"
cd $SCRIPT_DIR
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}
rm -rf temp
mkdir temp
cd ${BUILD_DIR}/temp
cp -r "${UPGRADE_SQL_DIR}" ./upgrade_sql
[ $? -ne 0 ] && die "Failed to cp upgrade_sql files"
tar -czf ${UPGRADE_SQL_TAR} upgrade_sql
[ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}"
rm -rf ./upgrade_sql > /dev/null 2>&1
sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}"
[ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}"
chmod 600 ${UPGRADE_SQL_TAR}
chmod 600 ${UPGRADE_SQL_SHA256}
deploy_pkgs ${UPGRADE_SQL_TAR} ${UPGRADE_SQL_SHA256}
echo "Successfully packaged upgrade_sql files."
}
function make_package_libpq()
{
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'libpq'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp
target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
cd ${BUILD_DIR}/temp
echo "packaging libpq..."
tar -zvcf "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${libpq_package_name} failed"
fi
deploy_pkgs ${libpq_package_name}
echo "install $pkgname tools is ${libpq_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
}
function make_package_tools()
{
cd $SCRIPT_DIR
prep_dest_list $release_file_list 'client'
rm -rf ${BUILD_DIR}/temp
mkdir -p ${BUILD_DIR}/temp
cd ${BUILD_DIR}/
target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
cd ${BUILD_DIR}/temp
echo "packaging tools..."
tar -zvcf "${tools_package_name}" ./* >>"$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "$package_command ${tools_package_name} failed"
fi
deploy_pkgs ${tools_package_name}
echo "install $pkgname tools is ${tools_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
}
function gaussdb_pkg()
{
echo "Start package opengauss."
separate_symbol
make_package_srv
make_package_libpq
make_package_tools
make_package_upgrade_sql
echo "End package opengauss."
}

View File

@ -0,0 +1,286 @@
#!/bin/bash
#######################################################################
# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
# descript: Compile and pack openGauss
# Return 0 means OK.
# Return 1 means failed.
# version: 2.0
# date: 2020-08-08
#######################################################################
#######################################################################
## Check the installation package production environment
#######################################################################
function gaussdb_pkg_pre_clean()
{
if [ -d "$BUILD_DIR" ]; then
rm -rf $BUILD_DIR
fi
if [ -d "$LOG_FILE" ]; then
rm -rf $LOG_FILE
fi
}
###################################
#######################################################################
##read version from gaussdb.ver
#######################################################################
function read_gaussdb_version()
{
cd ${SCRIPT_DIR}
echo "${gaussdb_name_for_package}-${version_number}" > version.cfg
#the version number is read automatically from the kernel's globals.cpp, no need to change it here
}
PG_REG_TEST_ROOT="${ROOT_DIR}"
ROACH_DIR="${ROOT_DIR}/distribute/bin/roach"
MPPDB_DECODING_DIR="${ROOT_DIR}/contrib/mppdb_decoding"
###################################
# get version number from globals.cpp
##################################
function read_gaussdb_number()
{
global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
version_name="GRAND_VERSION_NUM"
version_num=""
line=$(cat $global_kernal | grep ^const* | grep $version_name)
version_num1=${line#*=}
#remove the symbol;
version_num=$(echo $version_num1 | tr -d ";")
#remove the blank
version_num=$(echo $version_num)
if echo $version_num | grep -qE '^92[0-9]+$'
then
# get the last three number
latter=${version_num:2}
echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
else
echo "Cannot get the version number from globals.cpp."
exit 1
fi
}
#######################################################################
##insert the commitid to version.cfg as the upgrade app path specification
#######################################################################
function get_kernel_commitid()
{
export PATH=${BUILD_DIR}:$PATH
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | awk '{print $5}' | cut -d ")" -f 1)
echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
}
#######################################################################
## generate the version file.
#######################################################################
function make_license_control()
{
python_exec=$(which python 2>/dev/null)
if [ -x "$python_exec" ]; then
$python_exec ${binarylib_dir}/buildtools/license_control/encrypted_version_file.py >> "$LOG_FILE" 2>&1
fi
if [ $? -ne 0 ]; then
die "create ${binarylib_dir}/buildtools/license_control license file failed."
fi
if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ]; then
# Get the sha256 checksums.
gaussdb_200_sha256sum=$(sha256sum $gaussdb_200_file | awk '{print $1}')
gaussdb_300_sha256sum=$(sha256sum $gaussdb_300_file | awk '{print $1}')
# Modify the source code.
sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*NULL[ \t]*,[ \t]*NULL[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {\"$gaussdb_200_sha256sum\", \"$gaussdb_300_sha256sum\"};/g" $gaussdb_version_file
fi
if [ $? -ne 0 ]; then
die "modify '$gaussdb_version_file' failed."
fi
}
#######################################################################
##back to separate_debug_symbol.sh dir
#######################################################################
function separate_symbol()
{
cd $SCRIPT_DIR
if [ "$version_mode" = "release" -a "$separate_symbol" = "on" ]; then
chmod +x ./separate_debug_information.sh
./separate_debug_information.sh
cd $SCRIPT_DIR
mkdir -p $package_path
mv symbols.tar.gz $package_path/$symbol_package_name
fi
}
#######################################################################
##install gaussdb database contained server,client and libpq
#######################################################################
function install_gaussdb()
{
# Generate the license control file, and set the sha256 strings in the code.
echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1
make_license_control
echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1
#switch to the code directory
cd "$ROOT_DIR"
#echo "$ROOT_DIR/Code"
if [ $? -ne 0 ]; then
die "change dir to $ROOT_DIR failed."
fi
if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then
echo "WARNING: do not separate symbol in debug mode!"
fi
if [ "$product_mode" != "opengauss" ]; then
die "the product mode can only be opengauss!"
fi
#configure
make distclean -sj >> "$LOG_FILE" 2>&1
echo "Begin configure." >> "$LOG_FILE" 2>&1
chmod 755 configure
if [ "$product_mode"x == "opengauss"x ]; then
enable_readline="--with-readline"
else
enable_readline="--without-readline"
fi
shared_opt="--gcc-version=${gcc_version}.0 --prefix="${BUILD_DIR}" --3rd=${binarylib_dir} --enable-thread-safety ${enable_readline} --without-zlib"
if [ "$product_mode"x == "opengauss"x ]; then
if [ "$version_mode"x == "release"x ]; then
# configure -D__USE_NUMA -D__ARM_LSE with arm opengauss mode
if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
echo "configure -D__USE_NUMA -D__ARM_LSE with arm opengauss mode"
GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE"
fi
./configure $shared_opt CFLAGS="-O2 -g3 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
elif [ "$version_mode"x == "memcheck"x ]; then
./configure $shared_opt CFLAGS="-O0" --enable-mot --enable-debug --enable-cassert --enable-memory-check CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
elif [ "$version_mode"x == "fiurelease"x ]; then
./configure $shared_opt CFLAGS="-O2 -g3 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --disable-jemalloc CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
elif [ "$version_mode"x == "fiudebug"x ]; then
./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --enable-debug --enable-cassert --disable-jemalloc CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
else
./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --enable-debug --enable-cassert CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
fi
fi
if [ $? -ne 0 ]; then
die "configure failed."
fi
echo "End configure" >> "$LOG_FILE" 2>&1
echo "Begin make install MPPDB server" >> "$LOG_FILE" 2>&1
make clean >> "$LOG_FILE" 2>&1
export GAUSSHOME=${BUILD_DIR}
export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH}
make -sj 8 >> "$LOG_FILE" 2>&1
make install -sj 8 >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
make install -sj 8 >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
make install -sj 8 >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "make install failed."
fi
fi
fi
cd "$ROOT_DIR/contrib/pg_upgrade_support"
make clean >> "$LOG_FILE" 2>&1
make -sj >> "$LOG_FILE" 2>&1
make install -sj >> "$LOG_FILE" 2>&1
echo "End make install MPPDB" >> "$LOG_FILE" 2>&1
cd "$ROOT_DIR"
if [ "${make_check}" = 'on' ]; then
echo "Begin make check MPPDB..." >> "$LOG_FILE" 2>&1
cd ${PG_REG_TEST_ROOT}
make check -sj >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "make check MPPDB failed."
fi
echo "End make check MPPDB success." >> "$LOG_FILE" 2>&1
fi
echo "Begin make install mpp_decoding..." >> "$LOG_FILE" 2>&1
#copy mppdb_decoding form clienttools to bin
if [ "$version_mode"x == "release"x ]; then
cd "$MPPDB_DECODING_DIR"
make >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "make install mppdb_decoding failed."
fi
echo "End make install mppdb_decoding success." >> "$LOG_FILE" 2>&1
echo "Begin pack mppdb_decoding..." >> "$LOG_FILE" 2>&1
cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so
elif [ "$version_mode"x == "memcheck"x ]; then
cd "$MPPDB_DECODING_DIR"
make >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "make install mppdb_decoding failed."
fi
echo "End make install mppdb_decoding success." >> "$LOG_FILE" 2>&1
echo "Begin pack mppdb_decoding..." >> "$LOG_FILE" 2>&1
cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so
else
cd "$MPPDB_DECODING_DIR"
make >> "$LOG_FILE" 2>&1
if [ $? -ne 0 ]; then
die "make install mppdb_decoding failed."
fi
echo "End make install mppdb_decoding success." >> "$LOG_FILE" 2>&1
echo "Begin pack mppdb_decoding..." >> "$LOG_FILE" 2>&1
cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so
fi
if [ $? -ne 0 ]; then
die "cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so failed"
fi
chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
separate_symbol
get_kernel_commitid
}
#######################################################################
## install the gaussdb database and related artifacts
## select what to install according to the package_type variable
#######################################################################
function gaussdb_build()
{
case "$package_type" in
server)
install_gaussdb
;;
libpq)
install_gaussdb
;;
*)
echo "Internal Error: option processing error: $package_type"
echo "please input right paramenter values server or libpq "
exit 1
esac
}
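# A usage sketch of the functions above (assumption: in the full script these
# variables are normally set by the option-parsing code, which is not shown here):
#   package_type=server        # or libpq
#   version_mode=release       # release | debug | memcheck | fiurelease | fiudebug
#   product_mode=opengauss
#   gaussdb_build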

View File

@ -2,47 +2,19 @@
./bin/gsql
./bin/gaussdb
./bin/gstrace
./bin/gs_basebackup
./bin/gs_probackup
./bin/gs_tar
./bin/gs_encrypt
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_ctl
./bin/gs_initdb
./bin/gs_ctl
./bin/gs_guc
./bin/encrypt
./bin/openssl
./bin/gs_restore
./bin/gs_cgroup
./bin/openssl
./bin/pg_config
./bin/pg_controldata
./bin/pg_format_cu
./bin/pg_resetxlog
./bin/pg_recvlogical
./bin/alarmItem.conf
./bin/retry_errcodes.conf
./bin/cluster_guc.conf
./bin/bind_net_irq.sh
./bin/setArmOptimization.sh
./bin/krb5kdc
./bin/klist
./bin/kinit
./bin/kdestroy
./bin/kdb5_util
./bin/kadmin.local
./bin/lz4
./bin/kadmind
./bin/dbmind
./bin/server.key.cipher
./bin/server.key.rand
./bin/gs_plan_simulator.sh
./etc/kerberos/kadm5.acl
./etc/kerberos/kdc.conf
./etc/kerberos/krb5.conf
./etc/kerberos/mppdb-site.xml
./share/postgresql/tmp/udstools.py
./share/postgresql/db4ai
./share/postgresql/snowball_create.sql
./share/postgresql/pg_hba.conf.sample
@ -54,7 +26,6 @@
./share/postgresql/pg_ident.conf.sample
./share/postgresql/postgres.description
./share/postgresql/postgresql.conf.sample
./share/postgresql/mot.conf.sample
./share/postgresql/extension/plpgsql--1.0.sql
./share/postgresql/extension/hstore.control
./share/postgresql/extension/security_plugin.control
@ -72,8 +43,6 @@
./share/postgresql/extension/hdfs_fdw.control
./share/postgresql/extension/log_fdw--1.0.sql
./share/postgresql/extension/log_fdw.control
./share/postgresql/extension/mot_fdw--1.0.sql
./share/postgresql/extension/mot_fdw.control
./share/postgresql/extension/postgres_fdw--1.0.sql
./share/postgresql/extension/postgres_fdw.control
./share/postgresql/timezone/GB-Eire
@ -282,7 +251,6 @@
./share/postgresql/timezone/Canada/Newfoundland
./share/postgresql/timezone/Canada/Saskatchewan
./share/postgresql/timezone/Canada/Pacific
./share/postgresql/timezone/Canada/East-Saskatchewan
./share/postgresql/timezone/Canada/Mountain
./share/postgresql/timezone/Canada/Central
./share/postgresql/timezone/CST6CDT
@ -664,7 +632,6 @@
./share/postgresql/timezone/Navajo
./share/postgresql/timezone/GMT
./share/postgresql/system_views.sql
./share/postgresql/private_system_views.sql
./share/postgresql/performance_views.sql
./share/postgresql/sql_features.txt
./share/postgresql/pg_cast_oid.txt
@ -703,11 +670,41 @@
./share/postgresql/timezonesets/Default
./share/postgresql/timezonesets/Etc.txt
./share/postgresql/postgres.bki
./share/llvmir/GaussDB_expr.ir
./share/sslcert/gsql/openssl.cnf
./share/sslcert/grpc/openssl.cnf
./share/sslcert/om/openssl.cnf
./lib/libsimsearch/
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.13
./lib/libcjson_utils.so
./lib/libcjson_utils.so.1
./lib/libcjson_utils.so.1.7.13
./lib/libstdc++.so.6
./lib/libgcc_s.so.1
./lib/libgomp.so
./lib/libgomp.so.1
./lib/libgomp.so.1.0.0
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.5.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.6.0
./lib/libxgboost.so
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/euc_kr_and_mic.so
@ -716,12 +713,6 @@
./lib/postgresql/cyrillic_and_mic.so
./lib/postgresql/utf8_and_johab.so
./lib/postgresql/utf8_and_gb18030.so
./lib/postgresql/pgxs/src/makefiles/pgxs.mk
./lib/postgresql/pgxs/src/Makefile.shlib
./lib/postgresql/pgxs/src/Makefile.port
./lib/postgresql/pgxs/src/nls-global.mk
./lib/postgresql/pgxs/src/Makefile.global
./lib/postgresql/pgxs/src/get_PlatForm_str.sh
./lib/postgresql/pgxs/config/install-sh
./lib/postgresql/euc_cn_and_mic.so
./lib/postgresql/latin_and_mic.so
@ -747,142 +738,8 @@
./lib/postgresql/pg_plugin
./lib/postgresql/proc_srclib
./lib/postgresql/security_plugin.so
./lib/postgresql/pg_upgrade_support.so
./lib/postgresql/java/pljava.jar
./lib/postgresql/postgres_fdw.so
./lib/postgresql/pgoutput.so
./lib/libpljava.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libmasstree.so
./lib/libupb.so
./lib/libupb.so.9
./lib/libupb.so.9.0.0
./lib/libabsl_str_format_internal.so
./lib/libabsl_strings.so
./lib/libabsl_throw_delegate.so
./lib/libabsl_strings_internal.so
./lib/libabsl_base.so
./lib/libabsl_dynamic_annotations.so
./lib/libabsl_spinlock_wait.so
./lib/libabsl_int128.so
./lib/libabsl_bad_optional_access.so
./lib/libabsl_raw_logging_internal.so
./lib/libabsl_log_severity.so
./lib/libaddress_sorting.so
./lib/libaddress_sorting.so.9
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkadm5clnt.so
./lib/libkadm5clnt_mit.so
./lib/libkadm5clnt_mit.so.11
./lib/libkadm5clnt_mit.so.11.0
./lib/libkadm5clnt_mit.so.12
./lib/libkadm5clnt_mit.so.12.0
./lib/libkadm5srv.so
./lib/libkadm5srv_mit.so
./lib/libkadm5srv_mit.so.11
./lib/libkadm5srv_mit.so.11.0
./lib/libkadm5srv_mit.so.12
./lib/libkadm5srv_mit.so.12.0
./lib/libkdb5.so
./lib/libkdb5.so.9
./lib/libkdb5.so.9.0
./lib/libkdb5.so.10
./lib/libkdb5.so.10.0
./lib/libkrad.so
./lib/libkrad.so.0
./lib/libkrad.so.0.0
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/krb5/plugins/kdb/db2.so
./lib/libverto.so
./lib/libverto.so.0
./lib/libverto.so.0.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.6.0
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libgcc_s.so.1
./lib/libstdc++.so.6
./lib/libz.so
./lib/libz.so.1
./lib/libz.so.1.2.11
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.13
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./share/llvmir/GaussDB_expr.ir
./lib/libeSDKLogAPI.so
./lib/libeSDKOBS.so
./lib/liblog4cpp.so
./lib/liblog4cpp.so.5
./lib/liblog4cpp.so.5.0.6
./lib/libcharset.so
./lib/libcharset.so.1
./lib/libcharset.so.1.0.0
./lib/libiconv.so
./lib/libiconv.so.2
./lib/libiconv.so.2.6.1
./lib/libnghttp2.so
./lib/libnghttp2.so.14
./lib/libnghttp2.so.14.20.0
./lib/libpcre.so
./lib/libpcre.so.1
./lib/libpcre.so.1.2.12
./lib/libsecurec.so
./lib/libxml2.so
./lib/libxml2.so.2
./lib/libxml2.so.2.9.9
./lib/libparquet.so
./lib/libparquet.so.14
./lib/libparquet.so.14.1.0
./lib/libarrow.so
./lib/libarrow.so.14
./lib/libarrow.so.14.1.0
./lib/OBS.ini
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.4.4
./include/postgresql/server/postgres_ext.h
./include/postgresql/server/pg_config_os.h
./include/postgresql/server/pgtime.h
@ -1012,6 +869,7 @@
./include/postgresql/server/storage/backendid.h
./include/postgresql/server/storage/lock/lock.h
./include/postgresql/server/storage/lock/lwlock.h
./include/postgresql/server/storage/lwlocknames.h
./include/postgresql/server/storage/barrier.h
./include/postgresql/server/storage/shmem.h
./include/postgresql/server/pg_config.h
@ -1035,418 +893,14 @@
./include/postgresql/server/lib/ilist.h
./include/postgresql/server/pgxc/locator.h
./include/postgresql/server/gstrace/gstrace_infra.h
./include/postgresql/server/extension_dependency.h
./include/postgresql/server/libpq/libpq-fe.h
./include/postgresql/server/access/clog.h
./include/postgresql/server/storage/proc.h
./include/postgresql/server/access/xlog.h
./include/postgresql/server/storage/lwlocknames.h
./include/postgresql/server/access/xloginsert.h
./include/postgresql/server/catalog/pg_control.h
./include/postgresql/server/access/parallel_recovery/redo_item.h
./include/postgresql/server/access/parallel_recovery/posix_semaphore.h
./include/postgresql/server/replication/replicainternal.h
./include/postgresql/server/knl/knl_instance.h
./include/postgresql/server/knl/knl_guc.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_sql.h
./include/postgresql/server/knl/knl_guc/knl_guc_common.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_sql.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_storage.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_security.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_security.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_network.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_network.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_memory.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_memory.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_resource.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_resource.h
./include/postgresql/server/knl/knl_guc/knl_session_attr_common.h
./include/postgresql/server/knl/knl_guc/knl_instance_attr_common.h
./include/postgresql/server/lib/circularqueue.h
./include/postgresql/server/access/double_write_basic.h
./include/postgresql/server/knl/knl_thread.h
./include/postgresql/server/access/sdir.h
./include/postgresql/server/gssignal/gs_signal.h
./include/postgresql/server/knl/knl_session.h
./include/postgresql/server/libpq/pqcomm.h
./include/postgresql/server/cipher.h
./include/postgresql/server/portability/instr_time.h
./include/postgresql/server/utils/memgroup.h
./include/postgresql/server/storage/latch.h
./include/postgresql/server/workload/qnode.h
./include/postgresql/server/streaming/init.h
./include/postgresql/server/streaming/launcher.h
./include/postgresql/server/pgxc/barrier.h
./include/postgresql/server/libcomm/libcomm.h
./include/postgresql/server/hotpatch/hotpatch.h
./include/postgresql/server/hotpatch/hotpatch_backend.h
./include/postgresql/server/postmaster/bgwriter.h
./include/postgresql/server/postmaster/pagewriter.h
./include/postgresql/server/replication/heartbeat.h
./include/postgresql/server/access/multi_redo_settings.h
./include/postgresql/server/access/redo_statistic_msg.h
./include/postgresql/server/replication/rto_statistic.h
./include/postgresql/server/replication/walprotocol.h
./include/postgresql/server/storage/mot/jit_def.h
./include/postgresql/server/threadpool/threadpool.h
./include/postgresql/server/threadpool/threadpool_controler.h
./include/postgresql/server/threadpool/threadpool_group.h
./include/postgresql/server/knl/knl_variable.h
./include/postgresql/server/threadpool/threadpool_listener.h
./include/postgresql/server/threadpool/threadpool_sessctl.h
./include/postgresql/server/storage/procsignal.h
./include/postgresql/server/threadpool/threadpool_worker.h
./include/postgresql/server/threadpool/threadpool_scheduler.h
./include/postgresql/server/threadpool/threadpool_stream.h
./include/postgresql/server/replication/dataqueuedefs.h
./include/postgresql/server/gtm/gtm_c.h
./include/postgresql/server/cm/etcdapi.h
./include/postgresql/server/alarm/alarm.h
./include/postgresql/server/access/xact.h
./include/postgresql/server/access/cstore_am.h
./include/postgresql/server/access/cstore_roughcheck_func.h
./include/postgresql/server/access/cstoreskey.h
./include/postgresql/server/storage/cu.h
./include/postgresql/server/vecexecutor/vectorbatch.h
./include/postgresql/server/cstore.h
./include/postgresql/server/storage/cstore/cstore_mem_alloc.h
./include/postgresql/server/access/cstore_minmax_func.h
./include/postgresql/server/storage/custorage.h
./include/postgresql/server/storage/fd.h
./include/postgresql/server/postmaster/aiocompleter.h
./include/postgresql/server/storage/buf/bufmgr.h
./include/postgresql/server/storage/buf/buf_internals.h
./include/postgresql/server/storage/smgr.h
./include/postgresql/server/catalog/pg_am.h
./include/postgresql/server/catalog/pg_class.h
./include/postgresql/server/catalog/pg_index.h
./include/postgresql/server/rewrite/prs2lock.h
./include/postgresql/server/tcop/stmt_retry.h
./include/postgresql/server/catalog/pg_hashbucket_fn.h
./include/postgresql/server/utils/rel_gs.h
./include/postgresql/server/catalog/pg_partition.h
./include/postgresql/server/catalog/pg_hashbucket.h
./include/postgresql/server/catalog/catalog.h
./include/postgresql/server/catalog/catversion.h
./include/postgresql/server/catalog/pg_namespace.h
./include/postgresql/server/utils/partitionmap_gs.h
./include/postgresql/server/access/heapam.h
./include/postgresql/server/storage/pagecompress.h
./include/postgresql/server/replication/bcm.h
./include/postgresql/server/storage/cstore/cstorealloc.h
./include/postgresql/server/storage/cucache_mgr.h
./include/postgresql/server/storage/cache_mgr.h
./include/postgresql/server/nodes/plannodes.h
./include/postgresql/server/foreign/foreign.h
./include/postgresql/server/access/obs/obs_am.h
./include/postgresql/server/storage/buf/buffile.h
./include/postgresql/server/replication/slot.h
./include/postgresql/server/access/obs/eSDKOBS.h
./include/postgresql/server/commands/defrem.h
./include/postgresql/server/optimizer/pruning.h
./include/postgresql/server/nodes/relation.h
./include/postgresql/server/optimizer/bucketinfo.h
./include/postgresql/server/pgxc/nodemgr.h
./include/postgresql/server/bulkload/dist_fdw.h
./include/postgresql/server/bulkload/importerror.h
./include/postgresql/server/commands/gds_stream.h
./include/postgresql/server/bulkload/utils.h
./include/postgresql/server/cjson/cJSON.h
./include/postgresql/server/ssl/gs_openssl_client.h
./include/postgresql/server/funcapi.h
./include/postgresql/server/executor/executor.h
./include/postgresql/server/executor/execdesc.h
./include/postgresql/server/nodes/execnodes.h
./include/postgresql/server/access/genam.h
./include/postgresql/server/nodes/tidbitmap.h
./include/postgresql/server/access/relscan.h
./include/postgresql/server/access/itup.h
./include/postgresql/server/executor/instrument.h
./include/postgresql/server/miscadmin.h
./include/postgresql/server/libpq/libpq-be.h
./include/postgresql/server/libpq/hba.h
./include/postgresql/server/libpq/sha2.h
./include/postgresql/server/utils/anls_opt.h
./include/postgresql/server/pgxc/pgxc.h
./include/postgresql/server/catalog/namespace.h
./include/postgresql/server/commands/trigger.h
./include/postgresql/server/executor/spi.h
./include/postgresql/server/access/ustore/undo/knl_uundotype.h
./include/postgresql/server/access/ustore/knl_uheap.h
./include/postgresql/server/access/ustore/knl_utuple.h
./include/postgresql/server/access/ustore/knl_utype.h
./include/postgresql/server/access/ustore/knl_upage.h
./include/postgresql/server/access/ustore/knl_uredo.h
./include/postgresql/server/access/ustore/knl_uundovec.h
./include/postgresql/server/access/ustore/knl_uundorecord.h
./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h
./include/postgresql/server/access/ustore/undo/knl_uundotxn.h
./include/postgresql/server/access/ustore/undo/knl_uundozone.h
./include/postgresql/server/access/ustore/undo/knl_uundospace.h
./include/postgresql/server/communication/commproxy_basic.h
./include/postgresql/server/access/parallel_recovery/page_redo.h
./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h
./include/postgresql/server/executor/exec/execdesc.h
./include/postgresql/server/db4ai/matrix.h
./include/postgresql/server/db4ai/scores.h
./jre/ASSEMBLY_EXCEPTION
./jre/bin/java
./jre/bin/jjs
./jre/bin/keytool
./jre/bin/orbd
./jre/bin/pack200
./jre/bin/policytool
./jre/bin/rmid
./jre/bin/rmiregistry
./jre/bin/servertool
./jre/bin/tnameserv
./jre/bin/unpack200
./jre/lib/amd64/jli/libjli.so
./jre/lib/amd64/jvm.cfg
./jre/lib/amd64/libattach.so
./jre/lib/amd64/libavplugin-ffmpeg-58.so
./jre/lib/amd64/libawt_headless.so
./jre/lib/amd64/libawt.so
./jre/lib/amd64/libawt_xawt.so
./jre/lib/amd64/libdecora_sse.so
./jre/lib/amd64/libdt_socket.so
./jre/lib/amd64/libfontmanager.so
./jre/lib/amd64/libfxplugins.so
./jre/lib/amd64/libglassgtk2.so
./jre/lib/amd64/libglassgtk3.so
./jre/lib/amd64/libglass.so
./jre/lib/amd64/libgstreamer-lite.so
./jre/lib/amd64/libhprof.so
./jre/lib/amd64/libinstrument.so
./jre/lib/amd64/libj2gss.so
./jre/lib/amd64/libj2pcsc.so
./jre/lib/amd64/libj2pkcs11.so
./jre/lib/amd64/libjaas_unix.so
./jre/lib/amd64/libjava_crw_demo.so
./jre/lib/amd64/libjavafx_font_freetype.so
./jre/lib/amd64/libjavafx_font_pango.so
./jre/lib/amd64/libjavafx_font.so
./jre/lib/amd64/libjavafx_iio.so
./jre/lib/amd64/libjava.so
./jre/lib/amd64/libjawt.so
./jre/lib/amd64/libjdwp.so
./jre/lib/amd64/libjfxmedia.so
./jre/lib/amd64/libjfxwebkit.so
./jre/lib/amd64/libjpeg.so
./jre/lib/amd64/libjsdt.so
./jre/lib/amd64/libjsig.so
./jre/lib/amd64/libjsoundalsa.so
./jre/lib/amd64/libjsound.so
./jre/lib/amd64/liblcms.so
./jre/lib/amd64/libmanagement.so
./jre/lib/amd64/libmlib_image.so
./jre/lib/amd64/libnet.so
./jre/lib/amd64/libnio.so
./jre/lib/amd64/libnpt.so
./jre/lib/amd64/libprism_common.so
./jre/lib/amd64/libprism_es2.so
./jre/lib/amd64/libprism_sw.so
./jre/lib/amd64/libsaproc.so
./jre/lib/amd64/libsctp.so
./jre/lib/amd64/libsplashscreen.so
./jre/lib/amd64/libsunec.so
./jre/lib/amd64/libunpack.so
./jre/lib/amd64/libverify.so
./jre/lib/amd64/libzip.so
./jre/lib/amd64/server/libjvm.so
./jre/lib/amd64/server/Xusage.txt
./jre/lib/calendars.properties
./jre/lib/charsets.jar
./jre/lib/classlist
./jre/lib/cmm/CIEXYZ.pf
./jre/lib/cmm/GRAY.pf
./jre/lib/cmm/LINEAR_RGB.pf
./jre/lib/cmm/PYCC.pf
./jre/lib/cmm/sRGB.pf
./jre/lib/content-types.properties
./jre/lib/currency.data
./jre/lib/ext/cldrdata.jar
./jre/lib/ext/dnsns.jar
./jre/lib/ext/jaccess.jar
./jre/lib/ext/jfxrt.jar
./jre/lib/ext/localedata.jar
./jre/lib/ext/meta-index
./jre/lib/ext/nashorn.jar
./jre/lib/ext/sunec.jar
./jre/lib/ext/sunjce_provider.jar
./jre/lib/ext/sunpkcs11.jar
./jre/lib/ext/zipfs.jar
./jre/lib/flavormap.properties
./jre/lib/fontconfig.Euler.properties
./jre/lib/fontconfig.properties
./jre/lib/fontconfig.Ubuntu.properties
./jre/lib/fonts/Roboto-Regular.ttf
./jre/lib/hijrah-config-umalqura.properties
./jre/lib/images/cursors/cursors.properties
./jre/lib/images/cursors/invalid32x32.gif
./jre/lib/images/cursors/motif_CopyDrop32x32.gif
./jre/lib/images/cursors/motif_CopyNoDrop32x32.gif
./jre/lib/images/cursors/motif_LinkDrop32x32.gif
./jre/lib/images/cursors/motif_LinkNoDrop32x32.gif
./jre/lib/images/cursors/motif_MoveDrop32x32.gif
./jre/lib/images/cursors/motif_MoveNoDrop32x32.gif
./jre/lib/javafx-mx.jar
./jre/lib/javafx.properties
./jre/lib/jce.jar
./jre/lib/jexec
./jre/lib/jfr/default.jfc
./jre/lib/jfr.jar
./jre/lib/jfr/profile.jfc
./jre/lib/jfxswt.jar
./jre/lib/jsse.jar
./jre/lib/jvm.hprof.txt
./jre/lib/logging.properties
./jre/lib/management-agent.jar
./jre/lib/management/jmxremote.access
./jre/lib/management/jmxremote.password.template
./jre/lib/management/management.properties
./jre/lib/management/snmp.acl.template
./jre/lib/meta-index
./jre/lib/net.properties
./jre/lib/psfontj2d.properties
./jre/lib/psfont.properties.ja
./jre/lib/resources.jar
./jre/lib/rt.jar
./jre/lib/security/blacklisted.certs
./jre/lib/security/cacerts
./jre/lib/security/java.policy
./jre/lib/security/java.security
./jre/lib/security/policy/limited/local_policy.jar
./jre/lib/security/policy/limited/US_export_policy.jar
./jre/lib/security/policy/unlimited/local_policy.jar
./jre/lib/security/policy/unlimited/US_export_policy.jar
./jre/lib/sound.properties
./jre/lib/tzdb.dat
./jre/LICENSE
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
./bin/gs_basebackup
./bin/gs_probackup
./lib/postgresql/latin2_and_win1250.so
./lib/postgresql/euc2004_sjis2004.so
./lib/postgresql/euc_kr_and_mic.so
./lib/postgresql/utf8_and_uhc.so
./lib/postgresql/euc_tw_and_big5.so
./lib/postgresql/cyrillic_and_mic.so
./lib/postgresql/utf8_and_johab.so
./lib/postgresql/utf8_and_gb18030.so
./lib/postgresql/pgxs/src/makefiles/pgxs.mk
./lib/postgresql/pgxs/src/Makefile.shlib
./lib/postgresql/pgxs/src/Makefile.port
./lib/postgresql/pgxs/src/nls-global.mk
./lib/postgresql/pgxs/src/Makefile.global
./lib/postgresql/pgxs/config/install-sh
./lib/postgresql/euc_cn_and_mic.so
./lib/postgresql/latin_and_mic.so
./lib/postgresql/utf8_and_sjis2004.so
./lib/postgresql/utf8_and_euc_jp.so
./lib/postgresql/utf8_and_sjis.so
./lib/postgresql/utf8_and_cyrillic.so
./lib/postgresql/utf8_and_euc_kr.so
./lib/postgresql/ascii_and_mic.so
./lib/postgresql/utf8_and_iso8859_1.so
./lib/postgresql/euc_jp_and_sjis.so
./lib/postgresql/dict_snowball.so
./lib/postgresql/utf8_and_ascii.so
./lib/postgresql/utf8_and_euc_tw.so
./lib/postgresql/utf8_and_iso8859.so
./lib/postgresql/utf8_and_win.so
./lib/postgresql/utf8_and_euc_cn.so
./lib/postgresql/utf8_and_gbk.so
./lib/postgresql/utf8_and_euc2004.so
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
[libpq]
./lib/libpq.a
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libpq.so
./lib/libpq.so.5
./lib/libpq.so.5.5
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libgauss_cl_jni.so
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libstdc++.so.6
./lib/libssl.so
./lib/libssl.so.1.1
./lib/libpgport_tool.so
./lib/libpgport_tool.so.1
./lib/libgssapi_krb5_gauss.so
./lib/libgssapi_krb5_gauss.so.2
./lib/libgssapi_krb5_gauss.so.2.2
./lib/libgssrpc_gauss.so
./lib/libgssrpc_gauss.so.4
./lib/libgssrpc_gauss.so.4.2
./lib/libk5crypto_gauss.so
./lib/libk5crypto_gauss.so.3
./lib/libk5crypto_gauss.so.3.1
./lib/libkrb5support_gauss.so
./lib/libkrb5support_gauss.so.0
./lib/libkrb5support_gauss.so.0.1
./lib/libkrb5_gauss.so
./lib/libkrb5_gauss.so.3
./lib/libkrb5_gauss.so.3.3
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./include/gs_thread.h
./include/gs_threadlocal.h
./include/postgres_ext.h
./include/libpq-fe.h
./include/libpq-events.h
./include/libpq/libpq-fs.h
[version]
V500R002C00
[header]
./include/libpq-fe.h
./include/postgres_ext.h
@ -1455,14 +909,8 @@ V500R002C00
./include/pg_config.h
./include/pg_config_manual.h
./include/pg_config_os.h
./include/cm_config.h
./include/c.h
./include/port.h
./include/cm_msg.h
./include/cm_c.h
./include/cm_misc.h
./include/libpq-int.h
./include/pqcomm.h
./include/pqexpbuffer.h
./include/xlogdefs.h
./include/cm-libpq-fe.h

View File

@ -35,6 +35,7 @@
./bin/lz4
./bin/kadmind
./bin/dbmind
./bin/gs_dbmind
./bin/server.key.cipher
./bin/server.key.rand
./bin/gs_plan_simulator.sh
@ -824,9 +825,7 @@
./lib/libverto.so
./lib/libverto.so.0
./lib/libverto.so.0.0
./lib/libcurl.so
./lib/libcurl.so.4
./lib/libcurl.so.4.6.0
./lib/libcurl.so*
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libssl.so
@ -839,9 +838,7 @@
./lib/liblz4.so
./lib/liblz4.so.1
./lib/liblz4.so.1.9.2
./lib/libcjson.so
./lib/libcjson.so.1
./lib/libcjson.so.1.7.13
./lib/libcjson.so*
./lib/libconfig.so
./lib/libconfig.so.4
./lib/libpgport_tool.so
@ -849,25 +846,13 @@
./share/llvmir/GaussDB_expr.ir
./lib/libeSDKLogAPI.so
./lib/libeSDKOBS.so
./lib/liblog4cpp.so
./lib/liblog4cpp.so.5
./lib/liblog4cpp.so.5.0.6
./lib/libcharset.so
./lib/libcharset.so.1
./lib/libcharset.so.1.0.0
./lib/libiconv.so
./lib/libiconv.so.2
./lib/libiconv.so.2.6.1
./lib/libnghttp2.so
./lib/libnghttp2.so.14
./lib/libnghttp2.so.14.20.0
./lib/libpcre.so
./lib/libpcre.so.1
./lib/libpcre.so.1.2.12
./lib/liblog4cpp.so*
./lib/libcharset.so*
./lib/libiconv.so*
./lib/libnghttp2.so*
./lib/libpcre.so*
./lib/libsecurec.so
./lib/libxml2.so
./lib/libxml2.so.2
./lib/libxml2.so.2.9.9
./lib/libxml2.so*
./lib/libparquet.so
./lib/libparquet.so.14
./lib/libparquet.so.14.1.0
@ -880,7 +865,8 @@
./lib/libdcf.so
./lib/libzstd.so
./lib/libzstd.so.1
./lib/libzstd.so.1.4.4
./lib/libzstd.so.1.5.0
./lib/libxgboost.so
./include/postgresql/server/postgres_ext.h
./include/postgresql/server/pg_config_os.h
@ -1009,8 +995,9 @@
./include/postgresql/server/storage/item/itemptr.h
./include/postgresql/server/storage/lock/s_lock.h
./include/postgresql/server/storage/backendid.h
./include/postgresql/server/storage/lock/lock.h
./include/postgresql/server/storage/lock/lwlock.h
./include/postgresql/server/storage/lock.h
./include/postgresql/server/storage/lwlock.h
./include/postgresql/server/storage/lwlocknames.h
./include/postgresql/server/storage/barrier.h
./include/postgresql/server/storage/shmem.h
./include/postgresql/server/pg_config.h
@ -1444,8 +1431,6 @@
./include/libpq-fe.h
./include/libpq-events.h
./include/libpq/libpq-fs.h
[version]
V500R002C00
[header]
./include/libpq-fe.h
./include/postgres_ext.h
@ -1457,9 +1442,6 @@ V500R002C00
./include/cm_config.h
./include/c.h
./include/port.h
./include/cm_msg.h
./include/cm_c.h
./include/cm_misc.h
./include/libpq-int.h
./include/pqcomm.h
./include/pqexpbuffer.h

View File

@ -199,17 +199,14 @@ ENDMACRO(CHECK_CC_ENABLE)
function(GET_VERSIONSTR_FROMGIT ret)
set(PG_VERSION "9.2.4")
set(OPENGAUSS_VERSION "2.1.0")
execute_process(
COMMAND ${CMAKE_SOURCE_DIR}/${openGauss}/cmake/src/buildfunction.sh --d ${PROJECT_TRUNK_DIR} OUTPUT_VARIABLE KERNEL_VERSION_STR)
execute_process(
COMMAND ${CMAKE_SOURCE_DIR}/${openGauss}/cmake/src/buildfunction.sh --s ${PROJECT_TRUNK_DIR} OUTPUT_VARIABLE GS_VERSION_STR)
set(PG_VERSION "${PG_VERSION}" PARENT_SCOPE)
set(${ret} "${GS_VERSION_STR}" PARENT_SCOPE)
set(OPENGAUSS_VERSION_NUM_STR, "${OPENGAUSS_VERSION}" PARENT_SCOPE)
if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF)
set(${ret} "${KERNEL_VERSION_STR}" PARENT_SCOPE)
set(PG_VERSION_STR "openGauss ${OPENGAUSS_VERSION} ${KERNEL_VERSION_STR}")
set(PG_VERSION_STR "openGauss ${OPENGAUSS_VERSION} ${GS_VERSION_STR}")
else()
set(${ret} "${GS_VERSION_STR}" PARENT_SCOPE)
set(PG_VERSION_STR "${GS_VERSION_STR}")
endif()
set(PG_VERSION_STR "${PG_VERSION_STR}" PARENT_SCOPE)

View File

@ -49,6 +49,7 @@ option(ENABLE_LCOV "enable lcov, the old is --enable-lcov" OFF)
# new add
option(ENABLE_MULTIPLE_NODES "enable distribute,the old is --enable-multiple-nodes" OFF)
option(ENABLE_PRIVATEGAUSS "enable privategauss,the old is --enable-privategauss" OFF)
option(ENABLE_LITE_MODE "enable lite in single_node mode,the old is --enable-lite-mode" OFF)
option(ENABLE_DEBUG "enable debug,the old is --enable-debug" OFF)
option(ENABLE_MOT "enable mot in single_node mode,the old is --enable-mot" OFF)
option(ENABLE_MYSQL_FDW "enable export or import data with mysql,the old is --enable-mysql-fdw" OFF)
@ -125,6 +126,12 @@ if(${BUILD_TUPLE} STREQUAL "aarch64")
endif()
endif()
if(${ENABLE_LITE_MODE} STREQUAL "ON")
set(ENABLE_LLVM_COMPILE OFF)
set(ENABLE_GSS OFF)
set(KRB5 OFF)
endif()
set(PROTECT_OPTIONS -fwrapv -std=c++14 -fnon-call-exceptions ${OPTIMIZE_LEVEL})
set(WARNING_OPTIONS -Wall -Wendif-labels -Werror -Wformat-security)
set(OPTIMIZE_OPTIONS -pipe -pthread -fno-aggressive-loop-optimizations -fno-expensive-optimizations -fno-omit-frame-pointer -fno-strict-aliasing -freg-struct-return)
@ -237,8 +244,8 @@ add_definitions(-Wno-builtin-macro-redefined)
SET_GCC_FLAGS(DB_COMMON_FLAGS "")
#hotpatch
set(HOTPATCH_PLATFORM_LIST suse11_sp1_x86_64 euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp2_x86_64 euleros2.0_sp5_x86_64 kylinv10_sp1_aarch64 kylinv10_sp1_x86_64_intel)
set(HOTPATCH_ARM_LIST euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 kylinv10_sp1_aarch64)
set(HOTPATCH_PLATFORM_LIST suse11_sp1_x86_64 suse12_sp5_x86_64 euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp10_aarch64 euleros2.0_sp2_x86_64 euleros2.0_sp5_x86_64 euleros2.0_sp10_x86_64 kylinv10_sp1_aarch64 kylinv10_sp1_x86_64_intel)
set(HOTPATCH_ARM_LIST euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp10_aarch64 kylinv10_sp1_aarch64)
list(FIND HOTPATCH_PLATFORM_LIST "${PLAT_FORM_NAME}" RET_HOTPATCH)
list(FIND HOTPATCH_ARM_LIST "${PLAT_FORM_NAME}" RET_ARM_HOTPATCH)
if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF)
@ -246,11 +253,7 @@ if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF)
if("${GCC_VERSION}" STREQUAL "7.3.0")
set(SUPPORT_HOTPATCH "yes")
if(NOT ${RET_ARM_HOTPATCH} EQUAL -1)
if("$ENV{DEBUG_TYPE}" STREQUAL "debug")
set(HOTPATCH_ATOMIC_LDS -Wl,-T${LIBHOTPATCH_TOOL_PATH}/atomic_debug.lds)
else()
set(HOTPATCH_ATOMIC_LDS -Wl,-T${LIBHOTPATCH_TOOL_PATH}/atomic.lds)
endif()
set(HOTPATCH_ATOMIC_LDS -Wl,-T${LIBHOTPATCH_TOOL_PATH}/atomic.lds)
endif()
else()
set(SUPPORT_HOTPATCH "no")
@ -262,11 +265,17 @@ else()
set(SUPPORT_HOTPATCH "no")
endif()
# LLVM version
execute_process(COMMAND ${LLVM_CONFIG} --version OUTPUT_VARIABLE LLVM_VERSION_STR OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REPLACE "." ";" LLVM_VERSION_LIST ${LLVM_VERSION_STR})
list(GET LLVM_VERSION_LIST 0 LLVM_MAJOR_VERSION)
list(GET LLVM_VERSION_LIST 1 LLVM_MINOR_VERSION)
if(${ENABLE_LITE_MODE} STREQUAL "ON")
set(SUPPORT_HOTPATCH "no")
endif()
if(${ENABLE_LLVM_COMPILE} STREQUAL "ON")
# LLVM version
execute_process(COMMAND ${LLVM_CONFIG} --version OUTPUT_VARIABLE LLVM_VERSION_STR OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REPLACE "." ";" LLVM_VERSION_LIST ${LLVM_VERSION_STR})
list(GET LLVM_VERSION_LIST 0 LLVM_MAJOR_VERSION)
list(GET LLVM_VERSION_LIST 1 LLVM_MINOR_VERSION)
endif()
if(${NO_CHECK_CONFIG})
string(SUBSTRING "${BUILD_TUPLE}" 0 6 BUILD_HOST_PLATFORM)
@ -305,6 +314,9 @@ SET(EC_CONFIG_IN_FILE ecpg_config.h.in)
build_mppdb_config_paths_h(PG_CONFIG_PATH_H)
configure_file(${openGauss}/cmake/src/config-in/${CONFIG_IN_FILE} ${CMAKE_BINARY_DIR}/pg_config.h @ONLY)
configure_file(${openGauss}/cmake/src/config-in/${EC_CONFIG_IN_FILE} ${CMAKE_BINARY_DIR}/ecpg_config.h @ONLY)
#set host_cpu for pgxs.mk
set(HOST_CPU ${BUILD_TUPLE})
configure_file(${openGauss}/src/makefiles/pgxs.mk ${CMAKE_BINARY_DIR}/${openGauss}/src/makefiles/pgxs.mk @ONLY)
SET(PROJECT_INCLUDE_DIR ${PROJECT_INCLUDE_DIR} ${CMAKE_BINARY_DIR})
#
@ -312,3 +324,6 @@ if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" AND "${ENABLE_MOT}" STREQUAL "ON")
message(FATAL_ERROR "error: --enable-mot option is not supported with --enable-multiple-nodes option")
endif()
if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" AND "${ENABLE_LITE_MODE}" STREQUAL "ON")
message(FATAL_ERROR "error: --enable-lite-mode option is not supported with --enable-multiple-nodes option")
endif()
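A hedged example of driving the new switch directly with cmake (option names are the ones defined above; the source path and any other required options are omitted and assumed to be supplied as usual):
cmake /path/to/openGauss-server -DENABLE_LITE_MODE=ON -DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF
Per the FATAL_ERROR check above, combining ENABLE_LITE_MODE=ON with ENABLE_MULTIPLE_NODES=ON aborts the configuration step.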

View File

@ -114,34 +114,17 @@ function get_gs_version()
commits=$(git log | grep "See merge request" | wc -l)
mrid=$(git log | grep "See merge request" | head -1 | awk -F! '{print $2}' | grep -o '[0-9]\+')
debug_str="$DEBUG_TYPE"
product=$(cat build/script/gaussdb.ver | grep 'PRODUCT' | awk -F "=" '{print $2}')
version=$(cat build/script/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
if test "$enable_ccache" = yes; then
default_gs_version="(openGauss 2.0.0 build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
default_gs_version="(${product} ${version} build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
else
date_time=$(date -d today +"%Y-%m-%d %H:%M:%S")
default_gs_version="(openGauss 2.1.0 build $csv_version) compiled at $date_time commit $commits last mr $mrid $debug_str"
default_gs_version="(${product} ${version} build ${csv_version}) compiled at $date_time commit $commits last mr $mrid $debug_str"
fi
printf "${default_gs_version}"
}
function get_kernel_version()
{
cd $1
csv_version=$(git log | grep commit | head -1 | awk '{print $2}' | cut -b 1-8)
commits=$(git log | grep "See merge request" | wc -l)
mrid=$(git log | grep "See merge request" | head -1 | awk -F! '{print $2}' | grep -o '[0-9]\+')
debug_str="$DEBUG_TYPE"
product=$(cat build/script/gauss.spec | grep 'PRODUCT' | awk -F "=" '{print $2}')
version=$(cat build/script/gauss.spec | grep 'VERSION' | awk -F "=" '{print $2}')
if test "$enable_ccache" = yes; then
default_kernel_version="(GaussDB Kernel V500R002C00 build 1f1f1f1f1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
else
date_time=$(date -d today +"%Y-%m-%d %H:%M:%S")
default_kernel_version="($product $version build $csv_version) compiled at $date_time commit $commits last mr $mrid $debug_str"
fi
printf "${default_kernel_version}"
}
function get_time_for_roach()
{
tmp=$(date +'%d %b %Y %H:%M:%S')
@ -166,8 +149,6 @@ case "${DO_CMD}" in
create_conversionfile ;;
--create_snowballfile|snowball)
create_snowballfile ;;
--get_kernel_versionstr|--d)
get_kernel_version "$2";;
--get_gs_versionstr|--s)
get_gs_version "$2";;
--get_time_for_roach)
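A hedged invocation sketch matching the --s dispatch above and the cmake call shown earlier (the script path and the printed values are illustrative; adjust to the checkout layout):
sh cmake/src/buildfunction.sh --s /path/to/openGauss-server
# expected shape of the output, per the printf in get_gs_version:
# (<PRODUCT> <VERSION> build <csv_version>) compiled at <date_time> commit <commits> last mr <mrid> <debug_str>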

View File

@ -913,6 +913,10 @@
* * (--enable-privategauss) */
#cmakedefine ENABLE_PRIVATEGAUSS
/* Define to 1 if you want to generate gauss product as lite mode.
* * (--enable-lite-mode) */
#cmakedefine ENABLE_LITE_MODE
/* Define to 1 if you want to use mot
* --enable-mot */
#cmakedefine ENABLE_MOT
@ -931,3 +935,7 @@
/* Define to on if you want to collect USTORE statistics */
#cmakedefine DEBUG_UHEAP
/* Define to 1 if you want to build opengauss rpm package on openeuler os.
* (--with-openeuler-os) */
#cmakedefine WITH_OPENEULER_OS
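A quick hedged check that the new entries reach the generated header after a lite-mode, openEuler-OS build (the header location relative to the build directory is an assumption):
grep -E 'ENABLE_LITE_MODE|WITH_OPENEULER_OS' pg_config.h
# expected when both switches are on:
# #define ENABLE_LITE_MODE 1
# #define WITH_OPENEULER_OS 1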

View File

@ -3,6 +3,7 @@ set(3RD_PATH $ENV{THIRD_BIN_PATH})
set(VERSION_TYPE $ENV{DEBUG_TYPE})
option(ENABLE_LLT "enable llt, current value is --enable-llt" OFF)
option(ENABLE_UT "enable ut, current value is --enable-ut" OFF)
option(WITH_OPENEULER_OS "Build openGauss rpm package on openEuler os" OFF)
execute_process(COMMAND sh ${PROJECT_SRC_DIR}/get_PlatForm_str.sh OUTPUT_VARIABLE PLAT_FORM_STR OUTPUT_STRIP_TRAILING_WHITESPACE)
@ -76,6 +77,7 @@ set(PROTOBUF_HOME ${DEPENDENCY_PATH}/protobuf/${SUPPORT_LLT})
set(THRIFT_HOME ${DEPENDENCY_PATH}/thrift)
set(SNAPPY_HOME ${DEPENDENCY_PATH}/snappy/${LIB_UNIFIED_SUPPORT})
set(ZLIB_HOME ${DEPENDENCY_PATH}/zlib1.2.11/${SUPPORT_LLT})
set(XGBOOST_HOME ${DEPENDENCY_PATH}/xgboost/${SUPPORT_LLT})
set(ZSTD_HOME ${DEPENDENCY_PATH}/zstd)
set(LICENSE_HOME ${PLATFORM_PATH}/AdaptiveLM_C_V100R005C01SPC002/${SUPPORT_LLT})
set(HOTPATCH_HOME ${PLATFORM_PATH}/hotpatch)
@ -159,6 +161,12 @@ else()
endif()
endif()
if(${WITH_OPENEULER_OS} STREQUAL "ON")
set(SECURE_C_CHECK boundscheck)
else()
set(SECURE_C_CHECK securec)
endif()
#############################################################################
# kerberos component
#############################################################################
@ -275,6 +283,12 @@ set(SNAPPY_LIB_PATH ${SNAPPY_HOME}/lib)
set(ZLIB_INCLUDE_PATH ${ZLIB_HOME}/include)
set(ZLIB_LIB_PATH ${ZLIB_HOME}/lib)
#############################################################################
# xgboost component
#############################################################################
set(XGBOOST_INCLUDE_PATH ${XGBOOST_HOME}/include)
set(XGBOOST_LIB_PATH ${XGBOOST_HOME}/lib64)
#############################################################################
# zstd component
#############################################################################
@ -346,3 +360,15 @@ set(MOCKCPP_3RDPARTY_PATH ${MOCKCPP_HOME}/3rdparty)
set(MASSTREE_INCLUDE_PATH ${MASSTREE_HOME}/include)
set(MASSTREE_LIB_PATH ${MASSTREE_HOME}/lib)
############################################################################
# gtest component
############################################################################
set(GTEST_INCLUDE_PATH ${GTEST_HOME}/include)
set(GTEST_LIB_PATH ${GTEST_HOME}/lib)
############################################################################
# mockcpp component
############################################################################
set(MOCKCPP_INCLUDE_PATH ${MOCKCPP_HOME}/include)
set(MOCKCPP_LIB_PATH ${MOCKCPP_HOME}/lib)
set(MOCKCPP_3RDPARTY_PATH ${MOCKCPP_HOME}/3rdparty)

156
configure vendored
View File

@ -708,6 +708,7 @@ with_ossp_uuid
with_selinux
krb_srvtab
with_python
with_openeuler_os
enable_thread_safety
INCLUDES
TAS
@ -741,12 +742,15 @@ enable_llt
enable_llvm
llvm_major_version
llvm_minor_version
flex_major_version
flex_minor_version
enable_ut
enable_qunit
enable_jemalloc
enable_jemalloc_debug
enable_privategauss
enable_multiple_nodes
enable_lite_mode
enable_mot
enable_memory_check
enable_mysql_fdw
@ -754,7 +758,6 @@ enable_oracle_fdw
enable_thread_check
enable_shared
default_gs_version
default_kernel_version
default_port
WANTED_LANGUAGES
enable_nls
@ -824,12 +827,14 @@ enable_integer_datetimes
enable_nls
with_pgport
with_gs_version
with_openeuler_os
enable_shared
enable_rpath
enable_jemalloc
enable_jemalloc_debug
enable_privategauss
enable_multiple_nodes
enable_lite_mode
enable_mot
enable_memory_check
enable_mysql_fdw
@ -1311,6 +1316,11 @@ Try \`$0 --help' for more information." >&2
esac
done
# if compile with_openeuler_os. it should use gcc on os.
if test "${with_openeuler_os+set}" = set; then
gcc_version=$(gcc --version | sed q | awk -F')' '{print $2}' | awk '{print $1}')
fi
if test -n "$ac_prev"; then
ac_option=--`echo $ac_prev | sed 's/_/-/g'`
{ $as_echo "$as_me: error: missing argument to $ac_option" >&2
@ -1549,6 +1559,7 @@ Optional Features:
--disable-float4-byval disable float4 passed by value
--disable-float8-byval disable float8 passed by value
--enable-ccache build with ccache reducing compile time
--enable-lite-mode generate the gauss product as lite mode
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
@ -2655,8 +2666,8 @@ $as_echo "$as_me: error: argument required for --with-gs-version option" >&2;}
esac
else
product=$(cat build/script/gauss.spec | grep 'PRODUCT' | awk -F "=" '{print $2}')
version=$(cat build/script/gauss.spec | grep 'VERSION' | awk -F "=" '{print $2}')
product=$(cat build/script/gaussdb.ver | grep 'PRODUCT' | awk -F "=" '{print $2}')
version=$(cat build/script/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
gitversion=$(git log 2>/dev/null | grep commit | head -1 | awk '{print $2}' | cut -b 1-8)
commits=$(git log 2>/dev/null | grep "See in merge request" | wc -l)
debug_str=""
@ -2666,11 +2677,9 @@ else
fi
if test "$enable_ccache" = yes; then
default_gs_version="(openGauss 2.1.0 build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
default_kernel_version="(GaussDB Kernel V500R002C00 build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
default_gs_version="($product $version build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
else
default_gs_version="($product $version build $gitversion) compiled at `date -d today +\"%Y-%m-%d %H:%M:%S\"` commit $commits last mr $mrid $debug_str"
default_kernel_version="($product $version build $gitversion) compiled at `date -d today +\"%Y-%m-%d %H:%M:%S\"` commit $commits last mr $mrid $debug_str"
fi
fi
@ -2679,7 +2688,7 @@ fi
$as_echo "$default_gs_version" >&6; }
cat >>confdefs.h <<_ACEOF
#define DEF_GS_VERSION "${default_kernel_version}"
#define DEF_GS_VERSION "${default_gs_version}"
_ACEOF
@ -2876,14 +2885,6 @@ if test "${enable_mot+set}" = set; then
$as_echo "$as_me: error: --enable-mot option is not supported with --enable-multiple-nodes option" >&2;}
{ (exit 1); exit 1; }; }
fi
if test "$enable_llvm" = no; then
{ { $as_echo "$as_me:$LINENO: error: --enable-mot option is not supported with --disable-llvm option" >&5
$as_echo "$as_me: error: --enable-mot option is not supported with --disable-llvm option" >&2;}
{ (exit 1); exit 1; }; }
fi
;;
no)
@ -3185,6 +3186,91 @@ else
fi
#
# --enable-lite-mode enables
#
# Check whether --enable-lite-mode was given.
if test "${enable_lite_mode+set}" = set; then
enableval=$enable_lite_mode;
case $enableval in
yes)
if test "$enable_multiple_nodes" = yes; then
{ { $as_echo "$as_me:$LINENO: error: --enable-lite-mode option is not supported with --enable-multiple-nodes option" >&5
$as_echo "$as_me: error: --enable-lite-mode option is not supported with --enable-multiple-nodes option" >&2;}
{ (exit 1); exit 1; }; }
fi
;;
no)
:
;;
*)
{ { $as_echo "$as_me:$LINENO: error: no argument expected for --enable-lite-mode option" >&5
$as_echo "$as_me: error: no argument expected for --enable-lite-mode option" >&2;}
{ (exit 1); exit 1; }; }
;;
esac
else
enable_lite_mode=no
fi
if test "$enable_multiple_nodes" = yes; then
enable_lite_mode=no
fi
if test "$enable_lite_mode" = yes; then
cat >>confdefs.h <<\_ACEOF
#define ENABLE_LITE_MODE 1
_ACEOF
fi
#
# --with-openeuler-os enable
#
# Check whether --with-openeuler-os was given.
if test "${with_openeuler_os+set}" = set; then
enableval=$with_openeuler_os;
case $enableval in
yes)
if test "$enable_multiple_nodes" = yes; then
{ { $as_echo "$as_me:$LINENO: error: --with-openeuler-os option is not supported with --enable-multiple-nodes option" >&5
$as_echo "$as_me: error: --with-openeuler-os option is not supported with --enable-multiple-nodes option" >&2;}
{ (exit 1); exit 1; }; }
fi
;;
no)
:
;;
*)
{ { $as_echo "$as_me:$LINENO: error: no argument expected for --with-openeuler-os option" >&5
$as_echo "$as_me: error: no argument expected for --with-openeuler-os option" >&2;}
{ (exit 1); exit 1; }; }
;;
esac
else
with_openeuler_os=no
fi
if test "$enable_multiple_nodes" = yes; then
with_openeuler_os=no
fi
if test "$with_openeuler_os" = yes; then
cat >>confdefs.h <<\_ACEOF
#define WITH_OPENEULER_OS 1
_ACEOF
fi
#
@ -5771,10 +5857,14 @@ if test "${enable_llvm+set}" = set; then
case $enableval in
yes)
if test "$enable_lite_mode" = yes; then
{ $as_echo "$as_me:$LINENO: enable_lite_mode is open, llvm will close" >&5
$as_echo "$as_me: enable_lite_mode is open, llvm will close" >&2;}
else
cat >>confdefs.h <<\_ACEOF
#define ENABLE_LLVM_COMPILE 1
_ACEOF
fi
;;
no)
:
@ -5786,16 +5876,20 @@ $as_echo "$as_me: error: no argument expected for --enable-llvm option" >&2;}
;;
esac
else
if test "$enable_lite_mode" = yes; then
{ $as_echo "$as_me:$LINENO: enable_lite_mode is open, llvm will close" >&5
$as_echo "$as_me: enable_lite_mode is open, llvm will close" >&2;}
else
cat >>confdefs.h <<\_ACEOF
#define ENABLE_LLVM_COMPILE 1
_ACEOF
enable_llvm=yes
fi
fi
llvm_version_str='10.0.0'
if [ ! -z "${with_3rdpartydir}" ]; then
if [[ ! -z "${with_3rdpartydir}" ]] && [[ "$enable_lite_mode" != yes ]]; then
platstr=$(sh src/get_PlatForm_str.sh)
llvm_version_str=`${with_3rdpartydir}/dependency/${platstr}/llvm/comm/bin/llvm-config --version`
fi
@ -6527,7 +6621,7 @@ fi
# JDK
#
with_jdk=''
if [ ! -z "${with_3rdpartydir}" ]; then
if [[ ! -z "${with_3rdpartydir}" ]] && [[ "$with_openeuler_os" != yes ]]; then
platstr=$(sh src/get_PlatForm_str.sh)
cpuarch=$(uname -m)
for d in "openjdk8" "huaweijdk8"; do
@ -7822,6 +7916,14 @@ else
pgac_flex_version=`$FLEX --version 2>/dev/null`
{ $as_echo "$as_me:$LINENO: using $pgac_flex_version" >&5
$as_echo "$as_me: using $pgac_flex_version" >&6;}
flex_major_version=$(echo $pgac_flex_version | awk '{print $2}' | awk -F "." '{print $1}')
flex_minor_version=$(echo $pgac_flex_version | awk '{print $2}' | awk -F "." '{print $2}')
cat >>confdefs.h <<_ACEOF
#define FLEX_MAJOR_VERSION $flex_major_version
#define FLEX_MINOR_VERSION $flex_minor_version
_ACEOF
fi
@ -9258,7 +9360,7 @@ $as_echo "$as_me: WARNING:
*** Not using spinlocks will cause poor performance." >&2;}
fi
if test "$with_gssapi" = no ; then
if test "$with_gssapi_" = no ; then
if test "$PORTNAME" != "win32"; then
{ $as_echo "$as_me:$LINENO: checking for library containing gss_init_sec_context" >&5
$as_echo_n "checking for library containing gss_init_sec_context... " >&6; }
@ -12552,7 +12654,7 @@ fi
fi
if test "$with_gssapi" = no ; then
if test "$with_gssapi_" = no ; then
for ac_header in gssapi/gssapi.h
do
@ -29687,7 +29789,7 @@ fi
if test "$enable_multiple_nodes" = yes; then
cat >>confdefs.h <<_ACEOF
#define PG_VERSION_STR "openGauss $OPENGAUSS_VERSION ${default_kernel_version} on $host, compiled by $cc_string, `expr $ac_cv_sizeof_void_p \* 8`-bit"
#define PG_VERSION_STR "openGauss $OPENGAUSS_VERSION ${default_gs_version} on $host, compiled by $cc_string, `expr $ac_cv_sizeof_void_p \* 8`-bit"
_ACEOF
else
cat >>confdefs.h <<_ACEOF
@ -31156,8 +31258,10 @@ find src/gausskernel/ -name "*.y" | sort >> ./ereport.txt
find src/common/backend -name "*.cpp" | sort >> ./ereport.txt
find src/gausskernel/ -name "*.cpp" | sort >> ./ereport.txt
if [[ "$enable_multiple_nodes" != no ]] || [[ "$enable_privategauss" != no ]]; then
find ../distribute/cm -name "*.l" | sort > ./cm_ereport.txt
find ../distribute/cm -name "*.y" | sort >> ./cm_ereport.txt
find ../distribute/cm -name "*.cpp" | sort >> ./cm_ereport.txt
if [[ "$enable_lite_mode" != yes ]]; then
if [[ "$enable_multiple_nodes" != no ]] || [[ "$enable_privategauss" != no ]]; then
find ../distribute/cm -name "*.l" | sort > ./cm_ereport.txt
find ../distribute/cm -name "*.y" | sort >> ./cm_ereport.txt
find ../distribute/cm -name "*.cpp" | sort >> ./cm_ereport.txt
fi
fi
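A hedged configure invocation exercising the new switches (flag names are the ones added above; the usual prefix, gcc-version and 3rd-party options are omitted here):
./configure --enable-lite-mode --with-openeuler-os
As the checks above show, either switch combined with --enable-multiple-nodes raises a configure error.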

View File

@ -12,6 +12,7 @@ set(CMAKE_MODULE_PATH
${CMAKE_CURRENT_SOURCE_DIR}/hstore
${CMAKE_CURRENT_SOURCE_DIR}/test_decoding
${CMAKE_CURRENT_SOURCE_DIR}/mppdb_decoding
${CMAKE_CURRENT_SOURCE_DIR}/sql_decoding
${CMAKE_CURRENT_SOURCE_DIR}/spi
${CMAKE_CURRENT_SOURCE_DIR}/pg_upgrade_support
${CMAKE_CURRENT_SOURCE_DIR}/postgres_fdw
@ -28,6 +29,7 @@ set(CMAKE_MODULE_PATH
add_subdirectory(hstore)
add_subdirectory(test_decoding)
add_subdirectory(mppdb_decoding)
add_subdirectory(sql_decoding)
add_subdirectory(spi)
if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" OR "${ENABLE_PRIVATEGAUSS}" STREQUAL "ON")
add_subdirectory(pg_upgrade_support)
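sql_decoding is only wired into the cmake build here; assuming it follows the same contrib layout as test_decoding and mppdb_decoding, the make-based build would presumably pick it up with:
make -C contrib/sql_decoding -sj
make -C contrib/sql_decoding install -sj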

View File

@ -3,7 +3,7 @@
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION file_fdw" to load this file. \quit
CREATE FUNCTION file_fdw_handler()
CREATE FUNCTION pg_catalog.file_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;

View File

@ -158,6 +158,14 @@ Datum file_fdw_handler(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(fdwroutine);
}
void check_file_fdw_permission()
{
if ((!initialuser()) && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) {
ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("Dist fdw are only available for the supper user and Operatoradmin")));
}
}
/*
* Validate the generic options given to a FOREIGN DATA WRAPPER, SERVER,
* USER MAPPING or FOREIGN TABLE that uses file_fdw.
@ -173,6 +181,7 @@ Datum file_fdw_validator(PG_FUNCTION_ARGS)
List* other_options = NIL;
ListCell* cell = NULL;
check_file_fdw_permission();
if (catalog == UserMappingRelationId) {
ereport(
ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("file_fdw doesn't support in USER MAPPING.")));
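A hedged behavioural sketch of the new check (role and server names are made up; the quoted error is the message from check_file_fdw_permission above):
gsql -d postgres -U ordinary_user -c "CREATE SERVER file_srv FOREIGN DATA WRAPPER file_fdw OPTIONS (format 'csv');"
# ERROR:  file_fdw is only available for the super user and Operatoradmin
Only the initial user, or an operator admin while operation_mode is on, passes the check.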

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -3,12 +3,12 @@
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION gc_fdw" to load this file. \quit
CREATE FUNCTION gc_fdw_handler()
CREATE FUNCTION pg_catalog.gc_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;
CREATE FUNCTION gc_fdw_validator(text[], oid)
CREATE FUNCTION pg_catalog.gc_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;

View File

@ -3,12 +3,12 @@
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION hdfs_fdw" to load this file. \quit
CREATE FUNCTION hdfs_fdw_handler()
CREATE FUNCTION pg_catalog.hdfs_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;
CREATE FUNCTION hdfs_fdw_validator(text[], oid)
CREATE FUNCTION pg_catalog.hdfs_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;

View File

@ -363,8 +363,12 @@ void serverOptionValidator(List* ServerOptionList)
*/
switch (serverType) {
case T_OBS_SERVER: {
#ifndef ENABLE_LITE_MODE
checkOptionNameValidity(ServerOptionList, OBS_SERVER_OPTION);
break;
#else
FEATURE_ON_LITE_MODE_NOT_SUPPORTED();
#endif
}
case T_HDFS_SERVER: {
FEATURE_NOT_PUBLIC_ERROR("HDFS is not yet supported.");
@ -405,6 +409,7 @@ void serverOptionValidator(List* ServerOptionList)
ServerOptionCheckSet(ServerOptionList, serverType, addressFound, cfgPathFound, akFound, sakFound,
encrypt, userNameFound, passWordFound, regionFound, hostName, ak, sk, regionStr);
#ifndef ENABLE_LITE_MODE
if (T_OBS_SERVER == serverType) {
if (addressFound && regionFound) {
ereport(ERROR,
@ -442,6 +447,8 @@ void serverOptionValidator(List* ServerOptionList)
checkOBSServerValidity(URL, ak, sk, encrypt);
}
}
#endif
if (T_HDFS_SERVER == serverType && !cfgPathFound) {
ereport(ERROR,
(errcode(ERRCODE_FDW_DYNAMIC_PARAMETER_VALUE_NEEDED),
@ -1434,6 +1441,7 @@ static void HdfsEndForeignScan(ForeignScanState* scanState)
}
}
#ifdef ENABLE_LLVM_COMPILE
/*
* LLVM optimization information should be shown. We check the query
* uses LLVM optimization or not.
@ -1502,6 +1510,7 @@ static void HdfsEndForeignScan(ForeignScanState* scanState)
}
}
}
#endif
/* clears all file related memory */
if (NULL != executionState->fileReader) {

View File

@ -716,7 +716,11 @@ List* CNSchedulingForAnalyze(unsigned int* totalFilesNum, unsigned int* numOfDns
if (isglbstats) {
if (IS_OBS_CSV_TXT_FOREIGN_TABLE(foreignTableId)) {
/* for dist obs foreign table.*/
#ifndef ENABLE_LITE_MODE
allTask = CNSchedulingForDistOBSFt(foreignTableId);
#else
FEATURE_ON_LITE_MODE_NOT_SUPPORTED();
#endif
} else {
if (rel_loc_info == NULL) {
ereport(ERROR,

View File

@ -5,22 +5,22 @@
CREATE TYPE hstore;
CREATE FUNCTION hstore_in(cstring)
CREATE FUNCTION pg_catalog.hstore_in(cstring)
RETURNS hstore
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION hstore_out(hstore)
CREATE FUNCTION pg_catalog.hstore_out(hstore)
RETURNS cstring
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION hstore_recv(internal)
CREATE FUNCTION pg_catalog.hstore_recv(internal)
RETURNS hstore
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION hstore_send(hstore)
CREATE FUNCTION pg_catalog.hstore_send(hstore)
RETURNS bytea
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -34,12 +34,12 @@ CREATE TYPE hstore (
STORAGE = extended
);
CREATE FUNCTION hstore_version_diag(hstore)
CREATE FUNCTION pg_catalog.hstore_version_diag(hstore)
RETURNS integer
AS 'MODULE_PATHNAME','hstore_version_diag'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION fetchval(hstore,text)
CREATE FUNCTION pg_catalog.fetchval(hstore,text)
RETURNS text
AS 'MODULE_PATHNAME','hstore_fetchval'
LANGUAGE C STRICT IMMUTABLE;
@ -50,7 +50,7 @@ CREATE OPERATOR -> (
PROCEDURE = fetchval
);
CREATE FUNCTION slice_array(hstore,text[])
CREATE FUNCTION pg_catalog.slice_array(hstore,text[])
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_slice_to_array'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -61,17 +61,17 @@ CREATE OPERATOR -> (
PROCEDURE = slice_array
);
CREATE FUNCTION slice(hstore,text[])
CREATE FUNCTION pg_catalog.slice(hstore,text[])
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_slice_to_hstore'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION isexists(hstore,text)
CREATE FUNCTION pg_catalog.isexists(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION exist(hstore,text)
CREATE FUNCTION pg_catalog.exist(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -84,7 +84,7 @@ CREATE OPERATOR ? (
JOIN = contjoinsel
);
CREATE FUNCTION exists_any(hstore,text[])
CREATE FUNCTION pg_catalog.exists_any(hstore,text[])
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists_any'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -97,7 +97,7 @@ CREATE OPERATOR ?| (
JOIN = contjoinsel
);
CREATE FUNCTION exists_all(hstore,text[])
CREATE FUNCTION pg_catalog.exists_all(hstore,text[])
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists_all'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -110,27 +110,27 @@ CREATE OPERATOR ?& (
JOIN = contjoinsel
);
CREATE FUNCTION isdefined(hstore,text)
CREATE FUNCTION pg_catalog.isdefined(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_defined'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION defined(hstore,text)
CREATE FUNCTION pg_catalog.defined(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_defined'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION delete(hstore,text)
CREATE FUNCTION pg_catalog.delete(hstore,text)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_delete'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION delete(hstore,text[])
CREATE FUNCTION pg_catalog.delete(hstore,text[])
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_delete_array'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION delete(hstore,hstore)
CREATE FUNCTION pg_catalog.delete(hstore,hstore)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_delete_hstore'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -153,7 +153,7 @@ CREATE OPERATOR - (
PROCEDURE = delete
);
CREATE FUNCTION hs_concat(hstore,hstore)
CREATE FUNCTION pg_catalog.hs_concat(hstore,hstore)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_concat'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -164,12 +164,12 @@ CREATE OPERATOR || (
PROCEDURE = hs_concat
);
CREATE FUNCTION hs_contains(hstore,hstore)
CREATE FUNCTION pg_catalog.hs_contains(hstore,hstore)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_contains'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION hs_contained(hstore,hstore)
CREATE FUNCTION pg_catalog.hs_contained(hstore,hstore)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_contained'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -211,12 +211,12 @@ CREATE OPERATOR ~ (
JOIN = contjoinsel
);
CREATE FUNCTION tconvert(text,text)
CREATE FUNCTION pg_catalog.tconvert(text,text)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_from_text'
LANGUAGE C IMMUTABLE NOT FENCED; -- not STRICT; needs to allow (key,NULL)
CREATE FUNCTION hstore(text,text)
CREATE FUNCTION pg_catalog.hstore(text,text)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_from_text'
LANGUAGE C IMMUTABLE; -- not STRICT; needs to allow (key,NULL)
@ -227,25 +227,25 @@ CREATE OPERATOR => (
PROCEDURE = hstore
);
CREATE FUNCTION hstore(text[],text[])
CREATE FUNCTION pg_catalog.hstore(text[],text[])
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_arrays'
LANGUAGE C IMMUTABLE NOT FENCED; -- not STRICT; allows (keys,null)
CREATE FUNCTION hstore(text[])
CREATE FUNCTION pg_catalog.hstore(text[])
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_array'
LANGUAGE C IMMUTABLE STRICT NOT FENCED;
CREATE CAST (text[] AS hstore)
WITH FUNCTION hstore(text[]);
WITH FUNCTION pg_catalog.hstore(text[]);
CREATE FUNCTION hstore(record)
CREATE FUNCTION pg_catalog.hstore(record)
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_record'
LANGUAGE C IMMUTABLE NOT FENCED; -- not STRICT; allows (null::recordtype)
CREATE FUNCTION hstore_to_array(hstore)
CREATE FUNCTION pg_catalog.hstore_to_array(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_to_array'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -255,7 +255,7 @@ CREATE OPERATOR %% (
PROCEDURE = hstore_to_array
);
CREATE FUNCTION hstore_to_matrix(hstore)
CREATE FUNCTION pg_catalog.hstore_to_matrix(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_to_matrix'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
@ -265,27 +265,27 @@ CREATE OPERATOR %# (
PROCEDURE = hstore_to_matrix
);
CREATE FUNCTION akeys(hstore)
CREATE FUNCTION pg_catalog.akeys(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_akeys'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION avals(hstore)
CREATE FUNCTION pg_catalog.avals(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_avals'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION skeys(hstore)
CREATE FUNCTION pg_catalog.skeys(hstore)
RETURNS setof text
AS 'MODULE_PATHNAME','hstore_skeys'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION svals(hstore)
CREATE FUNCTION pg_catalog.svals(hstore)
RETURNS setof text
AS 'MODULE_PATHNAME','hstore_svals'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION each(IN hs hstore,
CREATE FUNCTION pg_catalog.each(IN hs hstore,
OUT key text,
OUT value text)
RETURNS SETOF record
@ -521,7 +521,7 @@ AS
OPERATOR 9 ?(hstore,text),
OPERATOR 10 ?|(hstore,text[]),
OPERATOR 11 ?&(hstore,text[]),
FUNCTION 1 bttextcmp(text,text),
FUNCTION 1 pg_catalog.bttextcmp(text,text),
FUNCTION 2 gin_extract_hstore(internal, internal),
FUNCTION 3 gin_extract_hstore_query(internal, internal, int2, internal, internal),
FUNCTION 4 gin_consistent_hstore(internal, int2, internal, int4, internal, internal),
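
The hunks above schema-qualify every hstore support function as pg_catalog.*, so the calls resolve regardless of the caller's search_path, while the operators (->, ?, ||, =>, @>, <@) keep their definitions on top of those functions. A minimal usage sketch, assuming the extension is installed in the current database:

    CREATE EXTENSION IF NOT EXISTS hstore;
    -- operators are unchanged; the functions can also be called with an explicit schema
    SELECT 'a=>1, b=>2'::hstore -> 'a';
    SELECT pg_catalog.fetchval('a=>1, b=>2'::hstore, 'b');
    SELECT pg_catalog.akeys('a=>1, b=>2'::hstore);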

View File

@ -5,22 +5,22 @@
CREATE TYPE hstore;
CREATE FUNCTION hstore_in(cstring)
CREATE FUNCTION pg_catalog.hstore_in(cstring)
RETURNS hstore
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION hstore_out(hstore)
CREATE FUNCTION pg_catalog.hstore_out(hstore)
RETURNS cstring
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;
CREATE FUNCTION hstore_recv(internal)
CREATE FUNCTION pg_catalog.hstore_recv(internal)
RETURNS hstore
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION hstore_send(hstore)
CREATE FUNCTION pg_catalog.hstore_send(hstore)
RETURNS bytea
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
@ -34,145 +34,145 @@ CREATE TYPE hstore (
STORAGE = extended
);
CREATE FUNCTION hstore_version_diag(hstore)
CREATE FUNCTION pg_catalog.hstore_version_diag(hstore)
RETURNS integer
AS 'MODULE_PATHNAME','hstore_version_diag'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION fetchval(hstore,text)
CREATE FUNCTION pg_catalog.fetchval(hstore,text)
RETURNS text
AS 'MODULE_PATHNAME','hstore_fetchval'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION slice_array(hstore,text[])
CREATE FUNCTION pg_catalog.slice_array(hstore,text[])
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_slice_to_array'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION slice(hstore,text[])
CREATE FUNCTION pg_catalog.slice(hstore,text[])
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_slice_to_hstore'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION isexists(hstore,text)
CREATE FUNCTION pg_catalog.isexists(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION exist(hstore,text)
CREATE FUNCTION pg_catalog.exist(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION exists_any(hstore,text[])
CREATE FUNCTION pg_catalog.exists_any(hstore,text[])
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists_any'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION exists_all(hstore,text[])
CREATE FUNCTION pg_catalog.exists_all(hstore,text[])
RETURNS bool
AS 'MODULE_PATHNAME','hstore_exists_all'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION isdefined(hstore,text)
CREATE FUNCTION pg_catalog.isdefined(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_defined'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION defined(hstore,text)
CREATE FUNCTION pg_catalog.defined(hstore,text)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_defined'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION delete(hstore,text)
CREATE FUNCTION pg_catalog.delete(hstore,text)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_delete'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION delete(hstore,text[])
CREATE FUNCTION pg_catalog.delete(hstore,text[])
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_delete_array'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION delete(hstore,hstore)
CREATE FUNCTION pg_catalog.delete(hstore,hstore)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_delete_hstore'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION hs_concat(hstore,hstore)
CREATE FUNCTION pg_catalog.hs_concat(hstore,hstore)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_concat'
LANGUAGE C IMMUTABLE NOT FENCED;;
CREATE FUNCTION hs_contains(hstore,hstore)
CREATE FUNCTION pg_catalog.hs_contains(hstore,hstore)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_contains'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION hs_contained(hstore,hstore)
CREATE FUNCTION pg_catalog.hs_contained(hstore,hstore)
RETURNS bool
AS 'MODULE_PATHNAME','hstore_contained'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION tconvert(text,text)
CREATE FUNCTION pg_catalog.tconvert(text,text)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_from_text'
LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; needs to allow (key,NULL)
CREATE FUNCTION hstore(text,text)
CREATE FUNCTION pg_catalog.hstore(text,text)
RETURNS hstore
AS 'MODULE_PATHNAME','hstore_from_text'
LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; needs to allow (key,NULL)
CREATE FUNCTION hstore(text[],text[])
CREATE FUNCTION pg_catalog.hstore(text[],text[])
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_arrays'
LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; allows (keys,null)
CREATE FUNCTION hstore(text[])
CREATE FUNCTION pg_catalog.hstore(text[])
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_array'
LANGUAGE C IMMUTABLE STRICT NOT FENCED;;
CREATE CAST (text[] AS hstore)
WITH FUNCTION hstore(text[]);
WITH FUNCTION pg_catalog.hstore(text[]);
CREATE FUNCTION hstore(record)
CREATE FUNCTION pg_catalog.hstore(record)
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_record'
LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; allows (null::recordtype)
CREATE FUNCTION hstore_to_array(hstore)
CREATE FUNCTION pg_catalog.hstore_to_array(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_to_array'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION hstore_to_matrix(hstore)
CREATE FUNCTION pg_catalog.hstore_to_matrix(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_to_matrix'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION akeys(hstore)
CREATE FUNCTION pg_catalog.akeys(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_akeys'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION avals(hstore)
CREATE FUNCTION pg_catalog.avals(hstore)
RETURNS text[]
AS 'MODULE_PATHNAME','hstore_avals'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION skeys(hstore)
CREATE FUNCTION pg_catalog.skeys(hstore)
RETURNS setof text
AS 'MODULE_PATHNAME','hstore_skeys'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION svals(hstore)
CREATE FUNCTION pg_catalog.svals(hstore)
RETURNS setof text
AS 'MODULE_PATHNAME','hstore_svals'
LANGUAGE C STRICT IMMUTABLE NOT FENCED;;
CREATE FUNCTION each(IN hs hstore,
CREATE FUNCTION pg_catalog.each(IN hs hstore,
OUT key text,
OUT value text)
RETURNS SETOF record

View File

@ -4,55 +4,55 @@
\echo Use "CREATE EXTENSION hstore" to load this file. \quit
ALTER EXTENSION hstore ADD type hstore;
ALTER EXTENSION hstore ADD function hstore_in(cstring);
ALTER EXTENSION hstore ADD function hstore_out(hstore);
ALTER EXTENSION hstore ADD function hstore_recv(internal);
ALTER EXTENSION hstore ADD function hstore_send(hstore);
ALTER EXTENSION hstore ADD function hstore_version_diag(hstore);
ALTER EXTENSION hstore ADD function fetchval(hstore,text);
ALTER EXTENSION hstore ADD function pg_catalog.hstore_in(cstring);
ALTER EXTENSION hstore ADD function pg_catalog.hstore_out(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.hstore_recv(internal);
ALTER EXTENSION hstore ADD function pg_catalog.hstore_send(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.hstore_version_diag(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.fetchval(hstore,text);
ALTER EXTENSION hstore ADD operator ->(hstore,text);
ALTER EXTENSION hstore ADD function slice_array(hstore,text[]);
ALTER EXTENSION hstore ADD function pg_catalog.slice_array(hstore,text[]);
ALTER EXTENSION hstore ADD operator ->(hstore,text[]);
ALTER EXTENSION hstore ADD function slice(hstore,text[]);
ALTER EXTENSION hstore ADD function isexists(hstore,text);
ALTER EXTENSION hstore ADD function exist(hstore,text);
ALTER EXTENSION hstore ADD function pg_catalog.slice(hstore,text[]);
ALTER EXTENSION hstore ADD function pg_catalog.isexists(hstore,text);
ALTER EXTENSION hstore ADD function pg_catalog.exist(hstore,text);
ALTER EXTENSION hstore ADD operator ?(hstore,text);
ALTER EXTENSION hstore ADD function exists_any(hstore,text[]);
ALTER EXTENSION hstore ADD function pg_catalog.exists_any(hstore,text[]);
ALTER EXTENSION hstore ADD operator ?|(hstore,text[]);
ALTER EXTENSION hstore ADD function exists_all(hstore,text[]);
ALTER EXTENSION hstore ADD function pg_catalog.exists_all(hstore,text[]);
ALTER EXTENSION hstore ADD operator ?&(hstore,text[]);
ALTER EXTENSION hstore ADD function isdefined(hstore,text);
ALTER EXTENSION hstore ADD function defined(hstore,text);
ALTER EXTENSION hstore ADD function delete(hstore,text);
ALTER EXTENSION hstore ADD function delete(hstore,text[]);
ALTER EXTENSION hstore ADD function delete(hstore,hstore);
ALTER EXTENSION hstore ADD function pg_catalog.isdefined(hstore,text);
ALTER EXTENSION hstore ADD function pg_catalog.defined(hstore,text);
ALTER EXTENSION hstore ADD function pg_catalog.delete(hstore,text);
ALTER EXTENSION hstore ADD function pg_catalog.delete(hstore,text[]);
ALTER EXTENSION hstore ADD function pg_catalog.delete(hstore,hstore);
ALTER EXTENSION hstore ADD operator -(hstore,text);
ALTER EXTENSION hstore ADD operator -(hstore,text[]);
ALTER EXTENSION hstore ADD operator -(hstore,hstore);
ALTER EXTENSION hstore ADD function hs_concat(hstore,hstore);
ALTER EXTENSION hstore ADD function pg_catalog.hs_concat(hstore,hstore);
ALTER EXTENSION hstore ADD operator ||(hstore,hstore);
ALTER EXTENSION hstore ADD function hs_contains(hstore,hstore);
ALTER EXTENSION hstore ADD function hs_contained(hstore,hstore);
ALTER EXTENSION hstore ADD function pg_catalog.hs_contains(hstore,hstore);
ALTER EXTENSION hstore ADD function pg_catalog.hs_contained(hstore,hstore);
ALTER EXTENSION hstore ADD operator <@(hstore,hstore);
ALTER EXTENSION hstore ADD operator @>(hstore,hstore);
ALTER EXTENSION hstore ADD operator ~(hstore,hstore);
ALTER EXTENSION hstore ADD operator @(hstore,hstore);
ALTER EXTENSION hstore ADD function tconvert(text,text);
ALTER EXTENSION hstore ADD function hstore(text,text);
ALTER EXTENSION hstore ADD function pg_catalog.tconvert(text,text);
ALTER EXTENSION hstore ADD function pg_catalog.hstore(text,text);
ALTER EXTENSION hstore ADD operator =>(text,text);
ALTER EXTENSION hstore ADD function hstore(text[],text[]);
ALTER EXTENSION hstore ADD function hstore(text[]);
ALTER EXTENSION hstore ADD function pg_catalog.hstore(text[],text[]);
ALTER EXTENSION hstore ADD function pg_catalog.hstore(text[]);
ALTER EXTENSION hstore ADD cast (text[] as hstore);
ALTER EXTENSION hstore ADD function hstore(record);
ALTER EXTENSION hstore ADD function hstore_to_array(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.hstore(record);
ALTER EXTENSION hstore ADD function pg_catalog.hstore_to_array(hstore);
ALTER EXTENSION hstore ADD operator %%(NONE,hstore);
ALTER EXTENSION hstore ADD function hstore_to_matrix(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.hstore_to_matrix(hstore);
ALTER EXTENSION hstore ADD operator %#(NONE,hstore);
ALTER EXTENSION hstore ADD function akeys(hstore);
ALTER EXTENSION hstore ADD function avals(hstore);
ALTER EXTENSION hstore ADD function skeys(hstore);
ALTER EXTENSION hstore ADD function svals(hstore);
ALTER EXTENSION hstore ADD function each(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.akeys(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.avals(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.skeys(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.svals(hstore);
ALTER EXTENSION hstore ADD function pg_catalog.each(hstore);
ALTER EXTENSION hstore ADD function populate_record(anyelement,hstore);
ALTER EXTENSION hstore ADD operator #=(anyelement,hstore);
ALTER EXTENSION hstore ADD function hstore_eq(hstore,hstore);
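
The unpackaged script matters only when the hstore objects already exist as loose objects (for example after restoring a pre-extension dump); the ALTER EXTENSION ... ADD statements above attach the now pg_catalog-qualified functions to the extension. A sketch of how such a script is normally driven, assuming openGauss keeps PostgreSQL's FROM unpackaged syntax:

    -- attach the pre-existing loose hstore objects to the extension;
    -- this runs the unpackaged upgrade script shown above
    CREATE EXTENSION hstore FROM unpackaged;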

View File

@ -50,99 +50,99 @@ select ' '::hstore;
-- -> operator
select fetchval('aa=>b, c=>d , b=>16'::hstore, 'c');
select fetchval('aa=>b, c=>d , b=>16'::hstore, 'b');
select fetchval('aa=>b, c=>d , b=>16'::hstore, 'aa');
select (fetchval('aa=>b, c=>d , b=>16'::hstore, 'gg')) is null;
select (fetchval('aa=>NULL, c=>d , b=>16'::hstore, 'aa')) is null;
select (fetchval('aa=>"NULL", c=>d , b=>16'::hstore, 'aa')) is null;
select pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'c');
select pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'b');
select pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'aa');
select (pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'gg')) is null;
select (pg_catalog.fetchval('aa=>NULL, c=>d , b=>16'::hstore, 'aa')) is null;
select (pg_catalog.fetchval('aa=>"NULL", c=>d , b=>16'::hstore, 'aa')) is null;
-- -> array operator
select slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['aa','c']);
select slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['c','aa']);
select slice_array('aa=>NULL, c=>d , b=>16'::hstore, ARRAY['aa','c',null]);
select slice_array('aa=>1, c=>3, b=>2, d=>4'::hstore, ARRAY[['b','d'],['aa','c']]);
select pg_catalog.slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['aa','c']);
select pg_catalog.slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['c','aa']);
select pg_catalog.slice_array('aa=>NULL, c=>d , b=>16'::hstore, ARRAY['aa','c',null]);
select pg_catalog.slice_array('aa=>1, c=>3, b=>2, d=>4'::hstore, ARRAY[['b','d'],['aa','c']]);
-- exists/defined
select exist('a=>NULL, b=>qq', 'a');
select exist('a=>NULL, b=>qq', 'b');
select exist('a=>NULL, b=>qq', 'c');
select exist('a=>"NULL", b=>qq', 'a');
select defined('a=>NULL, b=>qq', 'a');
select defined('a=>NULL, b=>qq', 'b');
select defined('a=>NULL, b=>qq', 'c');
select defined('a=>"NULL", b=>qq', 'a');
select pg_catalog.exist('a=>NULL, b=>qq', 'a');
select pg_catalog.exist('a=>NULL, b=>qq', 'b');
select pg_catalog.exist('a=>NULL, b=>qq', 'c');
select pg_catalog.exist('a=>"NULL", b=>qq', 'a');
select pg_catalog.defined('a=>NULL, b=>qq', 'a');
select pg_catalog.defined('a=>NULL, b=>qq', 'b');
select pg_catalog.defined('a=>NULL, b=>qq', 'c');
select pg_catalog.defined('a=>"NULL", b=>qq', 'a');
-- delete
select delete('a=>1 , b=>2, c=>3'::hstore, 'a');
select delete('a=>null , b=>2, c=>3'::hstore, 'a');
select delete('a=>1 , b=>2, c=>3'::hstore, 'b');
select delete('a=>1 , b=>2, c=>3'::hstore, 'c');
select delete('a=>1 , b=>2, c=>3'::hstore, 'd');
select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, 'b'::text))
= pg_column_size('a=>1, b=>2'::hstore);
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'a');
select pg_catalog.delete('a=>null , b=>2, c=>3'::hstore, 'a');
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'b');
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'c');
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'd');
select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'b'::text))
= pg_catalog.pg_column_size('a=>1, b=>2'::hstore);
-- delete (array)
select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','e']);
select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','b']);
select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c']);
select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY[['b'],['c'],['a']]);
select delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[]);
select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c']))
= pg_column_size('b=>2'::hstore);
select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[]))
= pg_column_size('a=>1, b=>2, c=>3'::hstore);
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','e']);
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','b']);
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c']);
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY[['b'],['c'],['a']]);
select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[]);
select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c']))
= pg_catalog.pg_column_size('b=>2'::hstore);
select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[]))
= pg_catalog.pg_column_size('a=>1, b=>2, c=>3'::hstore);
-- delete (hstore)
select delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>4, b=>2'::hstore);
select delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>NULL, c=>3'::hstore);
select delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>1, b=>2, c=>3'::hstore);
select delete('aa=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore);
select delete('aa=>1 , b=>2, c=>3'::hstore, ''::hstore);
select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore))
= pg_column_size('a=>1, c=>3'::hstore);
select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, ''::hstore))
= pg_column_size('a=>1, b=>2, c=>3'::hstore);
select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>4, b=>2'::hstore);
select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>NULL, c=>3'::hstore);
select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>1, b=>2, c=>3'::hstore);
select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore);
select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, ''::hstore);
select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore))
= pg_catalog.pg_column_size('a=>1, c=>3'::hstore);
select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ''::hstore))
= pg_catalog.pg_column_size('a=>1, b=>2, c=>3'::hstore);
-- hs_concat
select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f');
select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aq=>l');
select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aa=>l');
select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, '');
select hs_concat(''::hstore, 'cq=>l, b=>g, fg=>f');
select pg_column_size(hs_concat(''::hstore, ''::hstore)) = pg_column_size(''::hstore);
select pg_column_size(hs_concat('aa=>1'::hstore, 'b=>2'::hstore))
= pg_column_size('aa=>1, b=>2'::hstore);
select pg_column_size(hs_concat('aa=>1, b=>2'::hstore, ''::hstore))
= pg_column_size('aa=>1, b=>2'::hstore);
select pg_column_size(hs_concat(''::hstore, 'aa=>1, b=>2'::hstore))
= pg_column_size('aa=>1, b=>2'::hstore);
select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f');
select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aq=>l');
select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aa=>l');
select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, '');
select pg_catalog.hs_concat(''::hstore, 'cq=>l, b=>g, fg=>f');
select pg_catalog.pg_column_size(pg_catalog.hs_concat(''::hstore, ''::hstore)) = pg_catalog.pg_column_size(''::hstore);
select pg_catalog.pg_column_size(pg_catalog.hs_concat('aa=>1'::hstore, 'b=>2'::hstore))
= pg_catalog.pg_column_size('aa=>1, b=>2'::hstore);
select pg_catalog.pg_column_size(pg_catalog.hs_concat('aa=>1, b=>2'::hstore, ''::hstore))
= pg_catalog.pg_column_size('aa=>1, b=>2'::hstore);
select pg_catalog.pg_column_size(pg_catalog.hs_concat(''::hstore, 'aa=>1, b=>2'::hstore))
= pg_catalog.pg_column_size('aa=>1, b=>2'::hstore);
-- hstore(text,text)
select hs_concat('a=>g, b=>c'::hstore, hstore('asd', 'gf'));
select hs_concat('a=>g, b=>c'::hstore, hstore('b', 'gf'));
select hs_concat('a=>g, b=>c'::hstore, hstore('b', 'NULL'));
select hs_concat('a=>g, b=>c'::hstore, hstore('b', NULL));
select (hs_concat('a=>g, b=>c'::hstore, hstore(NULL, 'b'))) is null;
select pg_column_size(hstore('b', 'gf'))
= pg_column_size('b=>gf'::hstore);
select pg_column_size(hs_concat('a=>g, b=>c'::hstore, hstore('b', 'gf')))
= pg_column_size('a=>g, b=>gf'::hstore);
select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('asd', 'gf'));
select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', 'gf'));
select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', 'NULL'));
select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', NULL));
select (pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore(NULL, 'b'))) is null;
select pg_catalog.pg_column_size(pg_catalog.hstore('b', 'gf'))
= pg_catalog.pg_column_size('b=>gf'::hstore);
select pg_catalog.pg_column_size(pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', 'gf')))
= pg_catalog.pg_column_size('a=>g, b=>gf'::hstore);
-- slice()
select slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['g','h','i']);
select slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b']);
select slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['aa','b']);
select slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b','aa']);
select pg_column_size(slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b']))
= pg_column_size('b=>2, c=>3'::hstore);
select pg_column_size(slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b','aa']))
= pg_column_size('aa=>1, b=>2, c=>3'::hstore);
select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['g','h','i']);
select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b']);
select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['aa','b']);
select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b','aa']);
select pg_catalog.pg_column_size(pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b']))
= pg_catalog.pg_column_size('b=>2, c=>3'::hstore);
select pg_catalog.pg_column_size(pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b','aa']))
= pg_catalog.pg_column_size('aa=>1, b=>2, c=>3'::hstore);
-- array input
select '{}'::text[]::hstore;
@ -151,66 +151,66 @@ select ARRAY['a','g','b','h','asd','i']::hstore;
select ARRAY[['a','g'],['b','h'],['asd','i']]::hstore;
select ARRAY[['a','g','b'],['h','asd','i']]::hstore;
select ARRAY[[['a','g'],['b','h'],['asd','i']]]::hstore;
select hstore('{}'::text[]);
select hstore(ARRAY['a','g','b','h','asd']);
select hstore(ARRAY['a','g','b','h','asd','i']);
select hstore(ARRAY[['a','g'],['b','h'],['asd','i']]);
select hstore(ARRAY[['a','g','b'],['h','asd','i']]);
select hstore(ARRAY[[['a','g'],['b','h'],['asd','i']]]);
select hstore('[0:5]={a,g,b,h,asd,i}'::text[]);
select hstore('[0:2][1:2]={{a,g},{b,h},{asd,i}}'::text[]);
select pg_catalog.hstore('{}'::text[]);
select pg_catalog.hstore(ARRAY['a','g','b','h','asd']);
select pg_catalog.hstore(ARRAY['a','g','b','h','asd','i']);
select pg_catalog.hstore(ARRAY[['a','g'],['b','h'],['asd','i']]);
select pg_catalog.hstore(ARRAY[['a','g','b'],['h','asd','i']]);
select pg_catalog.hstore(ARRAY[[['a','g'],['b','h'],['asd','i']]]);
select pg_catalog.hstore('[0:5]={a,g,b,h,asd,i}'::text[]);
select pg_catalog.hstore('[0:2][1:2]={{a,g},{b,h},{asd,i}}'::text[]);
-- pairs of arrays
select hstore(ARRAY['a','b','asd'], ARRAY['g','h','i']);
select hstore(ARRAY['a','b','asd'], ARRAY['g','h',NULL]);
select hstore(ARRAY['z','y','x'], ARRAY['1','2','3']);
select hstore(ARRAY['aaa','bb','c','d'], ARRAY[null::text,null,null,null]);
select hstore(ARRAY['aaa','bb','c','d'], null);
select quote_literal(hstore('{}'::text[], '{}'::text[]));
select quote_literal(hstore('{}'::text[], null));
select hstore(ARRAY['a'], '{}'::text[]); -- error
select hstore('{}'::text[], ARRAY['a']); -- error
select pg_column_size(hstore(ARRAY['a','b','asd'], ARRAY['g','h','i']))
= pg_column_size('a=>g, b=>h, asd=>i'::hstore);
select pg_catalog.hstore(ARRAY['a','b','asd'], ARRAY['g','h','i']);
select pg_catalog.hstore(ARRAY['a','b','asd'], ARRAY['g','h',NULL]);
select pg_catalog.hstore(ARRAY['z','y','x'], ARRAY['1','2','3']);
select pg_catalog.hstore(ARRAY['aaa','bb','c','d'], ARRAY[null::text,null,null,null]);
select pg_catalog.hstore(ARRAY['aaa','bb','c','d'], null);
select pg_catalog.quote_literal(pg_catalog.hstore('{}'::text[], '{}'::text[]));
select pg_catalog.quote_literal(pg_catalog.hstore('{}'::text[], null));
select pg_catalog.hstore(ARRAY['a'], '{}'::text[]); -- error
select pg_catalog.hstore('{}'::text[], ARRAY['a']); -- error
select pg_catalog.pg_column_size(pg_catalog.hstore(ARRAY['a','b','asd'], ARRAY['g','h','i']))
= pg_catalog.pg_column_size('a=>g, b=>h, asd=>i'::hstore);
-- keys/values
select akeys(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select akeys('""=>1');
select akeys('');
select avals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select avals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL'));
select avals('""=>1');
select avals('');
select pg_catalog.akeys(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select pg_catalog.akeys('""=>1');
select pg_catalog.akeys('');
select pg_catalog.avals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select pg_catalog.avals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL'));
select pg_catalog.avals('""=>1');
select pg_catalog.avals('');
select hstore_to_array('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore);
select pg_catalog.hstore_to_array('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore);
select hstore_to_matrix('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore);
select pg_catalog.hstore_to_matrix('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore);
select * from skeys(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select * from skeys('""=>1');
select * from skeys('');
select * from svals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select *, svals is null from svals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL'));
select * from svals('""=>1');
select * from svals('');
select * from pg_catalog.skeys(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select * from pg_catalog.skeys('""=>1');
select * from pg_catalog.skeys('');
select * from pg_catalog.svals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'));
select *, svals is null from pg_catalog.svals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL'));
select * from pg_catalog.svals('""=>1');
select * from pg_catalog.svals('');
select * from each('aaa=>bq, b=>NULL, ""=>1 ');
select * from pg_catalog.each('aaa=>bq, b=>NULL, ""=>1 ');
-- hs_contains
select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore);
select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>NULL'::hstore);
select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, g=>NULL'::hstore);
select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'g=>NULL'::hstore);
select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>c'::hstore);
select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore);
select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>q'::hstore);
select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore);
select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>NULL'::hstore);
select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, g=>NULL'::hstore);
select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'g=>NULL'::hstore);
select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>c'::hstore);
select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore);
select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>q'::hstore);
CREATE TABLE testhstore (h hstore);
\copy testhstore from 'data/hstore.data'
select count(*) from testhstore where hs_contains(h, 'wait=>NULL'::hstore);
select count(*) from testhstore where hs_contains(h, 'wait=>CC'::hstore);
select count(*) from testhstore where hs_contains(h, 'wait=>CC, public=>t'::hstore);
select count(*) from testhstore where exist(h, 'public');
select count(*) from testhstore where exists_any(h, ARRAY['public','disabled']);
select count(*) from testhstore where exists_all(h, ARRAY['public','disabled']);
select pg_catalog.count(*) from testhstore where pg_catalog.hs_contains(h, 'wait=>NULL'::hstore);
select pg_catalog.count(*) from testhstore where pg_catalog.hs_contains(h, 'wait=>CC'::hstore);
select pg_catalog.count(*) from testhstore where pg_catalog.hs_contains(h, 'wait=>CC, public=>t'::hstore);
select pg_catalog.count(*) from testhstore where pg_catalog.exist(h, 'public');
select pg_catalog.count(*) from testhstore where pg_catalog.exists_any(h, ARRAY['public','disabled']);
select pg_catalog.count(*) from testhstore where pg_catalog.exists_all(h, ARRAY['public','disabled']);

View File

@ -10,12 +10,12 @@
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION log_fdw" to load this file. \quit
CREATE FUNCTION log_fdw_handler()
CREATE FUNCTION pg_catalog.log_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;
CREATE FUNCTION log_fdw_validator(text[], oid)
CREATE FUNCTION pg_catalog.log_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT NOT FENCED;
@ -26,7 +26,7 @@ CREATE FOREIGN DATA WRAPPER log_fdw
CREATE SERVER log_srv FOREIGN DATA WRAPPER log_fdw;
create or replace function gs_create_log_tables()
create or replace function pg_catalog.gs_create_log_tables()
RETURNS void
AS $$
declare
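
log_fdw follows the same pattern: the handler, the validator and the gs_create_log_tables helper are created under pg_catalog, and the log_srv server is defined on top of the wrapper. A short usage sketch, assuming the extension files are installed:

    CREATE EXTENSION log_fdw;
    -- build foreign tables over the server log files via the helper above
    SELECT pg_catalog.gs_create_log_tables();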

View File

@ -54,21 +54,15 @@ PG_MODULE_MAGIC;
extern "C" void _PG_init(void);
extern "C" void _PG_output_plugin_init(OutputPluginCallbacks* cb);
typedef struct {
MemoryContext context;
bool include_xids;
bool include_timestamp;
bool skip_empty_xacts;
bool xact_wrote_changes;
bool only_local;
} TestDecodingData;
static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init);
static void pg_decode_shutdown(LogicalDecodingContext* ctx);
static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_output_begin(
LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write);
LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn, bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn);
static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_decode_change(
LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change);
static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id);
@ -87,6 +81,8 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb)
cb->begin_cb = pg_decode_begin_txn;
cb->change_cb = pg_decode_change;
cb->commit_cb = pg_decode_commit_txn;
cb->abort_cb = pg_decode_abort_txn;
cb->prepare_cb = pg_decode_prepare_txn;
cb->filter_by_origin_cb = pg_decode_filter;
cb->shutdown_cb = pg_decode_shutdown;
}
@ -95,84 +91,33 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb)
static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init)
{
ListCell* option = NULL;
TestDecodingData* data = NULL;
PluginTestDecodingData* data = NULL;
data = (TestDecodingData*)palloc0(sizeof(TestDecodingData));
data = (PluginTestDecodingData*)palloc0(sizeof(PluginTestDecodingData));
data->context = AllocSetContextCreate(ctx->context,
"text conversion context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
data->include_xids = true;
data->include_timestamp = false;
data->include_timestamp = true;
data->skip_empty_xacts = false;
data->only_local = true;
data->tableWhiteList = NIL;
ctx->output_plugin_private = data;
opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT;
foreach (option, ctx->output_plugin_options) {
DefElem* elem = (DefElem*)lfirst(option);
Assert(elem->arg == NULL || IsA(elem->arg, String));
if (strcmp(elem->defname, "include-xids") == 0) {
/* if option does not provide a value, it means its value is true */
if (elem->arg == NULL)
data->include_xids = true;
else if (!parse_bool(strVal(elem->arg), &data->include_xids))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else if (strcmp(elem->defname, "include-timestamp") == 0) {
if (elem->arg == NULL)
data->include_timestamp = true;
else if (!parse_bool(strVal(elem->arg), &data->include_timestamp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else if (strcmp(elem->defname, "force-binary") == 0) {
bool force_binary = false;
if (elem->arg == NULL)
continue;
else if (!parse_bool(strVal(elem->arg), &force_binary))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
if (force_binary)
opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
} else if (strcmp(elem->defname, "skip-empty-xacts") == 0) {
if (elem->arg == NULL)
data->skip_empty_xacts = true;
else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else if (strcmp(elem->defname, "only-local") == 0) {
if (elem->arg == NULL)
data->only_local = true;
else if (!parse_bool(strVal(elem->arg), &data->only_local))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else {
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg(
"option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? strVal(elem->arg) : "(null)")));
}
ParseDecodingOptionPlugin(option, data, opt);
}
}
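
The hand-written option loop is replaced by a ParseDecodingOptionPlugin() helper, but the options it accepted (include-xids, include-timestamp, force-binary, skip-empty-xacts, only-local) are still the ones passed when changes are pulled from a logical slot. A hedged SQL sketch, assuming a slot named demo_slot was already created with this output plugin:

    -- peek at decoded changes; the option names match the parser above
    SELECT data
    FROM pg_logical_slot_peek_changes('demo_slot', NULL, NULL,
                                      'include-xids', 'true',
                                      'include-timestamp', 'true',
                                      'skip-empty-xacts', 'true');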
/* cleanup this plugin's resources */
static void pg_decode_shutdown(LogicalDecodingContext* ctx)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
/* cleanup our own resources via memory context reset */
MemoryContextDelete(data->context);
@ -181,7 +126,7 @@ static void pg_decode_shutdown(LogicalDecodingContext* ctx)
/* BEGIN callback */
static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
data->xact_wrote_changes = false;
if (data->skip_empty_xacts)
@ -190,7 +135,8 @@ static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* t
pg_output_begin(ctx, data, txn, true);
}
static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write)
static void pg_output_begin(LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn,
bool last_write)
{
OutputPluginPrepareWrite(ctx, last_write);
if (data->include_xids)
@ -203,7 +149,7 @@ static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data,
/* COMMIT callback */
static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes)
return;
@ -221,65 +167,57 @@ static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN*
OutputPluginWrite(ctx, true);
}
/* ABORT callback */
static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes)
return;
OutputPluginPrepareWrite(ctx, true);
if (data->include_xids)
appendStringInfo(ctx->out, "ABORT %lu", txn->xid);
else
appendStringInfoString(ctx->out, "ABORT");
if (data->include_timestamp)
appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time));
OutputPluginWrite(ctx, true);
}
/* PREPARE callback */
static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes)
return;
OutputPluginPrepareWrite(ctx, true);
if (data->include_xids)
appendStringInfo(ctx->out, "PREPARE %lu", txn->xid);
else
appendStringInfoString(ctx->out, "PREPARE");
if (data->include_timestamp)
appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time));
OutputPluginWrite(ctx, true);
}
static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->only_local && origin_id != InvalidRepOriginId)
return true;
return false;
}
/*
* Print literal `outputstr' already represented as string of type `typid'
* into stringbuf `s'.
*
* Some builtin types aren't quoted, the rest is quoted. Escaping is done as
* if u_sess->parser_cxt.standard_conforming_strings were enabled.
*/
static void print_literal(StringInfo s, Oid typid, char* outputstr)
{
const char* valptr = NULL;
switch (typid) {
case INT1OID:
case INT2OID:
case INT4OID:
case INT8OID:
case OIDOID:
case FLOAT4OID:
case FLOAT8OID:
case NUMERICOID:
/* NB: We don't care about Inf, NaN et al. */
appendStringInfoString(s, outputstr);
break;
case BITOID:
case VARBITOID:
appendStringInfo(s, "B'%s'", outputstr);
break;
case BOOLOID:
if (strcmp(outputstr, "t") == 0)
appendStringInfoString(s, "true");
else
appendStringInfoString(s, "false");
break;
default:
appendStringInfoChar(s, '\'');
for (valptr = outputstr; *valptr; valptr++) {
char ch = *valptr;
if (SQL_STR_DOUBLE(ch, false))
appendStringInfoChar(s, ch);
appendStringInfoChar(s, ch);
}
appendStringInfoChar(s, '\'');
break;
}
}
/* print the tuple 'tuple' into the StringInfo s */
static void TupleToJsoninfo(
cJSON* cols_name, cJSON* cols_type, cJSON* cols_val, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
@ -348,11 +286,11 @@ static void TupleToJsoninfo(
if (isnull)
appendStringInfoString(val_str, "null");
else if (!typisvarlena)
print_literal(val_str, typid, OidOutputFunctionCall(typoutput, origval));
PrintLiteral(val_str, typid, OidOutputFunctionCall(typoutput, origval));
else {
Datum val; /* definitely detoasted Datum */
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
print_literal(val_str, typid, OidOutputFunctionCall(typoutput, val));
PrintLiteral(val_str, typid, OidOutputFunctionCall(typoutput, val));
}
cJSON* col_val = cJSON_CreateString(val_str->data);
cJSON_AddItemToArray(cols_val, col_val);
@ -365,13 +303,12 @@ static void TupleToJsoninfo(
static void pg_decode_change(
LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change)
{
TestDecodingData* data = NULL;
PluginTestDecodingData* data = NULL;
Form_pg_class class_form;
TupleDesc tupdesc;
MemoryContext old;
char* res = NULL;
data = (TestDecodingData*)ctx->output_plugin_private;
u_sess->attr.attr_common.extra_float_digits = 0;
data = (PluginTestDecodingData*)ctx->output_plugin_private;
/* output BEGIN if we haven't yet */
if (data->skip_empty_xacts && !data->xact_wrote_changes) {
@ -385,11 +322,18 @@ static void pg_decode_change(
/* Avoid leaking memory by using and resetting our own context */
old = MemoryContextSwitchTo(data->context);
char *schema = get_namespace_name(class_form->relnamespace);
char *table = NameStr(class_form->relname);
if (data->tableWhiteList != NIL && !CheckWhiteList(data->tableWhiteList, schema, table)) {
(void)MemoryContextSwitchTo(old);
MemoryContextReset(data->context);
return;
}
OutputPluginPrepareWrite(ctx, true);
cJSON* root = cJSON_CreateObject();
cJSON* table_name = cJSON_CreateString(quote_qualified_identifier(
get_namespace_name(get_rel_namespace(RelationGetRelid(relation))), NameStr(class_form->relname)));
cJSON* table_name = cJSON_CreateString(quote_qualified_identifier(schema, table));
cJSON_AddItemToObject(root, "table_name", table_name);
cJSON* op_type = NULL;
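
pg_decode_change now builds one cJSON object per change, carrying the schema-qualified table_name plus parallel arrays of column names, types and values, and it drops changes for tables that are not on the configured white list. A sketch of wiring a slot to this plugin; the plugin name mppdb_decoding is an assumption based on the JSON output, and the white-list option is omitted because its name is not shown in this hunk:

    -- create a logical slot bound to the JSON output plugin (plugin name assumed)
    SELECT * FROM pg_create_logical_replication_slot('demo_json_slot', 'mppdb_decoding');
    -- consume the JSON-formatted changes
    SELECT data FROM pg_logical_slot_get_changes('demo_json_slot', NULL, NULL);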

View File

@ -2,7 +2,7 @@
# pagehack
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_pagehack_SRC)
set(TGT_pagehack_INC
${TGT_pq_INC} ${ZSTD_INCLUDE_PATH} ${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SRC_DIR}/lib/gstrace
${TGT_pq_INC} ${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SRC_DIR}/lib/gstrace
)
set(pagehack_DEF_OPTIONS ${MACRO_OPTIONS})
@ -11,13 +11,12 @@ if(${ENABLE_DEBUG} STREQUAL "ON")
endif()
set(pagehack_COMPILE_OPTIONS ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${CHECK_OPTIONS} ${BIN_SECURE_OPTIONS} ${OPTIMIZE_OPTIONS})
set(pagehack_LINK_OPTIONS ${BIN_LINK_OPTIONS})
set(pagehack_LINK_LIBS -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lzstd)
set(pagehack_LINK_LIBS -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz)
add_bintarget(pagehack TGT_pagehack_SRC TGT_pagehack_INC "${pagehack_DEF_OPTIONS}" "${pagehack_COMPILE_OPTIONS}" "${pagehack_LINK_OPTIONS}" "${pagehack_LINK_LIBS}")
add_dependencies(pagehack pgport_static)
target_link_directories(pagehack PUBLIC
${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} ${SECURE_LIB_PATH}
${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${CMAKE_BINARY_DIR}/lib
${ZSTD_LIB_PATH}
)
install(TARGETS pagehack RUNTIME DESTINATION bin)

View File

@ -1,6 +1,6 @@
# contrib/pagehack/Makefile
MODULE_big = pagehack
OBJS = openGaussCompression.o pagehack.o
OBJS = pagehack.o
# executable program, even when there is no database server/client
PROGRAM = pagehack
@ -14,7 +14,7 @@ subdir = contrib/pagehack
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
enable_shared = false
override CFLAGS += -lzstd
ifeq ($(enable_debug), yes)
PG_CPPFLAGS += -DDEBUG
endif

File diff suppressed because it is too large Load Diff

View File

@ -1,196 +0,0 @@
/*
* Copyright (c) Huawei Technologies Co., Ltd. 2012-2018. All rights reserved.
*/
#include "openGaussCompression.h"
#include "storage/checksum_impl.h"
#include "storage/page_compression_impl.h"
void OpenGaussCompression::SetFilePath(const char *filePath, int segNo)
{
int rc = snprintf_s(pcaFilePath, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, filePath);
securec_check_ss_c(rc, "\0", "\0");
rc = snprintf_s(pcdFilePath, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, filePath);
securec_check_ss_c(rc, "\0", "\0");
this->segmentNo = segNo;
}
OpenGaussCompression::~OpenGaussCompression()
{
if (pcaFd != nullptr) {
fclose(pcaFd);
}
if (pcdFd != nullptr) {
fclose(pcdFd);
}
if (header != nullptr) {
pc_munmap(header);
}
}
bool OpenGaussCompression::TryOpen()
{
if ((pcaFd = fopen(this->pcaFilePath, "rb+")) == nullptr) {
return false;
}
if ((pcdFd = fopen(this->pcdFilePath, "rb+")) == nullptr) {
return false;
}
if (fseeko(pcaFd, (off_t)offsetof(PageCompressHeader, chunk_size), SEEK_SET) != 0) {
return false;
}
if (fread(&chunkSize, sizeof(chunkSize), 1, this->pcaFd) <= 0) {
return false;
}
header = pc_mmap(fileno(pcaFd), chunkSize, false);
return true;
}
constexpr int MAX_RETRY_LIMIT = 60;
constexpr long RETRY_SLEEP_TIME = 1000000L;
bool OpenGaussCompression::ReadChunkOfBlock(char *dst, size_t *dstLen, BlockNumber blockNumber)
{
auto currentAddr = GET_PAGE_COMPRESS_ADDR(header, chunkSize, blockNumber);
size_t tryCount = 0;
do {
auto chunkNum = currentAddr->nchunks;
for (uint8 i = 0; i < chunkNum; i++) {
off_t seekPos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunkSize, currentAddr->chunknos[i]);
uint8 start = i;
while (i < chunkNum - 1 && currentAddr->chunknos[i + 1] == currentAddr->chunknos[i] + 1) {
i++;
}
if (fseeko(this->pcdFd, seekPos, SEEK_SET) != 0) {
return false;
}
size_t readAmount = chunkSize * (i - start + 1);
if (fread(dst + start * chunkSize, 1, readAmount, this->pcdFd) != readAmount && ferror(this->pcdFd)) {
return false;
}
*dstLen += readAmount;
}
if (chunkNum == 0) {
break;
}
if (DecompressPage(dst, decompressedBuffer, header->algorithm) == BLCKSZ) {
break;
}
if (tryCount < MAX_RETRY_LIMIT) {
++tryCount;
pg_usleep(RETRY_SLEEP_TIME);
} else {
return false;
}
} while (true);
if (PageIs8BXidHeapVersion(dst)) {
byteConvert = ((HeapPageCompressData *)dst)->byte_convert;
diffConvert = ((HeapPageCompressData *)dst)->diff_convert;
} else {
byteConvert = ((PageCompressData *)dst)->byte_convert;
diffConvert = ((PageCompressData *)dst)->diff_convert;
}
this->blockNumber = blockNumber;
return true;
}
bool OpenGaussCompression::WriteBackCompressedData(char *source, size_t sourceLen, BlockNumber blockNumber)
{
auto currentAddr = GET_PAGE_COMPRESS_ADDR(header, chunkSize, blockNumber);
for (size_t i = 0; i < currentAddr->nchunks; ++i) {
off_t seekPos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunkSize, currentAddr->chunknos[i]);
if (fseeko(this->pcdFd, seekPos, SEEK_SET) != 0) {
return false;
}
Assert(sourceLen >= i * chunkSize);
auto writeCount = fwrite(source + i * chunkSize, 1, chunkSize, this->pcdFd);
bool success = chunkSize == writeCount;
if (!success) {
return false;
}
}
fflush(this->pcdFd);
return true;
}
void OpenGaussCompression::MarkCompressedDirty(char *source, size_t sourceLen)
{
int rc = memset_s(source + SizeOfHeapPageHeaderData, sourceLen - SizeOfHeapPageHeaderData, 0xFF,
sourceLen - SizeOfHeapPageHeaderData);
securec_check(rc, "\0", "\0");
}
void OpenGaussCompression::MarkUncompressedDirty()
{
constexpr int writeLen = BLCKSZ / 2;
unsigned char fill_byte[writeLen] = {0xFF};
for (int i = 0; i < writeLen; i++)
fill_byte[i] = 0xFF;
auto rc = memcpy_s(decompressedBuffer + writeLen, BLCKSZ - writeLen, fill_byte, writeLen);
securec_check(rc, "", "");
}
BlockNumber OpenGaussCompression::GetMaxBlockNumber()
{
return (BlockNumber)pg_atomic_read_u32(&header->nblocks);
}
char *OpenGaussCompression::GetPcdFilePath()
{
return this->pcdFilePath;
}
char *OpenGaussCompression::GetDecompressedPage()
{
return this->decompressedBuffer;
}
bool OpenGaussCompression::WriteBackUncompressedData()
{
auto algorithm = header->algorithm;
auto workBufferSize = CompressPageBufferBound(decompressedBuffer, algorithm);
if (workBufferSize < 0) {
return false;
}
char *work_buffer = (char *)malloc(workBufferSize);
RelFileCompressOption relFileCompressOption;
relFileCompressOption.compressPreallocChunks = 0;
relFileCompressOption.compressLevelSymbol = true;
relFileCompressOption.compressLevel = 1;
relFileCompressOption.compressAlgorithm = algorithm;
relFileCompressOption.byteConvert = byteConvert;
relFileCompressOption.diffConvert = diffConvert;
auto compress_buffer_size = CompressPage(decompressedBuffer, work_buffer, workBufferSize, relFileCompressOption);
if (compress_buffer_size < 0) {
return false;
}
uint8 nchunks = (compress_buffer_size - 1) / chunkSize + 1;
auto bufferSize = chunkSize * nchunks;
if (bufferSize >= BLCKSZ) {
/* store the original page if compression cannot save space */
free(work_buffer);
work_buffer = (char *)decompressedBuffer;
nchunks = BLCKSZ / chunkSize;
} else {
/* fill zero in the last chunk */
if (compress_buffer_size < bufferSize) {
auto leftSize = bufferSize - compress_buffer_size;
errno_t rc = memset_s(work_buffer + compress_buffer_size, leftSize, 0, leftSize);
securec_check(rc, "", "");
}
}
uint8 need_chunks = nchunks;
PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(header, chunkSize, blockNumber);
if (pcAddr->allocated_chunks < need_chunks) {
auto chunkno = pg_atomic_fetch_add_u32(&header->allocated_chunks, need_chunks - pcAddr->allocated_chunks);
for (uint8 i = pcAddr->allocated_chunks; i < need_chunks; ++i) {
pcAddr->chunknos[i] = ++chunkno;
}
pcAddr->allocated_chunks = need_chunks;
pcAddr->nchunks = need_chunks;
}
return this->WriteBackCompressedData(work_buffer, compress_buffer_size, blockNumber);
}
#include "compression_algorithm.ini"

View File

@ -1,41 +0,0 @@
#ifndef OPENGAUSS_SERVER_OPENGAUSSCOMPRESSION_H
#define OPENGAUSS_SERVER_OPENGAUSSCOMPRESSION_H
#define FRONTEND 1
#include <stdio.h>
#include "c.h"
#include "storage/buf/block.h"
#include "storage/page_compression.h"
class OpenGaussCompression {
private:
FILE* pcaFd = nullptr;
FILE* pcdFd = nullptr;
char pcaFilePath[MAXPGPATH];
char pcdFilePath[MAXPGPATH];
PageCompressHeader* header = nullptr;
private:
int segmentNo;
BlockNumber blockNumber;
decltype(PageCompressHeader::chunk_size) chunkSize;
char decompressedBuffer[BLCKSZ];
bool byteConvert;
bool diffConvert;
public:
void SetFilePath(const char* filePath, int segNo);
virtual ~OpenGaussCompression();
bool TryOpen();
bool ReadChunkOfBlock(char* dst, size_t* dstLen, BlockNumber blockNumber);
bool WriteBackCompressedData(char* source, size_t sourceLen, BlockNumber blockNumber);
bool WriteBackUncompressedData();
void MarkCompressedDirty(char* source, size_t sourceLen);
void MarkUncompressedDirty();
BlockNumber GetMaxBlockNumber();
char* GetPcdFilePath();
char* GetDecompressedPage();
};
#endif // OPENGAUSS_SERVER_OPENGAUSSCOMPRESSION_H

View File

@ -86,17 +86,16 @@
#include "utils/timestamp.h"
#include "cstore.h"
#include "common/build_query/build_query.h"
#include <libgen.h>
#ifdef ENABLE_MULTIPLE_NODES
#include "tsdb/utils/constant_def.h"
#endif
#include "openGaussCompression.h"
/* Max number of pg_class oid, currently about 4000 */
#define MAX_PG_CLASS_ID 10000
/* Number of pg_class types */
#define CLASS_TYPE_NUM 512
#define TEN 10
typedef unsigned char* binary;
static const char* indents[] = { // 10 tabs are enough to use.
@ -113,6 +112,14 @@ static const char* indents[] = { // 10 tab is enough to used.
"\t\t\t\t\t\t\t\t\t\t"};
static const int nIndents = sizeof(indents) / sizeof(indents[0]);
static int indentLevel = 0;
static uint64 g_tdCount = 0;
static uint64 g_rpCount = 0;
static uint64 g_tdMax = 0;
static uint64 g_rpMax = 0;
static uint64 g_pageCount = 0;
static uint64 g_freeSpace = 0;
static uint64 g_freeMax = 0;
static HeapTupleData dummyTuple;
// add these special tables to the parser so we can read their tuple data
@ -132,7 +139,6 @@ static const char* PgHeapRelName[] = {"pg_class",
"pg_am",
"pg_statistic",
"pg_toast"};
typedef enum SegmentType { SEG_HEAP, SEG_FSM, SEG_UHEAP, SEG_INDEX_BTREE, SEG_UNDO, SEG_UNKNOWN } SegmentType;
static void ParsePgClassTupleData(binary tupdata, int len, binary nullBitmap, int nattrs);
static void ParsePgIndexTupleData(binary tupdata, int len, binary nullBitmap, int nattrs);
@ -150,8 +156,6 @@ static void ParseToastTupleData(binary tupdata, int len, binary nullBitmap, int
static void ParseTDSlot(const char *page);
static void ParseToastIndexTupleData(binary tupdata, int len, binary nullBitmap, int nattrs);
static int parse_uncompressed_page_file(const char *filename, SegmentType type, const uint32 start_point,
const uint32 number_read);
static ParseHeapTupleData PgHeapRelTupleParser[] = {
ParsePgClassTupleData, // pg_class
@ -806,11 +810,6 @@ typedef struct TwoPhaseRecordOnDisk {
uint16 info; /* flag bits for use by rmgr */
} TwoPhaseRecordOnDisk;
typedef struct TwoPhaseLockRecord {
LOCKTAG locktag;
LOCKMODE lockmode;
} TwoPhaseLockRecord;
typedef struct TwoPhaseFileHeader {
uint32 magic; /* format identifier */
uint32 total_len; /* actual file length */
@ -900,6 +899,8 @@ static const char* HACKINGTYPE[] = {"heap",
"segment"
};
typedef enum SegmentType { SEG_HEAP, SEG_FSM, SEG_UHEAP, SEG_INDEX_BTREE, SEG_UNDO, SEG_UNKNOWN } SegmentType;
const char* PageTypeNames[] = {"DATA", "FSM", "VM"};
#define GETHEAPSTRUCT(TUP) ((unsigned char*)(TUP) + (TUP)->t_hoff)
@ -1284,7 +1285,7 @@ static void ParseTsCudescXXTupleData(binary tupdata, int len, binary nullBitmap,
unsigned char ch = 0;
unsigned char bitmask = 0;
bool isnulls[nattrs] = {0};
if (NULL != nullBitmap) {
datlen = (nattrs + 7) / 8;
j = 0;
@ -1317,7 +1318,7 @@ static void ParseTsCudescXXTupleData(binary tupdata, int len, binary nullBitmap,
fprintf(stdout, "\n%s" "CUId: %u", indents[indentLevel], *(uint32*)nextAttr);
nextAttr += sizeof(uint32);
}
if (!isnulls[3]) {
// rough check MIN/MAX
nextAttr = (char*)att_align_pointer((long int)nextAttr, 'i', -1, nextAttr);
@ -2394,7 +2395,7 @@ static void ParsePgAttributeTupleData(binary tupdata, int len, binary nullBitmap
indents[indentLevel],
(pgAttributeTupData->attcollation));
fprintf(stdout,
"\n%s" "attkvtype: %d",
"\n%s" "attkvtype: %d",
indents[indentLevel],
(pgAttributeTupData->attkvtype));
@ -2427,6 +2428,40 @@ static void parse_uheap_item(const Item item, unsigned len, int blkno, int linen
fprintf(stdout, "\t\t\tNumber of columns: %d\n", UHeapTupleHeaderGetNatts(utuple));
fprintf(stdout, "\t\t\tFlag: %d\n", utuple->flag);
if (utuple->flag & UHEAP_HAS_NULL) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_HASNULL ");
}
if (utuple->flag & UHEAP_DELETED) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_DELETED ");
}
if (utuple->flag & UHEAP_INPLACE_UPDATED) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_INPLACE_UPDATED ");
}
if (utuple->flag & UHEAP_UPDATED) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_UPDATED ");
}
if (utuple->flag & UHEAP_XID_KEYSHR_LOCK) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_XID_KEYSHR_LOCK ");
}
if (utuple->flag & UHEAP_XID_NOKEY_EXCL_LOCK) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_XID_NOKEY_EXCL_LOCK ");
}
if (utuple->flag & UHEAP_XID_EXCL_LOCK) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_XID_EXCL_LOCK ");
}
if (utuple->flag & UHEAP_MULTI_LOCKERS) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_MULTI_LOCKERS ");
}
if (utuple->flag & UHEAP_INVALID_XACT_SLOT) {
fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_INVALID_XACT_SLOT ");
}
if (utuple->flag & SINGLE_LOCKER_XID_IS_LOCK) {
fprintf(stdout, "\t\t\tFlag: %s\n", "SINGLE_LOCKER_XID_IS_LOCK ");
}
if (utuple->flag & SINGLE_LOCKER_XID_IS_SUBXACT) {
fprintf(stdout, "\t\t\tFlag: %s\n", "SINGLE_LOCKER_XID_IS_SUBXACT ");
}
fprintf(stdout, "%sdata:", indents[indentLevel]);
content = ((unsigned char *)(utuple) + utuple->t_hoff);
len -= utuple->t_hoff;
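For reference, the run of per-flag checks added above can also be written as a loop over a (mask, name) table. The sketch below is a minimal standalone version of that pattern; the DEMO_* masks are made-up stand-ins for the real UHEAP_* bits.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Made-up mask values; the real UHEAP_* flags live in the ustore headers. */
#define DEMO_HAS_NULL        0x0001
#define DEMO_DELETED         0x0002
#define DEMO_INPLACE_UPDATED 0x0004
#define DEMO_UPDATED         0x0008

static const struct { uint16_t mask; const char *name; } demo_flag_names[] = {
    { DEMO_HAS_NULL,        "UHEAP_HASNULL" },
    { DEMO_DELETED,         "UHEAP_DELETED" },
    { DEMO_INPLACE_UPDATED, "UHEAP_INPLACE_UPDATED" },
    { DEMO_UPDATED,         "UHEAP_UPDATED" },
};

static void demo_print_flags(uint16_t flag)
{
    /* One "Flag:" line per bit set, same output shape as the checks above. */
    for (size_t i = 0; i < sizeof(demo_flag_names) / sizeof(demo_flag_names[0]); i++) {
        if (flag & demo_flag_names[i].mask) {
            printf("\t\t\tFlag: %s\n", demo_flag_names[i].name);
        }
    }
}

int main(void)
{
    demo_print_flags(DEMO_HAS_NULL | DEMO_UPDATED); /* prints UHEAP_HASNULL and UHEAP_UPDATED */
    return 0;
}

Either form prints one line per bit set; the table keeps the list of flag names in a single place.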
@ -2509,11 +2544,10 @@ static void parse_heap_item(const Item item, unsigned len, int blkno, int lineno
tup->t_infomask &= ~HEAP_HASEXTERNAL;
tup->t_infomask &= ~HEAP_XMAX_COMMITTED;
tup->t_infomask &= ~HEAP_COMBOCID;
tup->t_infomask &= ~HEAP_MOVED_IN;
tup->t_infomask |= HEAP_XMIN_INVALID;
fprintf(stdout,
"force writer tup->t_infomask to HEAP_XMIN_INVALID and clean HEAP_XMIN_COMMITTED | HEAP_COMPRESSED | "
"HEAP_HASEXTERNAL | HEAP_XMAX_COMMITTED | HEAP_COMBOCID | HEAP_MOVED_IN");
"HEAP_HASEXTERNAL | HEAP_XMAX_COMMITTED | HEAP_COMBOCID");
}
if (tup->t_infomask & HEAP_XMAX_COMMITTED)
fprintf(stdout, "HEAP_XMAX_COMMITTED ");
@ -2523,10 +2557,11 @@ static void parse_heap_item(const Item item, unsigned len, int blkno, int lineno
fprintf(stdout, "HEAP_XMAX_IS_MULTI ");
if (tup->t_infomask & HEAP_UPDATED)
fprintf(stdout, "HEAP_UPDATED ");
if (tup->t_infomask & HEAP_MOVED_OFF)
fprintf(stdout, "HEAP_MOVED_OFF ");
if (tup->t_infomask & HEAP_MOVED_IN)
fprintf(stdout, "HEAP_MOVED_IN ");
if ((tup->t_infomask & HEAP_HAS_8BYTE_UID)) {
fprintf(stdout, "HEAP_HAS_8BYTE_UID ");
} else {
fprintf(stdout, "HEAP_HAS_NO_UID ");
}
fprintf(stdout, "\n");
fprintf(stdout, "%st_infomask2: ", indents[indentLevel]);
@ -2644,6 +2679,7 @@ static void parse_one_item(const Item item, unsigned len, int blkno, int lineno,
static void ParseHeapPageHeader(const PageHeader page, int blkno, int blknum)
{
bool checksum_matched = false;
uint64 freeSpace = 0;
if (CheckPageZeroCases(page)) {
uint16 checksum = pg_checksum_page((char*)page, (BlockNumber)blkno + (SegNo * ((BlockNumber)RELSEG_SIZE)));
checksum_matched = (checksum == page->pd_checksum);
@ -2673,25 +2709,32 @@ static void ParseHeapPageHeader(const PageHeader page, int blkno, int blknum)
"\tPage size & version: %u, %u\n",
(uint16)PageGetPageSize(page),
(uint16)PageGetPageLayoutVersion(page));
if (true) {
fprintf(stdout,
"\tpd_xid_base: %lu, pd_multi_base: %lu\n",
((HeapPageHeader)(page))->pd_xid_base,
((HeapPageHeader)(page))->pd_multi_base);
fprintf(stdout,
"\tpd_prune_xid: %lu\n",
((HeapPageHeader)(page))->pd_prune_xid + ((HeapPageHeader)(page))->pd_xid_base);
} else
fprintf(stdout, "\tpd_prune_xid: %u\n", page->pd_prune_xid);
fprintf(stdout,
"\tpd_xid_base: %lu, pd_multi_base: %lu\n",
((HeapPageHeader)(page))->pd_xid_base,
((HeapPageHeader)(page))->pd_multi_base);
fprintf(stdout,
"\tpd_prune_xid: %lu\n",
((HeapPageHeader)(page))->pd_prune_xid + ((HeapPageHeader)(page))->pd_xid_base);
if (page->pd_upper < page->pd_lower) {
fprintf(stdout, "WARNING: INVALID PAGE!");
} else {
freeSpace = page->pd_upper - page->pd_lower;
g_freeMax = freeSpace > g_freeMax ? freeSpace : g_freeMax;
g_freeSpace += freeSpace;
}
g_pageCount++;
return;
}
static void ParseUHeapPageHeader(const PageHeader page, int blkno, int blknum)
{
bool checksum_matched = false;
uint64 tdCount = 0;
uint64 freeSpace = 0;
UHeapPageHeader upage = (UHeapPageHeader)page;
if (CheckPageZeroCases(page)) {
uint16 checksum = pg_checksum_page((char*)page, (BlockNumber)blkno);
uint16 checksum = pg_checksum_page((char*)page, (BlockNumber)blkno + (SegNo * ((BlockNumber)RELSEG_SIZE)));
checksum_matched = (checksum == page->pd_checksum);
}
fprintf(stdout, "page information of block %d/%d\n", blkno, blknum);
@ -2699,9 +2742,9 @@ static void ParseUHeapPageHeader(const PageHeader page, int blkno, int blknum)
fprintf(stdout, "\tpd_checksum: 0x%X, verify %s\n", page->pd_checksum, checksum_matched ? "success" : "fail");
fprintf(stdout, "\tpd_flags: ");
if (PageHasFreeLinePointers(page))
if (UPageHasFreeLinePointers(page))
fprintf(stdout, "PD_HAS_FREE_LINES ");
if (PageIsFull(page))
if (UPageIsFull(page))
fprintf(stdout, "PD_PAGE_FULL ");
if (PageIsAllVisible(page))
fprintf(stdout, "PD_ALL_VISIBLE ");
@ -2712,22 +2755,27 @@ static void ParseUHeapPageHeader(const PageHeader page, int blkno, int blknum)
if (PageIsEncrypt(page))
fprintf(stdout, "PD_ENCRYPT_PAGE ");
fprintf(stdout, "\n");
fprintf(stdout, "\tpd_lower: %u, %s\n", page->pd_lower, PageIsEmpty(page) ? "empty" : "non-empty");
fprintf(stdout, "\tpd_lower: %u, %s\n", page->pd_lower, UPageIsEmpty(upage) ? "empty" : "non-empty");
fprintf(stdout, "\tpd_upper: %u, %s\n", page->pd_upper, PageIsNew(page) ? "new" : "old");
fprintf(stdout, "\tpd_special: %u, size %u\n", page->pd_special, PageGetSpecialSize(page));
fprintf(stdout,
"\tPage size & version: %u, %u\n",
(uint16)PageGetPageSize(page),
(uint16)PageGetPageLayoutVersion(page));
fprintf(stdout, "\tpotential_freespace: %u\n", ((UHeapPageHeaderData *)(page))->potential_freespace);
fprintf(stdout, "\ttd_count: %u\n", ((UHeapPageHeaderData *)(page))->td_count);
fprintf(stdout, "\tpd_prune_xid: %lu\n", ((UHeapPageHeaderData *)(page))->pd_prune_xid);
fprintf(stdout,
"\tpd_xid_base: %lu, pd_multi_base: %lu\n",
((UHeapPageHeaderData *)(page))->pd_xid_base,
((UHeapPageHeaderData *)(page))->pd_multi_base);
fprintf(stdout, "\tPage size & version: %u, %u\n",
(uint16)PageGetPageSize(page), (uint16)PageGetPageLayoutVersion(page));
fprintf(stdout, "\tpotential_freespace: %u\n", upage->potential_freespace);
fprintf(stdout, "\ttd_count: %u\n", upage->td_count);
fprintf(stdout, "\tpd_prune_xid: %lu\n", upage->pd_prune_xid);
fprintf(stdout, "\tpd_xid_base: %lu, pd_multi_base: %lu\n",
upage->pd_xid_base, upage->pd_multi_base);
if (upage->pd_upper < upage->pd_lower) {
fprintf(stdout, "WARNING: INVALID PAGE!");
} else {
freeSpace = upage->pd_upper - upage->pd_lower;
g_freeMax = freeSpace > g_freeMax ? freeSpace : g_freeMax;
g_freeSpace += freeSpace;
}
g_pageCount++;
tdCount = upage->td_count;
g_tdCount += tdCount;
g_tdMax = tdCount > g_tdMax ? tdCount : g_tdMax;
return;
}
@ -2777,14 +2825,10 @@ static void parse_special_data(const char* buffer, SegmentType type)
if (!P_ISDELETED(opaque))
fprintf(stdout, "\tbtree tree level: %u\n", opaque->btpo.level);
else {
if (PageIs4BXidVersion(page))
fprintf(stdout, "\tnext txid_old (deleted): %u\n", opaque->btpo.xact_old);
else {
if (uopaque)
fprintf(stdout, "\tnext txid (deleted): %lu\n", ((UBTPageOpaque)uopaque)->xact);
else
fprintf(stdout, "\tnext txid (deleted): %lu\n", ((BTPageOpaque)opaque)->xact);
}
if (uopaque)
fprintf(stdout, "\tnext txid (deleted): %lu\n", ((UBTPageOpaque)uopaque)->xact);
else
fprintf(stdout, "\tnext txid (deleted): %lu\n", ((BTPageOpaque)opaque)->xact);
}
fprintf(stdout, "\tbtree flag: ");
if (P_ISLEAF(opaque))
@ -2897,12 +2941,14 @@ static void parse_heap_or_index_page(const char* buffer, int blkno, SegmentType
nunused = nnormal = ndead = 0;
if (type == SEG_UHEAP) {
UHeapPageHeaderData *upghdr = (UHeapPageHeaderData *)buffer;
if (upghdr->pd_lower <= SizeOfUHeapPageHeaderData)
if (upghdr->pd_lower <= SizeOfUHeapPageHeaderData) {
nline = 0;
else
nline =
(upghdr->pd_lower - (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData(upghdr))) / sizeof(RowPtr);
} else {
nline = (upghdr->pd_lower - (SizeOfUHeapPageHeaderData +
SizeOfUHeapTDData(upghdr))) / sizeof(RowPtr);
g_rpCount += (uint64)nline;
g_rpMax = (uint64)nline > g_rpMax ? (uint64)nline : g_rpMax;
}
fprintf(stdout, "\n\tUHeap tuple information on this page\n");
for (i = FirstOffsetNumber; i <= nline; i++) {
rowptr = UPageGetRowPtr(buffer, i);
@ -2936,6 +2982,8 @@ static void parse_heap_or_index_page(const char* buffer, int blkno, SegmentType
parse_special_data(buffer, type);
} else if (type == SEG_HEAP || type == SEG_INDEX_BTREE) {
nline = PageGetMaxOffsetNumber((Page)page);
g_rpCount += (uint64)nline;
g_rpMax = (uint64)nline > g_rpMax ? (uint64)nline : g_rpMax;
fprintf(stdout, "\n\tHeap tuple information on this page\n");
for (i = FirstOffsetNumber; i <= nline; i++) {
lp = PageGetItemId(page, i);
@ -3097,78 +3145,7 @@ static int parse_a_page(const char* buffer, int blkno, int blknum, SegmentType t
return true;
}
static BlockNumber CalculateMaxBlockNumber(BlockNumber blknum, BlockNumber start, BlockNumber number)
{
/* parse */
if (start >= blknum) {
fprintf(stderr, "start point exceeds the total block number of relation.\n");
return InvalidBlockNumber;
} else if ((start + number) > blknum) {
fprintf(stderr, "don't have %d blocks from block %d in the relation, only %d blocks\n", number, start,
(blknum - start));
number = blknum;
} else if (number == 0) {
number = blknum;
} else {
number += start;
}
return number;
}
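CalculateMaxBlockNumber returns the exclusive upper bound that the parse loop later compares against (while (start < number)), or InvalidBlockNumber when the start point is out of range. A compact standalone restatement with sample inputs; the names here are illustrative.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t DemoBlockNumber;
#define DEMO_INVALID_BLOCK ((DemoBlockNumber)0xFFFFFFFF) /* stands in for InvalidBlockNumber */

/* Same clamping rule as CalculateMaxBlockNumber above. */
static DemoBlockNumber demo_clamp_upper_bound(DemoBlockNumber blknum, DemoBlockNumber start, DemoBlockNumber number)
{
    if (start >= blknum)
        return DEMO_INVALID_BLOCK;      /* nothing to parse */
    if (start + number > blknum || number == 0)
        return blknum;                  /* read through to the last block */
    return start + number;              /* read exactly 'number' blocks */
}

int main(void)
{
    printf("%u\n", (unsigned)demo_clamp_upper_bound(100, 10, 20)); /* 30: blocks 10..29 */
    printf("%u\n", (unsigned)demo_clamp_upper_bound(100, 90, 20)); /* 100: clamped to the relation end */
    printf("%u\n", (unsigned)demo_clamp_upper_bound(100, 10, 0));  /* 100: 0 means "to the end" */
    return 0;
}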
static int parse_page_file(const char* filename, SegmentType type, const uint32 start_point, const uint32 number_read)
{
if (type != SEG_HEAP && type != SEG_INDEX_BTREE) {
return parse_uncompressed_page_file(filename, type, start_point, number_read);
}
auto openGaussCompression = new OpenGaussCompression();
openGaussCompression->SetFilePath(filename, SegNo);
bool success = openGaussCompression->TryOpen();
if (!success) {
delete openGaussCompression;
return parse_uncompressed_page_file(filename, type, start_point, number_read);
}
BlockNumber start = start_point;
BlockNumber blknum = openGaussCompression->GetMaxBlockNumber();
BlockNumber number = CalculateMaxBlockNumber(blknum, start, number_read);
if (number == InvalidBlockNumber) {
delete openGaussCompression;
return false;
}
char compressed[BLCKSZ];
size_t compressedLen;
while (start < number) {
if (!openGaussCompression->ReadChunkOfBlock(compressed, &compressedLen, start)) {
fprintf(stderr, "read block %d failed, filename: %s: %s\n", start, openGaussCompression->GetPcdFilePath(),
strerror(errno));
delete openGaussCompression;
return false;
}
if (!parse_a_page(openGaussCompression->GetDecompressedPage(), start, blknum, type)) {
fprintf(stderr, "Error during parsing block %d/%d\n", start, blknum);
delete openGaussCompression;
return false;
}
if ((write_back && num_item) || dirty_page) {
if (dirty_page) {
openGaussCompression->MarkUncompressedDirty();
}
if (!openGaussCompression->WriteBackUncompressedData()) {
fprintf(stderr, "write back failed, filename: %s: %s\n", openGaussCompression->GetPcdFilePath(),
strerror(errno));
delete openGaussCompression;
return false;
}
}
start++;
}
delete openGaussCompression;
return true;
}
static int parse_uncompressed_page_file(const char *filename, SegmentType type, const uint32 start_point,
const uint32 number_read)
{
char buffer[BLCKSZ];
FILE* fd = NULL;
@ -3196,9 +3173,21 @@ static int parse_uncompressed_page_file(const char *filename, SegmentType type,
blknum = size / BLCKSZ;
/* parse */
number = CalculateMaxBlockNumber(blknum, start, number);
if (number == InvalidBlockNumber) {
if (start >= blknum) {
fprintf(stderr, "start point exceeds the total block number of relation.\n");
fclose(fd);
return false;
} else if ((start + number) > blknum) {
fprintf(stderr,
"don't have %u blocks from block %u in the relation, only %u blocks\n",
number,
start,
(blknum - start));
number = blknum;
} else if (number == 0) {
number = blknum;
} else {
number += start;
}
Assert((start * BLCKSZ) < size);
@ -3214,7 +3203,7 @@ static int parse_uncompressed_page_file(const char *filename, SegmentType type,
}
if (!parse_a_page(buffer, start, blknum, type)) {
fprintf(stderr, "Error during parsing block %d/%d\n", start, blknum);
fprintf(stderr, "Error during parsing block %u/%u\n", start, blknum);
fclose(fd);
return false;
}
@ -3235,6 +3224,16 @@ static int parse_uncompressed_page_file(const char *filename, SegmentType type,
start++;
}
float8 rpAvg = g_pageCount == 0 ? 0 : (float8)g_rpCount / g_pageCount;
float8 tdAvg = g_pageCount == 0 ? 0 : (float8)g_tdCount / g_pageCount;
float8 freeAvg = g_pageCount == 0 ? 0 : (float8)g_freeSpace / g_pageCount;
fprintf(stdout, "Relation information : pageCount %lu.\n", g_pageCount);
fprintf(stdout, "RP information : rpCount %lu, rpMax %lu, rpAvg %f.\n",
g_rpCount, g_rpMax, rpAvg);
fprintf(stdout, "TD information : tdCount %lu, tdMax %lu, tdAvg %f.\n",
g_tdCount, g_tdMax, tdAvg);
fprintf(stdout, "Freespace information : freeTotal %lu, freeMax %lu, freeAvg %f.\n",
g_freeSpace, g_freeMax, freeAvg);
fclose(fd);
return true;
@ -3343,7 +3342,7 @@ static void parse_relation_options_struct(char* vardata, int relkind)
--indentLevel;
}
/* else can index relation options but won't solve it now*/
}
static void parse_oid_array(Oid* ids, int nids)
@ -3751,6 +3750,7 @@ static int parse_cu_file(char* filename, uint64 offset)
/* parse a replslot file */
static int parse_slot_file(char* filename)
{
const uint32 upperLen = 32;
FILE* fd = NULL;
ReplicationSlotOnDisk cp;
size_t readBytes = 0;
@ -3822,8 +3822,11 @@ static int parse_slot_file(char* filename)
fprintf(stdout, "xmin: %lu\n", cp.slotdata.xmin);
fprintf(
stdout, "restart_lsn: %X/%X\n", (uint32)(cp.slotdata.restart_lsn >> 32), (uint32)(cp.slotdata.restart_lsn));
stdout, "restart_lsn: %X/%X\n", (uint32)(cp.slotdata.restart_lsn >> upperLen),
(uint32)(cp.slotdata.restart_lsn));
fprintf(
stdout, "confirmed_flush: %X/%X\n", (uint32)(cp.slotdata.confirmed_flush >> upperLen),
(uint32)(cp.slotdata.confirmed_flush));
fclose(fd);
return true;
@ -4126,7 +4129,7 @@ static int parse_csnlog_file(char* filename)
return true;
}
static bool parse_dw_file_head(char* file_head, dw_file_head_t* saved_file_head)
static bool parse_dw_file_head(char* file_head, dw_file_head_t* saved_file_head, int size = 0)
{
uint32 i;
uint16 id;
@ -4256,7 +4259,8 @@ static uint16 parse_batch_data_pages(dw_batch_t* curr_head, uint16 page_num)
return page_num;
}
static uint16 calc_reading_pages(dw_batch_t** curr_head, char* start_buf, uint16 read_pages, uint16 file_page_id)
static uint16 calc_reading_pages(dw_batch_t** curr_head, char* start_buf, uint16 read_pages, uint16 file_page_id,
uint16 dw_batch_page_num)
{
uint16 buf_page_id;
errno_t rc;
@ -4278,11 +4282,11 @@ static uint16 calc_reading_pages(dw_batch_t** curr_head, char* start_buf, uint16
}
Assert((char*)(*curr_head) + (read_pages + readingPages) * BLCKSZ <= start_buf + DW_BUF_MAX * BLCKSZ);
Assert(file_page_id + read_pages + readingPages <= DW_FILE_PAGE);
Assert(file_page_id + read_pages + readingPages <= dw_batch_page_num);
return (uint16)readingPages;
}
static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint16 page_num)
static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint16 page_num, uint16 dw_batch_page_num)
{
uint16 file_page_id, read_pages;
uint16 reading_pages;
@ -4296,7 +4300,7 @@ static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint1
start_buf = buf;
curr_head = (dw_batch_t*)start_buf;
read_pages = 0;
reading_pages = Min(DW_BATCH_MAX_FOR_NOHBK, (DW_FILE_PAGE - file_page_id));
reading_pages = Min(DW_BATCH_MAX_FOR_NOHBK, (dw_batch_page_num - file_page_id));
flush_pages = 0;
for (;;) {
@ -4328,43 +4332,92 @@ static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint1
break;
}
reading_pages = calc_reading_pages(&curr_head, start_buf, read_pages, file_page_id);
reading_pages = calc_reading_pages(&curr_head, start_buf, read_pages, file_page_id, dw_batch_page_num);
}
}
static bool parse_dw_file(const char* file_name, uint32 start_page, uint32 page_num)
{
char* buf;
errno_t rc;
FILE* fd;
size_t result;
uint32 dw_batch_page_num;
dw_file_head_t file_head;
char meta_path[PATH_MAX];
char cur_dir[PATH_MAX];
dw_batch_meta_file* batch_meta_file;
char* meta_buf = NULL;
char* dw_buf = NULL;
rc = strcpy_s(cur_dir, PATH_MAX, file_name);
securec_check(rc, "", "");
(void)dirname(cur_dir);
rc = strcpy_s(meta_path, PATH_MAX, cur_dir);
securec_check(rc, "", "");
rc = strcat_s(meta_path, PATH_MAX, "\\");
securec_check(rc, "", "");
rc = strcat_s(meta_path, PATH_MAX, DW_META_FILE);
securec_check(rc, "", "");
fd = fopen(meta_path, "rb+");
if (fd == NULL) {
fprintf(stderr, "%s: %s\n", meta_path, strerror(errno));
return false;
}
meta_buf = (char*)malloc(DW_META_FILE_BLOCK_NUM * BLCKSZ);
if (meta_buf == NULL) {
fclose(fd);
fprintf(stderr, "out of memory\n");
return false;
}
result = fread(meta_buf, sizeof(dw_batch_meta_file), 1, fd);
if (result != 1) {
free(meta_buf);
fclose(fd);
fprintf(stderr, "read %s: %s\n", meta_path, strerror(errno));
return false;
}
batch_meta_file = (dw_batch_meta_file *) meta_buf;
dw_batch_page_num = (uint32) (DW_FILE_SIZE_UNIT * batch_meta_file->dw_file_size / BLCKSZ);
free(meta_buf);
fclose(fd);
fd = fopen(file_name, "rb+");
if (fd == NULL) {
fprintf(stderr, "%s: %s\n", file_name, strerror(errno));
return false;
}
buf = (char*)malloc(BLCKSZ * DW_BUF_MAX_FOR_NOHBK);
if (buf == NULL) {
dw_buf = (char*)malloc(BLCKSZ * DW_BUF_MAX_FOR_NOHBK);
if (dw_buf == NULL) {
fclose(fd);
fprintf(stderr, "out of memory\n");
return false;
}
result = fread(buf, BLCKSZ, 1, fd);
result = fread(dw_buf, BLCKSZ, 1, fd);
if (result != 1) {
free(buf);
free(dw_buf);
fclose(fd);
fprintf(stderr, "read %s: %s\n", file_name, strerror(errno));
return false;
}
if (!parse_dw_file_head(buf, &file_head)) {
free(buf);
if (!parse_dw_file_head(dw_buf, &file_head, BLCKSZ)) {
free(dw_buf);
fclose(fd);
return false;
}
if (start_page != 0) {
Assert(start_page < DW_FILE_PAGE);
if (start_page >= dw_batch_page_num) {
fprintf(stdout, "start_page %u exceeds the double write file upper limit offset %u\n",
start_page, dw_batch_page_num - 1);
return false;
}
file_head.start = (uint16)start_page;
if (page_num != 0) {
@ -4374,11 +4427,12 @@ static bool parse_dw_file(const char* file_name, uint32 start_page, uint32 page_
}
}
if (page_num == 0) {
page_num = DW_FILE_PAGE - start_page;
page_num = dw_batch_page_num - start_page;
}
parse_dw_batch(buf, fd, &file_head, (uint16)page_num);
free(buf);
parse_dw_batch(dw_buf, fd, &file_head, (uint16)page_num, (uint16)dw_batch_page_num);
free(dw_buf);
fclose(fd);
return true;
}
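parse_dw_file now sizes the batch area from the dw_batch_meta_file it reads first, instead of the old fixed DW_FILE_PAGE limit. A worked example of that size arithmetic; the 1 MB DW_FILE_SIZE_UNIT assumed below is illustrative, while 8 kB is the default BLCKSZ.

#include <stdio.h>
#include <stdint.h>

#define DEMO_BLCKSZ            8192u           /* default page size */
#define DEMO_DW_FILE_SIZE_UNIT (1024u * 1024u) /* assumed 1 MB unit for this example */

int main(void)
{
    uint32_t dw_file_size = 16; /* as if read from dw_batch_meta_file.dw_file_size */
    uint32_t pages = (uint32_t)((uint64_t)DEMO_DW_FILE_SIZE_UNIT * dw_file_size / DEMO_BLCKSZ);
    printf("a %u-unit double-write file holds %u pages\n", (unsigned)dw_file_size, (unsigned)pages); /* 2048 */
    return 0;
}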
@ -4566,7 +4620,7 @@ static bool parse_dw_single_flush_file(const char* file_name)
fseek(fd, (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ, SEEK_SET);
result = fread(second_file_head, 1, BLCKSZ, fd);
fseek(fd, (1 + DW_FIRST_DATA_PAGE_NUM + 1) * BLCKSZ, SEEK_SET);
result = fread(item_buf, 1, blk_num * BLCKSZ, fd);
if (blk_num * BLCKSZ != result) {
@ -4684,7 +4738,7 @@ void CheckCRC(pg_crc32 comCrcVal, pg_crc32 pageCrcVal, const uint64 readSize, ch
FIN_CRC32C(comCrcVal);
if (!EQ_CRC32C(pageCrcVal, comCrcVal)) {
fprintf(stderr,
fprintf(stderr,
"Undo meta CRC calculated(%u) is different from CRC recorded(%u) in page.\n", comCrcVal, pageCrcVal);
return;
}
@ -4734,8 +4788,8 @@ static int ParseUndoZoneMeta(const char *filename, int zid)
/* Get page CRC from uspMetaBuffer. */
pageCrcVal = *(pg_crc32 *) (uspMetaBuffer + readSize);
/*
* Calculate the CRC value based on all undospace meta information stored on the page.
/*
* Calculate the CRC value based on all undospace meta information stored on the page.
* Then compare with pageCrcVal.
*/
CheckCRC(comCrcVal, pageCrcVal, readSize, uspMetaBuffer);
@ -4778,7 +4832,7 @@ static int ParseUndoSpaceMeta(const char *filename, int zid, UndoSpaceType type)
fprintf(stderr, "Open file(%s), return code desc(%s).\n", UNDO_META_FILE, strerror(errno));
return false;
}
if (type == UNDO_LOG_SPACE) {
UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt);
lseek(fd, totalPageCnt * UNDO_META_PAGE_SIZE, SEEK_SET);
@ -4812,8 +4866,8 @@ static int ParseUndoSpaceMeta(const char *filename, int zid, UndoSpaceType type)
/* Get page CRC from uspMetaBuffer. */
pageCrcVal = *(pg_crc32 *) (uspMetaBuffer + readSize);
/*
* Calculate the CRC value based on all undospace meta information stored on the page.
/*
* Calculate the CRC value based on all undospace meta information stored on the page.
* Then compare with pageCrcVal.
*/
CheckCRC(comCrcVal, pageCrcVal, readSize, uspMetaBuffer);
@ -4822,8 +4876,8 @@ static int ParseUndoSpaceMeta(const char *filename, int zid, UndoSpaceType type)
zoneId = (loop - 1) * UNDOSPACE_COUNT_PER_PAGE + offset;
uspSpaceInfo = (UndoSpaceMetaInfo *) (uspMetaBuffer + offset * sizeof(UndoSpaceMetaInfo));
if ((zid == INVALID_ZONE_ID) || (zid != INVALID_ZONE_ID && zid == zoneId)) {
fprintf(stdout, "zid=%d, head=%lu, tail=%lu, lsn=%lu.\n", zoneId,
UNDO_PTR_GET_OFFSET(uspSpaceInfo->head), UNDO_PTR_GET_OFFSET(uspSpaceInfo->tail),
fprintf(stdout, "zid=%d, head=%lu, tail=%lu, lsn=%lu.\n", zoneId,
UNDO_PTR_GET_OFFSET(uspSpaceInfo->head), UNDO_PTR_GET_OFFSET(uspSpaceInfo->tail),
uspSpaceInfo->lsn);
if (zid != INVALID_ZONE_ID) {
@ -4883,8 +4937,10 @@ typedef struct UndoHeader {
UndoRecordHeader whdr_;
UndoRecordBlock wblk_;
UndoRecordTransaction wtxn_;
UndoRecordPayload wpay_;
UndoRecordOldTd wtd_;
UndoRecordPayload wpay_;
UndoRecordPartition wpart_;
UndoRecordTablespace wtspc_;
} UndoHeader;
char g_dir[100] = {0};
@ -4962,19 +5018,19 @@ bool ReadUndoRecord(UndoHeader *urec, char *buffer, int startingByte, int *alrea
}
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_PARTOID) != 0) {
if (!ReadUndoBytes((char *)&urec->wtd_, SIZE_OF_UNDO_RECORD_PARTITION,
if (!ReadUndoBytes((char *)&urec->wpart_, SIZE_OF_UNDO_RECORD_PARTITION,
&readptr, endptr, &myBytesRead, alreadyRead)) {
return false;
}
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_TABLESPACEOID) != 0) {
if (!ReadUndoBytes((char *)&urec->wtd_, SIZE_OF_UNDO_RECORD_TABLESPACE,
if (!ReadUndoBytes((char *)&urec->wtspc_, SIZE_OF_UNDO_RECORD_TABLESPACE,
&readptr, endptr, &myBytesRead, alreadyRead)) {
return false;
}
}
if ((urec->whdr_.uinfo & UNDO_UREC_INFO_PAYLOAD) != 0) {
if (!ReadUndoBytes((char *)&urec->wtd_, SIZE_OF_UNDO_RECORD_PAYLOAD,
if (!ReadUndoBytes((char *)&urec->wpay_, SIZE_OF_UNDO_RECORD_PAYLOAD,
&readptr, endptr, &myBytesRead, alreadyRead)) {
return false;
}
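The fix above reads each optional undo-record section into its own member (wpart_, wtspc_, wpay_), keyed off its own uinfo bit. Below is a simplified, self-contained analog of that flag-keyed deserialization; demo_read_bytes stands in for ReadUndoBytes and drops its byte-accounting arguments, and the DEMO_* bit is made up.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DEMO_INFO_HAS_PARTOID 0x1

/* Copy 'size' bytes out of [*readptr, endptr) into dest; fail if the buffer is short. */
static int demo_read_bytes(void *dest, size_t size, const char **readptr, const char *endptr)
{
    if ((size_t)(endptr - *readptr) < size)
        return 0;
    memcpy(dest, *readptr, size);
    *readptr += size;
    return 1;
}

int main(void)
{
    /* A fake serialized record: one flags byte, then an optional 4-byte partition oid. */
    unsigned char buf[] = { DEMO_INFO_HAS_PARTOID, 0x39, 0x30, 0x00, 0x00 };
    const char *readptr = (const char *)buf + 1;
    const char *endptr = (const char *)buf + sizeof(buf);
    uint8_t uinfo = buf[0];
    uint32_t partoid = 0;

    /* Each optional section is read into its own field, guarded by its own flag bit. */
    if ((uinfo & DEMO_INFO_HAS_PARTOID) != 0 &&
        !demo_read_bytes(&partoid, sizeof(partoid), &readptr, endptr)) {
        return 1;
    }
    printf("partoid = %u\n", (unsigned)partoid); /* 12345 on a little-endian machine */
    return 0;
}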
@ -4988,7 +5044,7 @@ static bool ParseUndoRecord(UndoRecPtr urp)
char buffer[BLCKSZ] = {'\0'};
BlockNumber blockno = UNDO_PTR_GET_BLOCK_NUM(urp);
int zoneId = UNDO_PTR_GET_ZONE_ID(urp);
int startingByte = UNDO_PTR_GET_BLOCK_NUM(urp);
int startingByte = UNDO_PTR_GET_PAGE_OFFSET(urp);
int fd = -1;
int alreadyRead = 0;
off_t seekpos;
@ -5003,6 +5059,7 @@ static bool ParseUndoRecord(UndoRecPtr urp)
do {
fd = OpenUndoBlock(zoneId, blockno);
if (fd < 0) {
free(urec);
return false;
}
seekpos = (off_t)BLCKSZ * (blockno % ((BlockNumber)UNDOSEG_SIZE));
@ -5013,6 +5070,7 @@ static bool ParseUndoRecord(UndoRecPtr urp)
ret = read(fd, (char *)buffer, BLCKSZ);
if (ret != BLCKSZ) {
close(fd);
free(urec);
fprintf(stderr, "Read undo meta page failed, expect size(8192), real size(%u).\n", ret);
return false;
}
@ -5266,6 +5324,28 @@ static void fill_filenode_map(char** class_map)
return;
}
static long int strtolSafe(const char* nptr, long int default_value)
{
char* tmp = NULL;
long int res = strtol(nptr, &tmp, TEN);
if (errno == ERANGE || tmp == nptr || (errno != 0 && res == 0)) {
fprintf(stdout, "WARNING: failed to convert parameter %s to int!\n", nptr);
res = default_value;
}
return res;
}
static long long int strtollSafe(const char* nptr, long long int default_value)
{
char* tmp = NULL;
long long int res = strtoll(nptr, &tmp, TEN);
if (errno == ERANGE || tmp == nptr || (errno != 0 && res == 0)) {
fprintf(stdout, "WARNING: failed to convert parameter %s to int!\n", nptr);
res = default_value;
}
return res;
}
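strtolSafe and strtollSafe replace the bare atoi/atoll calls in the option parsing below, falling back to a default on conversion failure. For reference, a standalone variant of the same pattern; this sketch additionally clears errno before the call and rejects trailing characters, which is stricter than the helpers above.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long demo_parse_long_or(const char *nptr, long default_value)
{
    char *end = NULL;
    errno = 0; /* so a stale errno cannot be mistaken for a conversion failure */
    long res = strtol(nptr, &end, 10);
    if (errno == ERANGE || end == nptr || *end != '\0') {
        fprintf(stdout, "WARNING: failed to convert parameter %s to int!\n", nptr);
        return default_value;
    }
    return res;
}

int main(void)
{
    printf("%ld\n", demo_parse_long_or("42", 0));  /* 42 */
    printf("%ld\n", demo_parse_long_or("abc", 7)); /* warning, then 7 */
    return 0;
}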
int main(int argc, char** argv)
{
int c;
@ -5318,7 +5398,7 @@ int main(int argc, char** argv)
}
case 'o':
cu_offset = (uint64)atoll(optarg);
cu_offset = (uint64)strtollSafe(optarg, 0);
break;
case 'r': // relation name given
@ -5371,19 +5451,19 @@ int main(int argc, char** argv)
break;
case 's':
start_point = (unsigned)atoi(optarg);
start_point = (unsigned int)strtolSafe(optarg, 0);
break;
case 'n':
num_block = (unsigned)atoi(optarg);
num_block = (unsigned int)strtolSafe(optarg, 0);
break;
case 'I':
start_item = (unsigned)atoi(optarg);
start_item = (unsigned int)strtolSafe(optarg, 1);
break;
case 'N':
num_item = (unsigned)atoi(optarg);
num_item = (unsigned int)strtolSafe(optarg, 0);
break;
case 'w':
@ -5395,11 +5475,11 @@ int main(int argc, char** argv)
break;
case 'z':
zid = (int)atoi(optarg);
zid = (int)strtolSafe(optarg, INVALID_ZONE_ID);
break;
case 'S':
SegNo = (unsigned)atoi(optarg);
SegNo = (unsigned int)strtolSafe(optarg, 0);
break;
default:
View File
@ -106,11 +106,7 @@ static void GetBTPageStatistics(BlockNumber blkno, Buffer buffer, BTPageStat* st
/* page type (flags) */
if (P_ISDELETED(opaque)) {
stat->type = 'd';
if (PageIs4BXidVersion(page))
stat->btpo.xact = opaque->btpo.xact_old;
else
stat->btpo.xact = ((BTPageOpaque)opaque)->xact;
stat->btpo.xact = ((BTPageOpaque)opaque)->xact;
return;
} else if (P_IGNORE(opaque))
stat->type = 'e';
View File
@ -205,7 +205,7 @@ static void SetWALFileNameForCleanup(void)
* Use just the prefix of the filename, ignore everything after
* first period
*/
XLogFileName(exclusiveCleanupFileName, tli, ((uint64)log) << segLen | seg);
XLogFileName(exclusiveCleanupFileName, MAXFNAMELEN, tli, ((uint64)log) << segLen | seg);
}
}
View File
@ -125,9 +125,9 @@ struct stat stat_buf;
#define XLOG_DATA_FNAME_LEN 24
/* Reworked from access/xlog_internal.h */
#define XLogFileName(fname, tli, logSegNo) \
#define XLogFileName(fname, len, tli, logSegNo) \
snprintf(fname, \
XLOG_DATA_FNAME_LEN + 1, \
len, \
"%08X%08X%08X", \
tli, \
(uint32)((logSegNo) / XLogSegmentsPerXLogId), \
@ -345,7 +345,7 @@ static bool SetWALFileNameForCleanup(void)
}
}
XLogFileName(exclusiveCleanupFileName, tli, (((uint32)log) << 32) | seg);
XLogFileName(exclusiveCleanupFileName, MAXFNAMELEN, tli, (((uint32)log) << 32) | seg);
return cleanup;
}
View File
@ -35,7 +35,7 @@ SET(TGT_xlogdump_INC
SET(xlogdump_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND)
SET(xlogdump_COMPILE_OPTIONS ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${CHECK_OPTIONS} ${BIN_SECURE_OPTIONS} ${OPTIMIZE_OPTIONS})
SET(xlogdump_LINK_OPTIONS ${BIN_LINK_OPTIONS})
SET(xlogdump_LINK_LIBS libpgcommon.a -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -lsecurec -lrt -lz -lminiunz)
SET(xlogdump_LINK_LIBS libpgcommon.a -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz)
add_bintarget(pg_xlogdump TGT_xlogdump_SRC TGT_xlogdump_INC "${xlogdump_DEF_OPTIONS}" "${xlogdump_COMPILE_OPTIONS}" "${xlogdump_LINK_OPTIONS}" "${xlogdump_LINK_LIBS}")
add_dependencies(pg_xlogdump pgport_static pgcommon_static)
target_link_directories(pg_xlogdump PUBLIC
View File
@ -78,7 +78,7 @@ typedef struct XLogDumpStats {
static void XLogDumpTablePage(XLogReaderState* record, int block_id, RelFileNode rnode, BlockNumber blk);
static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLogRecPtr startptr, char* buf, Size count);
static int XLogDumpReadPage(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetPtr,
char* readBuff, TimeLineID* curFileTLI);
char* readBuff, TimeLineID* curFileTLI, char* xlog_path = NULL);
static void XLogDumpCountRecord(XLogDumpConfig* config, XLogDumpStats* stats, XLogReaderState* record);
static void XLogDumpDisplayRecord(XLogDumpConfig* config, XLogReaderState* record);
static void XLogDumpStatsRow(const char* name, uint64 n, uint64 total_count, uint64 rec_len, uint64 total_rec_len,
@ -356,7 +356,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog
XLByteToSeg(recptr, sendSegNo);
XLogFileName(fname, timeline_id, sendSegNo);
XLogFileName(fname, MAXFNAMELEN, timeline_id, sendSegNo);
sendFile = fuzzy_open_file(directory, fname);
@ -371,7 +371,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog
int err = errno;
char fname[MAXPGPATH];
XLogFileName(fname, timeline_id, sendSegNo);
XLogFileName(fname, MAXFNAMELEN, timeline_id, sendSegNo);
fatal_error("could not seek in log segment %s to offset %u: %s", fname, startoff, strerror(err));
}
@ -389,7 +389,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog
int err = errno;
char fname[MAXPGPATH];
XLogFileName(fname, timeline_id, sendSegNo);
XLogFileName(fname, MAXFNAMELEN, timeline_id, sendSegNo);
fatal_error("could not read from log segment %s, offset %d, length %d: %s",
fname,
@ -411,7 +411,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog
* XLogReader read_page callback
*/
static int XLogDumpReadPage(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetPtr,
char* readBuff, TimeLineID* curFileTLI)
char* readBuff, TimeLineID* curFileTLI, char* xlog_path)
{
XLogDumpPrivate* dumpprivate = (XLogDumpPrivate*)state->private_data;
int count = XLOG_BLCKSZ;
@ -1088,7 +1088,7 @@ begin_read:
for (;;) {
/* try to read the next record */
record = XLogReadRecord(xlogreader_state, first_record, &errormsg, false);
record = XLogReadRecord(xlogreader_state, first_record, &errormsg);
if (!record) {
if (!config.follow || dumpprivate.endptr_reached)
break;
View File
@ -13,7 +13,6 @@
#include "access/gin.h"
#include "access/gist_private.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/heapam.h"
#include "access/multixact.h"
#include "access/nbtree.h"
@ -43,7 +42,8 @@
#include "access/ustore/knl_uredo.h"
#define PG_RMGR(symname, name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc) {name, desc},
#define PG_RMGR(symname, name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc, type_name) \
{name, desc},
const RmgrDescData RmgrDescTable[RM_MAX_ID + 1] = {
#include "access/rmgrlist.h"
View File
@ -366,6 +366,7 @@ static void pgstat_hash_page(pgstattuple_type* stat, Relation rel, BlockNumber b
Page page;
OffsetNumber maxoff;
_hash_getlock(rel, blkno, HASH_SHARE);
buf = _hash_getbuf_with_strategy(rel, blkno, HASH_READ, 0, bstrategy);
page = BufferGetPage(buf);
@ -392,6 +393,7 @@ static void pgstat_hash_page(pgstattuple_type* stat, Relation rel, BlockNumber b
}
_hash_relbuf(rel, buf);
_hash_droplock(rel, blkno, HASH_SHARE);
}
/*
View File
@ -158,8 +158,8 @@ PGconn *GetConnection(ForeignServer *server, UserMapping *user, bool will_prep_s
RegisterXactCallback(pgfdw_xact_callback, NULL);
RegisterSubXactCallback(pgfdw_subxact_callback, NULL);
CacheRegisterSyscacheCallback(FOREIGNSERVEROID, pgfdw_inval_callback, (Datum)0);
CacheRegisterSyscacheCallback(USERMAPPINGOID, pgfdw_inval_callback, (Datum)0);
CacheRegisterSessionSyscacheCallback(FOREIGNSERVEROID, pgfdw_inval_callback, (Datum)0);
CacheRegisterSessionSyscacheCallback(USERMAPPINGOID, pgfdw_inval_callback, (Datum)0);
if (IS_THREAD_POOL_SESSION) {
u_sess->ext_fdw_ctx[POSTGRES_TYPE_FDW].fdwExitFunc = pg_fdw_exit;
View File
@ -46,6 +46,7 @@
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "libpq/pqexpbuffer.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/var.h"
@ -1150,6 +1151,7 @@ static void deparseRelation(StringInfo buf, Relation rel)
const char *nspname = NULL;
const char *relname = NULL;
ListCell *lc;
char parttype = PARTTYPE_NON_PARTITIONED_RELATION;
/* obtain additional catalog information. */
ForeignTable* table = GetForeignTable(RelationGetRelid(rel));
@ -1178,6 +1180,33 @@ static void deparseRelation(StringInfo buf, Relation rel)
relname = RelationGetRelationName(rel);
}
/* foreign table could not be built from a partitioned table */
UserMapping* user = GetUserMapping(rel->rd_rel->relowner, table->serverid);
ForeignServer *server = GetForeignServer(table->serverid);
PGconn* conn = GetConnection(server, user, false);
PQExpBuffer query = createPQExpBuffer();
appendPQExpBuffer(query,
"SELECT c.parttype FROM pg_class c, pg_namespace n "
"WHERE c.relname = '%s' and c.relnamespace = n.oid and n.nspname = '%s'",
quote_identifier(relname), quote_identifier(nspname));
PGresult* res = pgfdw_exec_query(conn, query->data);
if (PQresultStatus(res) != PGRES_TUPLES_OK) {
pgfdw_report_error(ERROR, res, conn, true, query->data);
}
/* res may be empty as the relname/nspname validation is not checked */
if (PQntuples(res) > 0) {
parttype = *PQgetvalue(res, 0, 0);
}
PQclear(res);
destroyPQExpBuffer(query);
if (!ENABLE_SQL_BETA_FEATURE(PARTITION_FDW_ON) &&
(parttype == PARTTYPE_PARTITIONED_RELATION || parttype == PARTTYPE_SUBPARTITIONED_RELATION)) {
ereport(ERROR, (errmsg("could not operate foreign table on partitioned table")));
}
appendStringInfo(buf, "%s.%s", quote_identifier(nspname), quote_identifier(relname));
}
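The new check above asks the remote server for pg_class.parttype through the FDW's cached connection (GetConnection / pgfdw_exec_query) and rejects partitioned tables unless the PARTITION_FDW_ON beta feature is enabled. For reference, the same catalog lookup can be reproduced with plain libpq; the connection string and the public.t1 names below are placeholders.

#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    /* Placeholder connection string; the FDW uses its own cached connection instead. */
    PGconn *conn = PQconnectdb("host=localhost dbname=postgres");
    if (PQstatus(conn) != CONNECTION_OK) {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    const char *sql =
        "SELECT c.parttype FROM pg_class c, pg_namespace n "
        "WHERE c.relname = 't1' AND c.relnamespace = n.oid AND n.nspname = 'public'";
    PGresult *res = PQexec(conn, sql);
    if (PQresultStatus(res) != PGRES_TUPLES_OK) {
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
    } else if (PQntuples(res) > 0) {
        /* Single-character code; the PARTTYPE_* macros checked above map to values such as 'p'. */
        printf("parttype = %s\n", PQgetvalue(res, 0, 0));
    }
    PQclear(res);
    PQfinish(conn);
    return 0;
}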
View File
@ -37,17 +37,17 @@ CREATE TABLE "S 1"."T 2" (
INSERT INTO "S 1"."T 1"
SELECT id,
id % 10,
to_char(id, 'FM00000'),
pg_catalog.to_char(id, 'FM00000'),
'1970-01-01'::timestamptz + ((id % 100) || ' days')::interval,
'1970-01-01'::timestamp + ((id % 100) || ' days')::interval,
id % 10,
id % 10,
'foo'::user_enum
FROM generate_series(1, 1000) id;
FROM pg_catalog.generate_series(1, 1000) id;
INSERT INTO "S 1"."T 2"
SELECT id,
'AAA' || to_char(id, 'FM000')
FROM generate_series(1, 100) id;
'AAA' || pg_catalog.to_char(id, 'FM000')
FROM pg_catalog.generate_series(1, 100) id;
ANALYZE "S 1"."T 1";
ANALYZE "S 1"."T 2";
@ -188,7 +188,7 @@ SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1;
-- user-defined operator/function
CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$
BEGIN
RETURN abs($1);
RETURN pg_catalog.abs($1);
END
$$ LANGUAGE plpgsql IMMUTABLE;
CREATE OPERATOR === (
@ -200,7 +200,7 @@ CREATE OPERATOR === (
);
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2;
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = pg_catalog.abs(t1.c2);
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
-- ===================================================================
@ -210,7 +210,7 @@ EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; --
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE pg_catalog.round(pg_catalog.abs(c1), 0) = 1; -- FuncExpr
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
@ -225,9 +225,9 @@ SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
-- check both safe and unsafe join conditions
EXPLAIN (VERBOSE, COSTS false)
SELECT * FROM ft2 a, ft2 b
WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = pg_catalog.upper(a.c7);
SELECT * FROM ft2 a, ft2 b
WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = pg_catalog.upper(a.c7);
-- bug before 9.3.5 due to sloppy handling of remote-estimate parameters
SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5));
SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5));
@ -267,12 +267,12 @@ EXPLAIN (VERBOSE, COSTS false) EXECUTE st1(1, 2);
EXECUTE st1(1, 1);
EXECUTE st1(101, 101);
-- subquery using stable function (can't be sent to remote)
PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c4) = '1970-01-17'::date) ORDER BY c1;
PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND pg_catalog.date(c4) = '1970-01-17'::date) ORDER BY c1;
EXPLAIN (VERBOSE, COSTS false) EXECUTE st2(10, 20);
EXECUTE st2(10, 20);
EXECUTE st2(101, 121);
-- subquery using immutable function (can be sent to remote)
PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c5) = '1970-01-17'::date) ORDER BY c1;
PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND pg_catalog.date(c5) = '1970-01-17'::date) ORDER BY c1;
EXPLAIN (VERBOSE, COSTS false) EXECUTE st3(10, 20);
EXECUTE st3(10, 20);
EXECUTE st3(20, 30);
@ -448,34 +448,34 @@ INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
-- Test savepoint/rollback behavior
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
begin;
update ft2 set c2 = 42 where c2 = 0;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
savepoint s1;
update ft2 set c2 = 44 where c2 = 4;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
release savepoint s1;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
savepoint s2;
update ft2 set c2 = 46 where c2 = 6;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
rollback to savepoint s2;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
release savepoint s2;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
savepoint s3;
update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side
rollback to savepoint s3;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
release savepoint s3;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
-- none of the above is committed yet remotely
select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
commit;
select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1;
select c2, pg_catalog.count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
-- ===================================================================
-- test serial columns (ie, sequence-based defaults)
@ -530,14 +530,14 @@ begin
tg_name, argstr, TG_when, TG_level, TG_OP, relid;
oldnew := '{}'::text[];
if TG_OP != 'INSERT' then
oldnew := array_append(oldnew, format('OLD: %s', OLD));
oldnew := pg_catalog.array_append(oldnew, pg_catalog.format('OLD: %s', OLD));
end if;
if TG_OP != 'DELETE' then
oldnew := array_append(oldnew, format('NEW: %s', NEW));
oldnew := pg_catalog.array_append(oldnew, pg_catalog.format('NEW: %s', NEW));
end if;
RAISE NOTICE '%', array_to_string(oldnew, ',');
RAISE NOTICE '%', pg_catalog.array_to_string(oldnew, ',');
if TG_OP = 'DELETE' then
return OLD;
View File
@ -288,8 +288,8 @@ void audit_open_relation(List *list, Var *col_att, PolicyLabelItem *full_column,
}
}
static void audit_open_view(RuleLock *rules, Var *col_att, PolicyLabelItem* full_column,
PolicyLabelItem *view_full_column)
static void audit_cursor_view(RuleLock *rules, Var *col_att, PolicyLabelItem *full_column,
PolicyLabelItem *view_full_column)
{
if (col_att == NULL)
return;
@ -364,7 +364,7 @@ void get_fqdn_by_relid(RangeTblEntry *rte, PolicyLabelItem *full_column, Var *co
/* schema */
full_column->m_schema = tbl_rel->rd_rel->relnamespace;
if (tbl_rel->rd_rules) { /* view */
audit_open_view(tbl_rel->rd_rules, col_att, full_column, view_full_column);
audit_cursor_view(tbl_rel->rd_rules, col_att, full_column, view_full_column);
if (view_full_column) {
view_full_column->m_schema = tbl_rel->rd_rel->relnamespace;
view_full_column->set_object(rte->relid, O_VIEW);
@ -499,7 +499,7 @@ void access_audit_policy_run(const List* rtable, CmdType cmd_type)
/* table object */
RangeTblEntry *rte = (RangeTblEntry *)lfirst(lc);
policy_result pol_result;
if (rte == NULL || rte->relname == NULL || rte->rtekind == RTE_REMOTE_DUMMY) {
if (rte == NULL || rte->rtekind == RTE_REMOTE_DUMMY) {
continue;
}
@ -507,7 +507,8 @@ void access_audit_policy_run(const List* rtable, CmdType cmd_type)
int recursion_deep = 0;
handle_subquery(rte, rte->subquery->commandType, &pol_result, &checked_tables, &policy_ids,
&security_policy_ids, &recursion_deep);
} else if (checked_tables.insert(rte->relname).second) { /* verify if table object already checked */
} else if (rte->relname != NULL &&
checked_tables.insert(rte->relname).second) { /* verify if table object already checked */
/* use query plan commandtype here but not get it from rte directly */
if (!handle_table_entry(rte, cmd_type, &policy_ids, &security_policy_ids, &pol_result)) {
continue;
View File
@ -196,7 +196,7 @@ static bool is_valid_for_masking(const char* func_name, Oid funcnsp, int& funcid
bool is_valid = true;
/* try to find function on pg_proc */
for (int i = 0; i < catlist->n_members && is_valid; ++i) {
HeapTuple proctup = &catlist->members[i]->tuple;
HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup);
/* verify namespace */
if (procform->pronamespace != funcnsp) {
View File
@ -36,6 +36,8 @@
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/lsyscache.h"
#include "gs_mask_policy.h"
#include "gs_policy_plugin.h"
static THR_LOCAL loaded_labels *all_labels = NULL;
@ -192,13 +194,13 @@ bool check_label_has_object(const PolicyLabelItem *object,
return false;
}
Assert(CheckLabelBoundPolicy != NULL);
loaded_labels *all_labels = get_policy_labels();
if (all_labels == NULL) {
loaded_labels *cur_all_labels = get_policy_labels();
if (cur_all_labels == NULL) {
return false;
}
loaded_labels::const_iterator it = all_labels->begin();
loaded_labels::const_iterator eit = all_labels->end();
loaded_labels::const_iterator it = cur_all_labels->begin();
loaded_labels::const_iterator eit = cur_all_labels->end();
for (; it != eit; ++it) {
/* for each item of loaded existing labels, and match labels */
if (labels != NULL && labels->find(*(it->first)) == labels->end()) {
@ -238,4 +240,41 @@ void clear_thread_local_label()
delete all_labels;
all_labels = NULL;
}
}
void verify_drop_column(AlterTableStmt *stmt)
{
ListCell *lcmd = NULL;
foreach (lcmd, stmt->cmds) {
AlterTableCmd *cmd = (AlterTableCmd *)lfirst(lcmd);
switch (cmd->subtype) {
case AT_DropColumn: {
/* check by column */
PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN);
if (check_label_has_object(&find_obj, is_masking_has_object)) {
char buff[512] = {0};
int rc = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1,
"Column: %s is part of some resource label, can not be renamed.", find_obj.m_column);
securec_check_ss(rc, "\0", "\0");
gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED);
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff)));
}
break;
}
case AT_AlterColumnType: {
PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN);
if (check_label_has_object(&find_obj, is_masking_has_object, true)) {
char buff[512] = {0};
int ret = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1,
"Column: %s is part of some masking policy, can not be changed.", find_obj.m_column);
securec_check_ss(ret, "\0", "\0");
gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED);
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff)));
}
break;
}
default:
break;
}
}
}
View File
@ -56,4 +56,5 @@ bool update_label_value(const gs_stl::gs_string object_name,
void reset_policy_labels();
void clear_thread_local_label();
void verify_drop_column(AlterTableStmt *stmt);
#endif /* GS_POLICY_GS_POLICY_LABELS_H_ */
View File
@ -544,6 +544,19 @@ typedef struct ObjectTypeInfo
const char* object_name;
} ObjectTypeInfo;
typedef struct CmdCursorInfo {
CmdType cmd_type;
const char *object_name;
} CmdCursorInfo;
static CmdCursorInfo cmd_cursorinfo[] = {
{CMD_SELECT, "FOR SELECT FROM"},
{CMD_INSERT, "FOR INSERT TO"},
{CMD_UPDATE, "FOR UPDATE FROM"},
{CMD_DELETE, "FOR DELETE FROM"},
{CMD_UNKNOWN, NULL}
};
static OperInfo oper_infos[] = {
{"create", T_CREATE},
{"alter", T_ALTER},
@ -560,7 +573,7 @@ static OperInfo oper_infos[] = {
{"login_success", T_LOGIN_SUCCESS},
{"login_failure", T_LOGIN_FAILURE},
{"copy", T_COPY},
{"open", T_OPEN},
{"cursor", T_CURSOR},
{"fetch", T_FETCH},
{"close", T_CLOSE},
{"all", T_ALL},
@ -621,6 +634,20 @@ static ObjectTypeInfo object_type_infos[] =
{O_UNKNOWN, NULL}
};
/*
* get_cursorinfo
* return cursor operation object
*/
const char *get_cursorinfo(CmdType type)
{
for (int i = 0; cmd_cursorinfo[i].object_name != NULL; ++i) {
if (cmd_cursorinfo[i].cmd_type == type) {
return cmd_cursorinfo[i].object_name;
}
}
return "UNKNOWN";
}
/*
* get_privilege_type
* return privilege type in enum PrivType by its name
@ -934,4 +961,17 @@ int get_objtype(int object_type)
break;
}
return objtype;
}
CmdType get_rte_commandtype(RangeTblEntry *rte)
{
if (rte->selectedCols) {
return CMD_SELECT;
} else if (rte->insertedCols) {
return CMD_INSERT;
} else if (rte->updatedCols) {
return CMD_UPDATE;
} else {
return CMD_UNKNOWN;
}
}
View File
@ -66,7 +66,7 @@ enum PrivType {
T_LOGIN_SUCCESS,
T_LOGIN_FAILURE,
T_COPY,
T_OPEN,
T_CURSOR,
T_FETCH,
T_CLOSE,
T_ALL
@ -275,5 +275,7 @@ bool name_list_to_label(PolicyLabelItem *item, List *names, char *name = NULL, s
void gen_policy_labelitem(PolicyLabelItem &item, const ListCell *rel, int objtype);
void gen_policy_label_for_commentstmt(PolicyLabelItem &item, const CommentStmt *commentstmt);
int get_objtype(int object_type);
CmdType get_rte_commandtype(RangeTblEntry *rte);
const char *get_cursorinfo(CmdType type);
#endif /* GS_POLICY_OBJECT_TYPES_H_ */
View File
@ -137,10 +137,7 @@ static THR_LOCAL char original_query[256];
static THR_LOCAL MngEventsVector *mng_events = NULL;
using StrMap = gs_stl::gs_map<gs_stl::gs_string, masking_result>;
static THR_LOCAL StrMap* masked_prepared_stmts = NULL;
static THR_LOCAL StrMap* masked_cursor_stmts = NULL;
static void process_masking(ParseState *pstate, Query *query, const policy_set *policy_ids, bool audit_exist);
static void gsaudit_next_PostParseAnalyze_hook(ParseState *pstate, Query *query);
static void destroy_local_parameter();
static void destory_thread_variables()
@ -285,10 +282,7 @@ static void destroy_local_parameter()
mng_events = NULL;
}
if (masked_cursor_stmts != NULL) {
delete masked_cursor_stmts;
masked_cursor_stmts = NULL;
}
free_masked_cursor_stmts();
}
/*
@ -514,77 +508,6 @@ bool verify_copy_command_is_reparsed(List* parsetree_list, const char* query_str
return false;
}
static void free_masked_prepared_stmts()
{
if (masked_prepared_stmts) {
delete masked_prepared_stmts;
masked_prepared_stmts = NULL;
}
}
template< class T>
static inline void flush_stmt_masking_result(const char* name, T* stmts)
{
if (stmts) {
StrMap::const_iterator it = stmts->find(name);
if (it != stmts->end()) {
flush_masking_result(it->second);
}
}
}
static void flush_cursor_stmt_masking_result(const char* name)
{
flush_stmt_masking_result(name, masked_cursor_stmts);
}
static void flush_prepare_stmt_masking_result(const char* name)
{
flush_stmt_masking_result(name, masked_prepared_stmts);
}
static void close_cursor_stmt_as_masked(const char* name)
{
if (masked_cursor_stmts == NULL) {
return;
}
masked_cursor_stmts->erase(name);
if (masked_cursor_stmts->empty() || (strcasecmp(name, "all") == 0)) {
delete masked_cursor_stmts;
masked_cursor_stmts = NULL;
}
}
static void unprepare_stmt_as_masked(const char* name)
{
unprepare_stmt(name);
if (!masked_prepared_stmts) {
return;
}
masked_prepared_stmts->erase(name);
if (masked_prepared_stmts->empty() || !strcasecmp(name, "all")) {
delete masked_prepared_stmts;
masked_prepared_stmts = NULL;
}
}
static inline void set_prepare_stmt_as_masked(const char* name, const masking_result *result)
{
if (!masked_prepared_stmts) {
masked_prepared_stmts = new StrMap;
}
(*masked_prepared_stmts)[name] = (*result);
}
static inline void set_cursor_stmt_as_masked(const char* name, const masking_result *result)
{
if (!masked_cursor_stmts) {
masked_cursor_stmts = new StrMap;
}
(*masked_cursor_stmts)[name] = (*result);
}
void set_result_set_function(const PolicyLabelItem &func)
{
if (result_set_functions == NULL) {
@ -595,129 +518,6 @@ void set_result_set_function(const PolicyLabelItem &func)
}
}
/*
* Do masking for given target list
* this function will parse each RTE of the list
* and then will check wether each node need to do mask.
*/
static bool handle_masking(List* targetList, ParseState *pstate,
const policy_set *policy_ids, List* rtable, Node* utilityNode)
{
if (targetList == NIL || policy_ids->empty()) {
return false;
}
ListCell* temp = NULL;
masking_result masking_result;
foreach(temp, targetList) {
TargetEntry *old_tle = (TargetEntry *) lfirst(temp);
/* Shuffle masking columns can only select directly with out other operations */
parser_target_entry(pstate, old_tle, policy_ids, &masking_result, rtable, true);
}
if (masking_result.size() > 0) {
if (strlen(t_thrd.security_policy_cxt.prepare_stmt_name) > 0) {
/* prepare statement was masked */
set_prepare_stmt_as_masked(t_thrd.security_policy_cxt.prepare_stmt_name,
&masking_result); /* save masking event for executing case */
} else if (utilityNode != NULL) {
switch (nodeTag(utilityNode)) {
case T_DeclareCursorStmt:
{
DeclareCursorStmt* stmt = (DeclareCursorStmt *)utilityNode;
/* save masking event for fetching case */
set_cursor_stmt_as_masked(stmt->portalname, &masking_result);
}
break;
default:
flush_masking_result(&masking_result); /* invoke masking event */
}
} else {
flush_masking_result(&masking_result); /* invoke masking event */
}
return true;
}
return false;
}
static void select_PostParseAnalyze(ParseState *pstate, Query *&query, const policy_set *policy_ids, bool audit_exist)
{
Assert(query != NULL);
List *targetList = NIL;
targetList = (query->targetList != NIL) ? query->targetList : pstate->p_target_list;
handle_masking(targetList, pstate, policy_ids, query->rtable, query->utilityStmt);
/* deal with function type label */
load_function_label(query, audit_exist);
}
static bool process_union_masking(Node *union_node,
ParseState *pstate, const Query *query, const policy_set *policy_ids, bool audit_exist)
{
if (union_node == NULL) {
return false;
}
switch (nodeTag(union_node)) {
/* For each union, we get its query recursively for masking until it doesn't have any union query */
case T_SetOperationStmt:
{
SetOperationStmt *stmt = (SetOperationStmt *)union_node;
if (stmt->op != SETOP_UNION) {
return false;
}
process_union_masking((Node *)(stmt->larg), pstate, query, policy_ids, audit_exist);
process_union_masking((Node *)(stmt->rarg), pstate, query, policy_ids, audit_exist);
}
break;
case T_RangeTblRef:
{
RangeTblRef *ref = (RangeTblRef *)union_node;
if (ref->rtindex <= 0 || ref->rtindex > list_length(query->rtable)) {
return false;
}
Query* mostQuery = rt_fetch(ref->rtindex, query->rtable)->subquery;
process_masking(pstate, mostQuery, policy_ids, audit_exist);
}
break;
default:
break;
}
return true;
}
/*
* Main entrance for masking
* Identify components in query tree that need to do masking.
* This function will find all parts which need masking of select query,
* mainly includes CTE / setOperation / normal select columns.
*/
static void process_masking(ParseState *pstate, Query *query, const policy_set *policy_ids, bool audit_exist)
{
if (query == NULL) {
return;
}
/* set-operation tree UNION query */
if (!process_union_masking(query->setOperations, pstate, query, policy_ids, audit_exist)) {
ListCell *lc = NULL;
/* For each Cte, we get its query recursively for masking, and then handle this query in normal way */
if (query->cteList != NIL) {
foreach(lc, query->cteList) {
CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
Query *cte_query = (Query *)cte->ctequery;
process_masking(pstate, cte_query, policy_ids, audit_exist);
}
}
/* find subquery and process each subquery node */
if (query->rtable != NULL) {
foreach(lc, query->rtable) {
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
Query *subquery = (Query *)rte->subquery;
process_masking(pstate, subquery, policy_ids, audit_exist);
}
}
select_PostParseAnalyze(pstate, query, policy_ids, audit_exist);
}
}
/*
* check exchange partition list contains masked table.
* For given AlterTableCmd list, check whether ordinary
@ -960,44 +760,6 @@ static void verify_drop_user(const char *rolename)
}
}
static void verify_drop_column(AlterTableStmt *stmt)
{
ListCell *lcmd = NULL;
foreach (lcmd, stmt->cmds) {
AlterTableCmd *cmd = (AlterTableCmd *)lfirst(lcmd);
switch (cmd->subtype) {
case AT_DropColumn: {
/* check by column */
PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN);
if (check_label_has_object(&find_obj, is_masking_has_object)) {
char buff[512] = {0};
int rc = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1,
"Column: %s is part of some resource label, can not be renamed.", find_obj.m_column);
securec_check_ss(rc, "\0", "\0");
gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED);
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff)));
}
break;
}
case AT_AlterColumnType: {
PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN);
if (check_label_has_object(&find_obj, is_masking_has_object, true))
{
char buff[512] = {0};
int ret = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1,
"Column: %s is part of some masking policy, can not be changed.", find_obj.m_column);
securec_check_ss(ret, "\0", "\0");
gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED);
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff)));
}
break;
}
default:
break;
}
}
}
/*
* Hook ProcessUtility to do session auditing for DDL and utility commands.
*/
@ -1077,13 +839,13 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString
if (parsetree != NULL) {
switch (nodeTag(parsetree)) {
case T_PlannedStmt: {
if (!check_audited_privilige(T_OPEN) && !SECURITY_CHECK_ACL_PRIV(T_OPEN)) {
if (!check_audited_privilige(T_CURSOR) && !SECURITY_CHECK_ACL_PRIV(T_CURSOR)) {
break;
}
char buff[POLICY_STR_BUFF_LEN] = {0};
PlannedStmt *stmt = (PlannedStmt *)parsetree;
get_open_cursor_info(stmt, buff, sizeof(buff));
internal_audit_str(&security_policy_ids, &audit_policy_ids, buff, T_OPEN, "OPEN", O_CURSOR);
internal_audit_str(&security_policy_ids, &audit_policy_ids, buff, T_CURSOR, "OPEN", O_CURSOR);
break;
}
case T_FetchStmt: {
@ -1097,12 +859,12 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString
gs_stl::gs_vector<PolicyLabelItem> cursor_objects;
if (portal && portal->queryDesc && portal->queryDesc->plannedstmt &&
portal->queryDesc->plannedstmt->rtable) {
get_cursor_tables(portal->queryDesc->plannedstmt->rtable, buff, sizeof(buff),
printed_size, &cursor_objects);
get_cursor_tables(portal->queryDesc->plannedstmt->rtable, buff, sizeof(buff), printed_size,
&cursor_objects);
}
for (const PolicyLabelItem item : cursor_objects) {
internal_audit_object_str(&security_policy_ids, &audit_policy_ids, &item, T_FETCH,
"FETCH", stmt->portalname);
internal_audit_object_str(&security_policy_ids, &audit_policy_ids, &item, T_FETCH, "FETCH",
stmt->portalname);
}
flush_cursor_stmt_masking_result(stmt->portalname); /* invoke masking event in this case */
}
@ -1186,14 +948,8 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString
case T_DeallocateStmt: {
DeallocateStmt *stmt = (DeallocateStmt *)parsetree;
char tmp[POLICY_TMP_BUFF_LEN] = {0};
int rc;
if (stmt->name == NULL) {
rc = snprintf_s(tmp, sizeof(tmp), sizeof(tmp) - 1, "ALL");
securec_check_ss(rc, "\0", "\0");
} else {
rc = snprintf_s(tmp, sizeof(tmp), sizeof(tmp) - 1, "%s", stmt->name);
securec_check_ss(rc, "\0", "\0");
}
int rc = snprintf_s(tmp, sizeof(tmp), sizeof(tmp) - 1, "%s", stmt->name == NULL ? "ALL" : stmt->name);
securec_check_ss(rc, "\0", "\0");
check_access_table(&audit_policy_ids, tmp, CMD_DEALLOCATE, O_UNKNOWN, tmp);
unprepare_stmt_as_masked(tmp);
break;
@ -1325,7 +1081,7 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString
names_pair(granted_name,
rte1->rolname ? rte1->rolname : "ALL" /* grantee_name */),
stmt->is_grant ? T_GRANT : T_REVOKE, stmt->is_grant ? "GRANT" : "REVOKE",
stmt->objtype);
stmt->objtype, stmt->targtype);
}
}
}
@ -1348,7 +1104,29 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString
internal_audit_object_str(&security_policy_ids, &audit_policy_ids, NULL,
names_pair(rte2->rolname /* granted_name */, rte1->rolname /* grantee_name */),
grantrolestmt->is_grant ? T_GRANT : T_REVOKE, grantrolestmt->is_grant ? "GRANT" : "REVOKE",
O_ROLE, true, true);
O_ROLE, ACL_TARGET_OBJECT, true, true);
}
}
break;
}
case T_GrantDbStmt: {
if (!check_audited_privilige(T_GRANT) && !check_audited_privilige(T_REVOKE) &&
!SECURITY_CHECK_ACL_PRIV(T_GRANT) && !SECURITY_CHECK_ACL_PRIV(T_REVOKE)) {
break;
}
GrantDbStmt *grantdbstmt = (GrantDbStmt *)(parsetree);
ListCell *lc1 = NULL;
ListCell *lc2 = NULL;
if (grantdbstmt && grantdbstmt->grantees && grantdbstmt->privileges) {
forboth(lc1, grantdbstmt->grantees, lc2, grantdbstmt->privileges)
{
PrivGrantee *rte1 = (PrivGrantee *)lfirst(lc1);
DbPriv *rte2 = (DbPriv*)lfirst(lc2);
internal_audit_object_str(&security_policy_ids, &audit_policy_ids, NULL,
names_pair(rte2->db_priv_name, rte1->rolname /* grantee_name */),
grantdbstmt->is_grant ? T_GRANT : T_REVOKE, grantdbstmt->is_grant ? "GRANT" : "REVOKE",
O_UNKNOWN, ACL_TARGET_OBJECT, true, false);
}
}
break;
@ -1718,11 +1496,9 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString
break;
}
CreateTableAsStmt *createtablestmt = (CreateTableAsStmt *)(parsetree);
if (createtablestmt != NULL) {
IntoClause *intoclause = createtablestmt->into;
if (intoclause != NULL)
audit_table(&security_policy_ids, &audit_policy_ids, intoclause->rel, T_CREATE,
"CREATE", O_TABLE);
if (createtablestmt != NULL && createtablestmt->into != NULL) {
audit_table(&security_policy_ids, &audit_policy_ids, createtablestmt->into->rel, T_CREATE, "CREATE",
O_TABLE);
}
break;
}
@ -1913,19 +1689,6 @@ static const char *ACL_get_object_name(int targetype, int objtype, ListCell *obj
return NULL;
}
CmdType get_rte_commandtype(RangeTblEntry *rte)
{
if (rte->selectedCols) {
return CMD_SELECT;
} else if (rte->insertedCols) {
return CMD_INSERT;
} else if (rte->updatedCols) {
return CMD_UPDATE;
} else {
return CMD_UNKNOWN;
}
}
static void gs_audit_executor_start_hook(QueryDesc *queryDesc, int eflags)
{
/* verify parameter and audit policy */
@ -2090,17 +1853,9 @@ void _PG_init(void)
/*
* Uninstall hooks and release local memory context
 * NOTE: Now the uninstall hooks process is disabled, referring to function internal_unload_library;
 * we just put the release function pointers here to adapt the uninstall process in the feature.
 * we just put the release function here to adapt the uninstall process in the future.
*/
void _PG_fini(void)
{
user_login_hook = NULL;
ExecutorStart_hook = next_ExecutorStart_hook;
ProcessUtility_hook = next_ProcessUtility_hook;
post_parse_analyze_hook = next_post_parse_analyze_hook;
copy_need_to_be_reparse = NULL;
light_unified_audit_executor_hook = NULL;
opfusion_unified_audit_executor_hook = NULL;
opfusion_unified_audit_flush_logs_hook = NULL;
ereport(LOG, (errmsg("Gsaudit extension finished")));
}

View File

@ -48,7 +48,6 @@ const char* GetUserName(char* user_name, size_t user_name_size);
bool get_ipaddress(gs_stl::gs_string& ipaddress);
extern void set_result_set_function(const PolicyLabelItem &func);
void get_name_range_var(const RangeVar *rangevar, gs_stl::gs_string *buffer, bool enforce = true);
CmdType get_rte_commandtype(RangeTblEntry *rte);
extern void load_database_policy_info();
bool is_audit_policy_exist_load_policy_info();

File diff suppressed because it is too large Load Diff

View File

@ -1,39 +1,53 @@
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* -------------------------------------------------------------------------
*
* masking.h
*
* IDENTIFICATION
* contrib/security_plugin/masking.h
*
* -------------------------------------------------------------------------
*/
#ifndef MASKING_H_
#define MASKING_H_
#include <string>
#include "parser/parse_node.h"
#include "nodes/primnodes.h"
#include "gs_mask_policy.h"
bool parser_target_entry(ParseState *pstate, TargetEntry*& old_tle, const policy_set *policy_ids,
masking_result *result, List* rtable, bool can_mask = true);
void reset_node_location();
/* col_type for integer should be int8, int4, int2, int1 */
Node* create_integer_node(ParseState *pstate, int value, int location, int col_type = INT4OID, bool make_cast = true);
#endif /* MASKING_H_ */
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* -------------------------------------------------------------------------
*
* masking.h
*
* IDENTIFICATION
* contrib/security_plugin/masking.h
*
* -------------------------------------------------------------------------
*/
#ifndef MASKING_H_
#define MASKING_H_
#include <string>
#include "parser/parse_node.h"
#include "nodes/primnodes.h"
#include "gs_mask_policy.h"
bool parser_target_entry(ParseState *pstate, TargetEntry*& old_tle, const policy_set *policy_ids,
masking_result *result, List* rtable, bool can_mask = true);
void reset_node_location();
/* col_type for integer should be int8, int4, int2, int1 */
Node* create_integer_node(ParseState *pstate, int value, int location, int col_type = INT4OID, bool make_cast = true);
void free_masked_cursor_stmts();
void free_masked_prepared_stmts();
void close_cursor_stmt_as_masked(const char* name);
void unprepare_stmt_as_masked(const char* name);
void set_prepare_stmt_as_masked(const char* name, const masking_result *result);
void set_cursor_stmt_as_masked(const char* name, const masking_result *result);
void flush_cursor_stmt_masking_result(const char* name);
void flush_prepare_stmt_masking_result(const char* name);
bool process_union_masking(Node *union_node,
ParseState *pstate, const Query *query, const policy_set *policy_ids, bool audit_exist);
void process_masking(ParseState *pstate, Query *query, const policy_set *policy_ids, bool audit_exist);
void select_PostParseAnalyze(ParseState *pstate, Query *&query, const policy_set *policy_ids, bool audit_exist);
bool handle_masking(List* targetList, ParseState *pstate,
const policy_set *policy_ids, List* rtable, Node* utilityNode);
#endif /* MASKING_H_ */

View File

@ -49,6 +49,25 @@
#define ACCESS_CONTROL_CHECK_ACL_PRIVILIGE(type) \
((check_acl_privilige_hook == NULL) ? true : check_acl_privilige_hook(type))
typedef struct AclObjectType {
GrantObjectType grant_type;
PrivObject privi_type;
} AclObjectType;
static AclObjectType aclobject_infos[] = {
{ACL_OBJECT_COLUMN, O_COLUMN},
{ACL_OBJECT_RELATION, O_TABLE},
{ACL_OBJECT_SEQUENCE, O_SEQUENCE},
{ACL_OBJECT_DATABASE, O_DATABASE},
{ACL_OBJECT_DOMAIN, O_DOMAIN},
{ACL_OBJECT_FOREIGN_SERVER, O_SERVER},
{ACL_OBJECT_FUNCTION, O_FUNCTION},
{ACL_OBJECT_LANGUAGE, O_LANGUAGE},
{ACL_OBJECT_NAMESPACE, O_SCHEMA},
{ACL_OBJECT_TABLESPACE, O_TABLESPACE},
{ACL_OBJECT_DATA_SOURCE, O_DATA_SOURCE},
};
void add_current_path(int objtype, List *fqdn, gs_stl::gs_string *buffer);
/*
@ -61,14 +80,17 @@ void add_current_path(int objtype, List *fqdn, gs_stl::gs_string *buffer);
 * ignore_db: whether to ignore the database name
*/
void internal_audit_object_str(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel,
const names_pair names, int priv_type, const char *priv_name, int objtype, bool is_rolegrant, bool ignore_db)
const names_pair names, int priv_type, const char *priv_name, int objtype,
int target_type, bool is_rolegrant, bool ignore_db)
{
/*
* Note PolicyLabelItem just support table/function/view/column
* so that only "all" label will work for other object type
*/
PolicyLabelItem item;
gen_policy_labelitem(item, rel, objtype);
if (target_type == ACL_TARGET_OBJECT) {
gen_policy_labelitem(item, rel, objtype);
}
/* PolicyLabelItem construction will append schema oid by relid */
policy_simple_set policy_result;
@ -213,96 +235,15 @@ bool internal_audit_object_str(const policy_set* security_policy_ids, const poli
return is_found;
}
/* append audit logs for comment */
void audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const char *relname,
int priv_type, const char *priv_name, int objtype)
{
switch (objtype) {
case OBJECT_ROLE:
internal_audit_str(security_policy_ids, policy_ids, relname, priv_type, priv_name, O_ROLE);
break;
case OBJECT_USER:
internal_audit_str(security_policy_ids, policy_ids, relname, priv_type, priv_name, O_USER);
break;
case OBJECT_SCHEMA:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_SCHEMA);
break;
case OBJECT_SEQUENCE:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_SEQUENCE);
break;
case OBJECT_DATABASE:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_DATABASE);
break;
case OBJECT_FOREIGN_SERVER:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_SERVER);
break;
case OBJECT_FOREIGN_TABLE:
case OBJECT_STREAM:
case OBJECT_TABLE:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name,
(objtype == OBJECT_TABLE) ? O_TABLE : O_FOREIGNTABLE);
break;
case OBJECT_COLUMN:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_COLUMN);
break;
case OBJECT_FUNCTION:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_FUNCTION);
break;
case OBJECT_CONTQUERY:
case OBJECT_VIEW:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_VIEW);
break;
case OBJECT_INDEX:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_INDEX);
break;
case OBJECT_TABLESPACE:
internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_TABLESPACE);
break;
default:
break;
}
}
void acl_audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel,
const names_pair names, int priv_type, const char *priv_name, int objtype)
const names_pair names, int priv_type, const char *priv_name, int objtype, int target_type)
{
switch (objtype) {
case ACL_OBJECT_COLUMN:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_COLUMN);
break;
case ACL_OBJECT_RELATION:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_TABLE);
break;
case ACL_OBJECT_SEQUENCE:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_SEQUENCE);
break;
case ACL_OBJECT_DATABASE:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_DATABASE,
false, true);
break;
case ACL_OBJECT_DOMAIN:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_DOMAIN);
break;
case ACL_OBJECT_FOREIGN_SERVER:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_SERVER);
break;
case ACL_OBJECT_FUNCTION:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_FUNCTION);
break;
case ACL_OBJECT_LANGUAGE:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_LANGUAGE);
break;
case ACL_OBJECT_NAMESPACE:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_SCHEMA);
break;
case ACL_OBJECT_TABLESPACE:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_TABLESPACE);
break;
case ACL_OBJECT_DATA_SOURCE:
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_DATA_SOURCE);
break;
default:
break;
PrivObject type = get_privtype_from_aclobject((GrantObjectType)objtype);
if (type == O_DATABASE) {
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, type, target_type,
false, true);
} else {
internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, type, target_type);
}
}
@ -828,37 +769,21 @@ void get_open_cursor_info(PlannedStmt *stmt, char *buff, size_t buff_size)
printed_size = snprintf_s(buff, buff_size, buff_size - 1, "%s ", cstmt->portalname);
securec_check_ss(printed_size, "\0", "\0");
}
switch (stmt->commandType) {
case CMD_SELECT: {
rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
"FOR SELECT FROM");
securec_check_ss(rc, "\0", "\0");
printed_size += rc;
break;
}
case CMD_INSERT: {
rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
"FOR INSERT TO");
securec_check_ss(rc, "\0", "\0");
printed_size += rc;
break;
}
case CMD_UPDATE: {
rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
"FOR UPDATE FROM");
securec_check_ss(rc, "\0", "\0");
printed_size += rc;
break;
}
case CMD_DELETE: {
rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
"FOR DELETE FROM");
securec_check_ss(rc, "\0", "\0");
printed_size += rc;
break;
}
default:
break;
}
rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
get_cursorinfo(stmt->commandType));
securec_check_ss(rc, "\0", "\0");
printed_size += rc;
get_cursor_tables(stmt->rtable, buff, buff_size, printed_size);
}
PrivObject get_privtype_from_aclobject(GrantObjectType acl_type)
{
for (unsigned int i = 0; i < (sizeof(aclobject_infos) / sizeof(aclobject_infos[0])); ++i) {
if (aclobject_infos[i].grant_type == acl_type) {
return aclobject_infos[i].privi_type;
}
}
return O_UNKNOWN;
}

View File

@ -26,13 +26,14 @@
#include "nodes/primnodes.h"
#include "nodes/parsenodes.h"
#include "gs_policy/gs_vector.h"
#include "gs_policy_object_types.h"
#define SET_DB_SCHEMA_TABLE buffer->append(schemaname); \
buffer->push_back('.');
typedef std::pair<gs_stl::gs_string, gs_stl::gs_string> names_pair;
void acl_audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel,
const names_pair names, int priv_type, const char *priv_name, int objtype);
const names_pair names, int priv_type, const char *priv_name, int objtype, int target_type);
bool internal_audit_object_str(const policy_set* security_policy_ids, const policy_set* policy_ids,
const PolicyLabelItem* item, int priv_type, const char* priv_name, const char* objname = "",
bool ignore_db = false);
@ -41,9 +42,7 @@ void internal_audit_str(const policy_set *security_policy_ids, const policy_set
void login_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const char *login_str,
int priv_type, const char *priv_name);
void internal_audit_object_str(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel,
const names_pair names, int priv_type, const char *priv_name, int objtype, bool is_rolegrant = false, bool ignore_db = false);
void audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids,
const char *relname, int priv_type, const char *priv_name, int objtype);
const names_pair names, int priv_type, const char *priv_name, int objtype, int target_type = ACL_TARGET_OBJECT, bool is_rolegrant = false, bool ignore_db = false);
void audit_table(const policy_set *security_policy_ids, const policy_set *policy_ids,
RangeVar *rel, int priv_type, const char *priv_name, int objtype);
void alter_table(const policy_set *security_policy_ids, const policy_set *policy_ids,
@ -60,5 +59,6 @@ void destroy_logs();
void get_cursor_tables(List *rtable, char *buff, size_t buff_size, int _printed_size,
gs_stl::gs_vector<PolicyLabelItem> *cursor_objects = nullptr);
void get_open_cursor_info(PlannedStmt *stmt, char *buff, size_t buff_size);
PrivObject get_privtype_from_aclobject(GrantObjectType acl_type);
#endif /* PRIVILEGES_AUDIT_H_ */

View File

@ -3,8 +3,8 @@ create or replace function pg_catalog.creditcardmasking(col text,letter char def
declare
size INTEGER := 4;
begin
return CASE WHEN length(col) >= size THEN
REGEXP_REPLACE(left(col, size*(-1)), '[\d+]', letter, 'g') || right(col, size)
return CASE WHEN pg_catalog.length(col) >= size THEN
pg_catalog.REGEXP_REPLACE(pg_catalog.left(col, size*(-1)), '[\d+]', letter, 'g') || pg_catalog.right(col, size)
ELSE
col
end;
@ -16,7 +16,7 @@ declare
pos INTEGER := position('@' in col);
begin
return CASE WHEN pos > 1 THEN
repeat(letter, pos - 1) || substring(col, pos, length(col) - pos +1)
pg_catalog.repeat(letter, pos - 1) || pg_catalog.substring(col, pos, pg_catalog.length(col) - pos +1)
ELSE
col
end;
@ -26,10 +26,10 @@ $$ LANGUAGE plpgsql;
create or replace function pg_catalog.fullemailmasking(col text, letter char default 'x') RETURNS text AS $$
declare
pos INTEGER := position('@' in col);
dot_pos INTEGER := length(col) - position('.' in reverse(col)) + 1;
dot_pos INTEGER := pg_catalog.length(col) - position('.' in pg_catalog.reverse(col)) + 1;
begin
return CASE WHEN pos > 2 and dot_pos > pos THEN
repeat(letter, pos - 1) || '@' || repeat(letter, dot_pos - pos - 1) || substring(col, dot_pos, length(col) - dot_pos +1)
pg_catalog.repeat(letter, pos - 1) || '@' || pg_catalog.repeat(letter, dot_pos - pos - 1) || pg_catalog.substring(col, dot_pos, pg_catalog.length(col) - dot_pos +1)
ELSE
col
end;
@ -38,7 +38,7 @@ $$ LANGUAGE plpgsql;
create or replace function pg_catalog.alldigitsmasking(col text, letter char default '0') RETURNS text AS $$
begin
return REGEXP_REPLACE(col, '[\d+]', letter, 'g');
return pg_catalog.REGEXP_REPLACE(col, '[\d+]', letter, 'g');
end;
$$ LANGUAGE plpgsql;
@ -46,14 +46,14 @@ create or replace function pg_catalog.shufflemasking(col text) RETURNS text AS $
declare
index INTEGER := 0;
rd INTEGER;
size INTEGER := length(col);
size INTEGER := pg_catalog.length(col);
tmp text := col;
res text;
begin
while size > 0 loop
rd := floor(random() * length(tmp) + 1);
res := res || right(left(tmp, rd), 1);
tmp := left(tmp, rd - 1) || right(tmp, length(tmp) - rd);
rd := pg_catalog.floor(pg_catalog.random() * pg_catalog.length(tmp) + 1);
res := res || pg_catalog.right(pg_catalog.left(tmp, rd), 1);
tmp := pg_catalog.left(tmp, rd - 1) || pg_catalog.right(tmp, pg_catalog.length(tmp) - rd);
size := size - 1;
END loop;
return res;
@ -62,13 +62,13 @@ $$ LANGUAGE plpgsql;
create or replace function pg_catalog.randommasking(col text) RETURNS text AS $$
begin
return left(MD5(random()::text), length(col));
return pg_catalog.left(pg_catalog.MD5(pg_catalog.random()::text), pg_catalog.length(col));
end;
$$ LANGUAGE plpgsql;
create or replace function pg_catalog.regexpmasking(col text, reg text, replace_text text, pos INTEGER default 0, reg_len INTEGER default -1) RETURNS text AS $$
declare
size INTEGER := length(col);
size INTEGER := pg_catalog.length(col);
endpos INTEGER;
startpos INTEGER;
lstr text;
@ -81,9 +81,9 @@ begin
endpos := reg_len + startpos - 1;
IF reg_len < 0 THEN endpos := size - 1; END IF;
IF reg_len + startpos >= size THEN endpos := size - 1; END IF;
lstr := left(col, startpos);
rstr := right(col, size - endpos - 1);
ltarget := substring(col, startpos+1, endpos - startpos + 1);
lstr := pg_catalog.left(col, startpos);
rstr := pg_catalog.right(col, size - endpos - 1);
ltarget := pg_catalog.substring(col, startpos+1, endpos - startpos + 1);
ltarget := pg_catalog.REGEXP_REPLACE(ltarget, reg, replace_text, 'g');
return lstr || ltarget || rstr;
end;
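For a quick sanity check of these masking helpers, calls like the following should work once the functions above are (re)created; the input values are made up and only illustrate the shape of the output:
-- keeps the last four characters, masks the digits in front of them
SELECT pg_catalog.creditcardmasking('4111-1111-1111-1234');
-- masks everything before the '@', and the domain up to the last dot
SELECT pg_catalog.fullemailmasking('alice.smith@example.com');
-- replaces every digit with the default letter '0'
SELECT pg_catalog.alldigitsmasking('id-12345');
-- applies the regexp replacement to the first three characters only
SELECT pg_catalog.regexpmasking('13812345678', '[0-9]', '*', 0, 3);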

View File

@ -346,7 +346,7 @@ void sepgsql_relation_drop(Oid relOid)
attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
for (i = 0; i < attrList->n_members; i++) {
atttup = &attrList->members[i]->tuple;
atttup = t_thrd.lsc_cxt.FetchTupleFromCatCList(attrList, i);
attForm = (Form_pg_attribute)GETSTRUCT(atttup);
if (attForm->attisdropped)
@ -360,7 +360,7 @@ void sepgsql_relation_drop(Oid relOid)
sepgsql_avc_check_perms(&object, SEPG_CLASS_DB_COLUMN, SEPG_DB_COLUMN__DROP, audit_name, true);
pfree(audit_name);
}
ReleaseCatCacheList(attrList);
ReleaseSysCacheList(attrList);
}
}

View File

@ -0,0 +1,11 @@
# This is the CMake file for building the sql_decoding component.
AUX_SOURCE_DIRECTORY(${PROJECT_OPENGS_DIR}/contrib/sql_decoding TGT_sql_decoding_SRC)
set(sql_decoding_DEF_OPTIONS -D_GLIBCXX_USE_CXX11_ABI=0 -DSTREAMPLAN -DPGXC -DENABLE_GSTRACE -D_GNU_SOURCE)
set(sql_decoding_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-all)
list(REMOVE_ITEM sql_decoding_COMPILE_OPTIONS -fstack-protector)
set(sql_decoding_LINK_OPTIONS ${LIB_LINK_OPTIONS})
add_shared_libtarget(sql_decoding TGT_sql_decoding_SRC "" "${sql_decoding_DEF_OPTIONS}" "${sql_decoding_COMPILE_OPTIONS}" "${sql_decoding_LINK_OPTIONS}")
set_target_properties(sql_decoding PROPERTIES PREFIX "")
install(TARGETS sql_decoding LIBRARY DESTINATION lib/postgresql)

View File

@ -0,0 +1,2 @@
wal_level = logical
max_replication_slots = 8

View File

@ -0,0 +1,5 @@
# sql_decoding extension
comment = 'sql_decoding wrapper'
default_version = '1.0'
module_pathname = '$libdir/sql_decoding'
relocatable = true

View File

@ -0,0 +1,516 @@
/*
* Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd.
* Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* ---------------------------------------------------------------------------------------
*
* sql_decoding.cpp
* logical decoding output plugin (sql)
*
*
*
* IDENTIFICATION
* contrib/sql_decoding/sql_decoding.cpp
*
* ---------------------------------------------------------------------------------------
*/
#include "postgres.h"
#include "knl/knl_variable.h"
#include "access/sysattr.h"
#include "access/ustore/knl_utuple.h"
#include "catalog/pg_class.h"
#include "catalog/pg_type.h"
#include "nodes/parsenodes.h"
#include "replication/logical.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relcache.h"
#include "utils/syscache.h"
#include "utils/typcache.h"
#include "replication/output_plugin.h"
#include "replication/logical.h"
PG_MODULE_MAGIC;
/* These must be available to pg_dlsym() */
extern "C" void _PG_init(void);
extern "C" void _PG_output_plugin_init(OutputPluginCallbacks* cb);
static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init);
static void pg_decode_shutdown(LogicalDecodingContext* ctx);
static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn);
static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_decode_change(
LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change);
static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id);
typedef struct {
MemoryContext context;
bool include_xids;
bool include_timestamp;
bool skip_empty_xacts;
bool xact_wrote_changes;
bool only_local;
} TestDecodingData;
/* specify output plugin callbacks */
void _PG_output_plugin_init(OutputPluginCallbacks* cb)
{
AssertVariableIsOfType(&_PG_output_plugin_init, LogicalOutputPluginInit);
cb->startup_cb = pg_decode_startup;
cb->begin_cb = pg_decode_begin_txn;
cb->change_cb = pg_decode_change;
cb->commit_cb = pg_decode_commit_txn;
cb->abort_cb = pg_decode_abort_txn;
cb->filter_by_origin_cb = pg_decode_filter;
cb->shutdown_cb = pg_decode_shutdown;
}
void _PG_init(void)
{
/* other plugins can perform things here */
}
/* initialize this plugin */
static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init = true)
{
ListCell* option = NULL;
TestDecodingData *data = (TestDecodingData*)palloc0(sizeof(TestDecodingData));
data->context = AllocSetContextCreate(ctx->context,
"text conversion context", ALLOCSET_DEFAULT_SIZES);
data->include_xids = true;
data->include_timestamp = false;
data->skip_empty_xacts = false;
data->only_local = true;
ctx->output_plugin_private = data;
opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT;
foreach (option, ctx->output_plugin_options) {
DefElem* elem = (DefElem*)lfirst(option);
Assert(elem->arg == NULL || IsA(elem->arg, String));
if (strcmp(elem->defname, "include-xids") == 0) {
/* if option does not provide a value, it means its value is true */
if (elem->arg == NULL) {
data->include_xids = true;
} else if (!parse_bool(strVal(elem->arg), &data->include_xids)) {
ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\"")));
}
} else if (strcmp(elem->defname, "include-timestamp") == 0) {
if (elem->arg == NULL) {
data->include_timestamp = true;
} else if (!parse_bool(strVal(elem->arg), &data->include_timestamp)) {
ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\"")));
}
} else if (strcmp(elem->defname, "skip-empty-xacts") == 0) {
if (elem->arg == NULL) {
data->skip_empty_xacts = true;
} else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts)) {
ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\"")));
}
} else if (strcmp(elem->defname, "only-local") == 0) {
if (elem->arg == NULL) {
data->only_local = true;
} else if (!parse_bool(strVal(elem->arg), &data->only_local)) {
ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\"")));
}
} else {
ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? strVal(elem->arg) : "(null)"),
errdetail("N/A"), errcause("Wrong input option"),
erraction("Check the product documentation for legal options")));
}
}
}
/* cleanup this plugin's resources */
static void pg_decode_shutdown(LogicalDecodingContext* ctx)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
/* cleanup our own resources via memory context reset */
MemoryContextDelete(data->context);
}
/*
 * Write the BEGIN record of a transaction to the output.
*/
void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write)
{
OutputPluginPrepareWrite(ctx, last_write);
appendStringInfo(ctx->out, "BEGIN %lu", txn->csn);
OutputPluginWrite(ctx, last_write);
}
/* BEGIN callback */
static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
data->xact_wrote_changes = false;
if (data->skip_empty_xacts) {
return;
}
pg_output_begin(ctx, data, txn, true);
}
/* COMMIT callback */
static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes) {
return;
}
OutputPluginPrepareWrite(ctx, true);
appendStringInfoString(ctx->out, "COMMIT");
appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time));
appendStringInfo(ctx->out, " %lu", txn->csn);
OutputPluginWrite(ctx, true);
}
/* ABORT callback */
static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes) {
return;
}
OutputPluginPrepareWrite(ctx, true);
if (data->include_xids) {
appendStringInfo(ctx->out, "ABORT %lu", txn->xid);
} else {
appendStringInfoString(ctx->out, "ABORT");
}
if (data->include_timestamp) {
appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time));
}
OutputPluginWrite(ctx, true);
}
static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
if (data->only_local && origin_id != InvalidRepOriginId) {
return true;
}
return false;
}
/*
* Print literal `outputstr' already represented as string of type `typid'
* into stringbuf `s'.
*
* Some builtin types aren't quoted, the rest is quoted. Escaping is done as
* if u_sess->parser_cxt.standard_conforming_strings were enabled.
*/
static void print_literal(StringInfo s, Oid typid, char* outputstr)
{
const char* valptr = NULL;
switch (typid) {
case FLOAT4OID:
case FLOAT8OID:
case NUMERICOID:
case INT1OID:
case INT2OID:
case INT4OID:
case INT8OID:
case OIDOID:
/* NB: We don't care about Inf, NaN et al. */
appendStringInfoString(s, outputstr);
break;
case BITOID:
case VARBITOID:
appendStringInfo(s, "B'%s'", outputstr);
break;
case BOOLOID:
if (strcmp(outputstr, "t") == 0) {
appendStringInfoString(s, "true");
} else {
appendStringInfoString(s, "false");
}
break;
default:
appendStringInfoChar(s, '\'');
for (valptr = outputstr; *valptr; valptr++) {
char ch = *valptr;
if (SQL_STR_DOUBLE(ch, false)) {
appendStringInfoChar(s, ch);
}
appendStringInfoChar(s, ch);
}
appendStringInfoChar(s, '\'');
break;
}
}
/*
* Decode tuple into stringinfo.
*/
static void TupleToStringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
{
Assert(tuple != NULL);
if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
(int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
return;
}
appendStringInfoChar(s, '(');
/* print all columns individually */
for (int natt = 0; natt < tupdesc->natts; natt++) {
bool isnull = false; /* column is null? */
bool typisvarlena = false;
Oid typoutput = 0; /* output function */
Datum origval = 0; /* possibly toasted Datum */
Form_pg_attribute attr = tupdesc->attrs[natt]; /* the attribute itself */
if (attr->attisdropped || attr->attnum < 0) {
continue;
}
/* get Datum from tuple */
if (tuple->tupTableType == HEAP_TUPLE) {
origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);
} else {
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
if (skip_nulls && isnull) {
continue;
}
/* query output function */
Oid typid = attr->atttypid; /* type of current attribute */
getTypeOutputInfo(typid, &typoutput, &typisvarlena);
/* print data */
if (isnull) {
appendStringInfoString(s, "null");
} else if (!typisvarlena) {
print_literal(s, typid, OidOutputFunctionCall(typoutput, origval));
} else {
Datum val = PointerGetDatum(PG_DETOAST_DATUM(origval));
print_literal(s, typid, OidOutputFunctionCall(typoutput, val));
}
if (natt < tupdesc->natts - 1) {
appendStringInfoString(s, ", ");
}
}
appendStringInfoChar(s, ')');
}
/*
* Decode tuple into stringinfo.
* This function is used for UPDATE or DELETE statements.
*/
static void TupleToStringinfoUpd(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
{
if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
(int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
return;
}
bool isFirstAtt = true;
/* print all columns individually */
for (int natt = 0; natt < tupdesc->natts; natt++) {
Oid typoutput = 0; /* output function */
Datum origval = 0; /* possibly toasted Datum */
bool isnull = false; /* column is null? */
bool typisvarlena = false;
Form_pg_attribute attr = tupdesc->attrs[natt]; /* the attribute itself */
if (attr->attisdropped || attr->attnum < 0) {
continue;
}
/* get Datum from tuple */
if (tuple->tupTableType == HEAP_TUPLE) {
origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);
} else {
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
if (isnull && skip_nulls) {
continue;
}
if (!isFirstAtt) {
appendStringInfoString(s, " and ");
} else {
isFirstAtt = false;
}
/* print attribute name */
appendStringInfoString(s, quote_identifier(NameStr(attr->attname)));
appendStringInfoString(s, " = ");
/* query output function */
Oid typid = attr->atttypid;
getTypeOutputInfo(typid, &typoutput, &typisvarlena);
/* print data */
if (isnull) {
appendStringInfoString(s, "null");
} else if (!typisvarlena) {
print_literal(s, typid, OidOutputFunctionCall(typoutput, origval));
} else {
Datum val = PointerGetDatum(PG_DETOAST_DATUM(origval));
print_literal(s, typid, OidOutputFunctionCall(typoutput, val));
}
}
}
/*
 * Helper that prints a decoded tuple into the output buffer.
 * A "(no-tuple-data)" marker is appended when the tuple is missing.
*/
static void TupleHandler(StringInfo s, TupleDesc tupdesc, ReorderBufferChange* change, bool isHeap, bool isNewTuple)
{
if (isHeap && isNewTuple) {
if (change->data.tp.newtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
TupleToStringinfo(s, tupdesc, &change->data.tp.newtuple->tuple, false);
}
} else if (isHeap && !isNewTuple) {
if (change->data.tp.oldtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
TupleToStringinfoUpd(s, tupdesc, &change->data.tp.oldtuple->tuple, true);
}
} else if (!isHeap && isNewTuple) {
if (change->data.utp.newtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
TupleToStringinfo(s, tupdesc, (HeapTuple)(&change->data.utp.newtuple->tuple), false);
}
} else {
if (change->data.utp.oldtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
TupleToStringinfoUpd(s, tupdesc, (HeapTuple)(&change->data.utp.oldtuple->tuple), true);
}
}
}
/*
* Callback for individual changed tuples.
*/
static void pg_decode_change(
LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change)
{
Form_pg_class class_form = NULL;
TupleDesc tupdesc = NULL;
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
u_sess->attr.attr_common.extra_float_digits = 0;
bool isHeap = true;
/* output BEGIN if we haven't yet */
if (txn != NULL && data->skip_empty_xacts && !data->xact_wrote_changes) {
pg_output_begin(ctx, data, txn, false);
}
data->xact_wrote_changes = true;
class_form = RelationGetForm(relation);
tupdesc = RelationGetDescr(relation);
/* Avoid leaking memory by using and resetting our own context */
MemoryContext old = MemoryContextSwitchTo(data->context);
char *schema = NULL;
char *table = NULL;
schema = get_namespace_name(class_form->relnamespace);
table = NameStr(class_form->relname);
OutputPluginPrepareWrite(ctx, true);
switch (change->action) {
case REORDER_BUFFER_CHANGE_INSERT:
case REORDER_BUFFER_CHANGE_UINSERT:
appendStringInfoString(ctx->out, "insert into ");
appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table));
if (change->action == REORDER_BUFFER_CHANGE_UINSERT) {
isHeap = false;
}
appendStringInfoString(ctx->out, " values ");
TupleHandler(ctx->out, tupdesc, change, isHeap, true);
break;
case REORDER_BUFFER_CHANGE_UPDATE:
case REORDER_BUFFER_CHANGE_UUPDATE:
appendStringInfoString(ctx->out, "delete from ");
appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table));
if (change->action == REORDER_BUFFER_CHANGE_UUPDATE) {
isHeap = false;
}
appendStringInfoString(ctx->out, " where ");
TupleHandler(ctx->out, tupdesc, change, isHeap, false);
appendStringInfoChar(ctx->out, ';');
appendStringInfoString(ctx->out, "insert into ");
appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table));
appendStringInfoString(ctx->out, " values ");
TupleHandler(ctx->out, tupdesc, change, isHeap, true);
break;
case REORDER_BUFFER_CHANGE_DELETE:
case REORDER_BUFFER_CHANGE_UDELETE:
appendStringInfoString(ctx->out, "delete from ");
appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table));
if (change->action == REORDER_BUFFER_CHANGE_UDELETE) {
isHeap = false;
}
appendStringInfoString(ctx->out, " where ");
TupleHandler(ctx->out, tupdesc, change, isHeap, false);
break;
default:
Assert(false);
}
appendStringInfoChar(ctx->out, ';');
MemoryContextSwitchTo(old);
MemoryContextReset(data->context);
OutputPluginWrite(ctx, true);
}
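A minimal way to exercise this plugin end to end (a sketch, assuming the library is installed as sql_decoding and the server runs with wal_level = logical and a free replication slot, as in the test configuration above):
-- create a logical replication slot bound to the sql_decoding output plugin
SELECT * FROM pg_create_logical_replication_slot('sql_slot', 'sql_decoding');
-- read decoded changes as SQL-like text; the option names match those parsed in pg_decode_startup
SELECT data FROM pg_logical_slot_peek_changes('sql_slot', NULL, NULL, 'include-timestamp', 'on', 'skip-empty-xacts', 'on');
-- drop the slot when finished
SELECT pg_drop_replication_slot('sql_slot');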

View File

@ -36,21 +36,15 @@ PG_MODULE_MAGIC;
extern "C" void _PG_init(void);
extern "C" void _PG_output_plugin_init(OutputPluginCallbacks* cb);
typedef struct {
MemoryContext context;
bool include_xids;
bool include_timestamp;
bool skip_empty_xacts;
bool xact_wrote_changes;
bool only_local;
} TestDecodingData;
static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init);
static void pg_decode_shutdown(LogicalDecodingContext* ctx);
static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_output_begin(
LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write);
LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn, bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn);
static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
static void pg_decode_change(
LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change);
static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id);
@ -69,6 +63,8 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb)
cb->begin_cb = pg_decode_begin_txn;
cb->change_cb = pg_decode_change;
cb->commit_cb = pg_decode_commit_txn;
cb->abort_cb = pg_decode_abort_txn;
cb->prepare_cb = pg_decode_prepare_txn;
cb->filter_by_origin_cb = pg_decode_filter;
cb->shutdown_cb = pg_decode_shutdown;
}
@ -77,84 +73,33 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb)
static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init)
{
ListCell* option = NULL;
TestDecodingData* data = NULL;
PluginTestDecodingData* data = NULL;
data = (TestDecodingData*)palloc0(sizeof(TestDecodingData));
data = (PluginTestDecodingData*)palloc0(sizeof(PluginTestDecodingData));
data->context = AllocSetContextCreate(ctx->context,
"text conversion context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
data->include_xids = true;
data->include_timestamp = false;
data->include_timestamp = true;
data->skip_empty_xacts = false;
data->only_local = true;
data->tableWhiteList = NIL;
ctx->output_plugin_private = data;
opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT;
foreach (option, ctx->output_plugin_options) {
DefElem* elem = (DefElem*)lfirst(option);
Assert(elem->arg == NULL || IsA(elem->arg, String));
if (strcmp(elem->defname, "include-xids") == 0) {
/* if option does not provide a value, it means its value is true */
if (elem->arg == NULL)
data->include_xids = true;
else if (!parse_bool(strVal(elem->arg), &data->include_xids))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else if (strcmp(elem->defname, "include-timestamp") == 0) {
if (elem->arg == NULL)
data->include_timestamp = true;
else if (!parse_bool(strVal(elem->arg), &data->include_timestamp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else if (strcmp(elem->defname, "force-binary") == 0) {
bool force_binary = false;
if (elem->arg == NULL)
continue;
else if (!parse_bool(strVal(elem->arg), &force_binary))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
if (force_binary)
opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
} else if (strcmp(elem->defname, "skip-empty-xacts") == 0) {
if (elem->arg == NULL)
data->skip_empty_xacts = true;
else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else if (strcmp(elem->defname, "only-local") == 0) {
if (elem->arg == NULL)
data->only_local = true;
else if (!parse_bool(strVal(elem->arg), &data->only_local))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname)));
} else {
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg(
"option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? strVal(elem->arg) : "(null)")));
}
ParseDecodingOptionPlugin(option, data, opt);
}
}
/* cleanup this plugin's resources */
static void pg_decode_shutdown(LogicalDecodingContext* ctx)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
/* cleanup our own resources via memory context reset */
MemoryContextDelete(data->context);
@ -163,16 +108,18 @@ static void pg_decode_shutdown(LogicalDecodingContext* ctx)
/* BEGIN callback */
static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
data->xact_wrote_changes = false;
if (data->skip_empty_xacts)
if (data->skip_empty_xacts) {
return;
}
pg_output_begin(ctx, data, txn, true);
}
static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write)
static void pg_output_begin(LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn,
bool last_write)
{
OutputPluginPrepareWrite(ctx, last_write);
if (data->include_xids)
@ -185,7 +132,7 @@ static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data,
/* COMMIT callback */
static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes)
return;
@ -203,65 +150,55 @@ static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN*
OutputPluginWrite(ctx, true);
}
/* ABORT callback */
static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes)
return;
OutputPluginPrepareWrite(ctx, true);
if (data->include_xids)
appendStringInfo(ctx->out, "ABORT %lu", txn->xid);
else
appendStringInfoString(ctx->out, "ABORT");
if (data->include_timestamp)
appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time));
OutputPluginWrite(ctx, true);
}
/* PREPARE callback */
static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn)
{
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->skip_empty_xacts && !data->xact_wrote_changes)
return;
OutputPluginPrepareWrite(ctx, true);
if (data->include_xids)
appendStringInfo(ctx->out, "PREPARE %lu", txn->xid);
else
appendStringInfoString(ctx->out, "PREPARE");
if (data->include_timestamp)
appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time));
OutputPluginWrite(ctx, true);
}
static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id)
{
TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private;
PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private;
if (data->only_local && origin_id != InvalidRepOriginId)
return true;
return false;
}
/*
* Print literal `outputstr' already represented as string of type `typid'
* into stringbuf `s'.
*
* Some builtin types aren't quoted, the rest is quoted. Escaping is done as
* if u_sess->parser_cxt.standard_conforming_strings were enabled.
*/
static void print_literal(StringInfo s, Oid typid, char* outputstr)
{
const char* valptr = NULL;
switch (typid) {
case INT1OID:
case INT2OID:
case INT4OID:
case INT8OID:
case OIDOID:
case FLOAT4OID:
case FLOAT8OID:
case NUMERICOID:
/* NB: We don't care about Inf, NaN et al. */
appendStringInfoString(s, outputstr);
break;
case BITOID:
case VARBITOID:
appendStringInfo(s, "B'%s'", outputstr);
break;
case BOOLOID:
if (strcmp(outputstr, "t") == 0)
appendStringInfoString(s, "true");
else
appendStringInfoString(s, "false");
break;
default:
appendStringInfoChar(s, '\'');
for (valptr = outputstr; *valptr; valptr++) {
char ch = *valptr;
if (SQL_STR_DOUBLE(ch, false))
appendStringInfoChar(s, ch);
appendStringInfoChar(s, ch);
}
appendStringInfoChar(s, '\'');
break;
}
}
static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
{
if (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data))
@ -337,11 +274,11 @@ static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple
else if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK_B(origval))
appendStringInfoString(s, "unchanged-toast-datum");
else if (!typisvarlena)
print_literal(s, typid, OidOutputFunctionCall(typoutput, origval));
PrintLiteral(s, typid, OidOutputFunctionCall(typoutput, origval));
else {
Datum val; /* definitely detoasted Datum */
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
print_literal(s, typid, OidOutputFunctionCall(typoutput, val));
PrintLiteral(s, typid, OidOutputFunctionCall(typoutput, val));
}
}
}
@ -351,13 +288,12 @@ static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple
static void pg_decode_change(
LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change)
{
TestDecodingData* data = NULL;
PluginTestDecodingData* data = NULL;
Form_pg_class class_form;
TupleDesc tupdesc;
MemoryContext old;
data = (TestDecodingData*)ctx->output_plugin_private;
u_sess->attr.attr_common.extra_float_digits = 0;
data = (PluginTestDecodingData*)ctx->output_plugin_private;
/* output BEGIN if we haven't yet */
if (data->skip_empty_xacts && !data->xact_wrote_changes) {
@ -371,12 +307,18 @@ static void pg_decode_change(
/* Avoid leaking memory by using and resetting our own context */
old = MemoryContextSwitchTo(data->context);
char *schema = get_namespace_name(class_form->relnamespace);
char *table = NameStr(class_form->relname);
if (data->tableWhiteList != NIL && !CheckWhiteList(data->tableWhiteList, schema, table)) {
(void)MemoryContextSwitchTo(old);
MemoryContextReset(data->context);
return;
}
OutputPluginPrepareWrite(ctx, true);
appendStringInfoString(ctx->out, "table ");
appendStringInfoString(ctx->out,
quote_qualified_identifier(
get_namespace_name(get_rel_namespace(RelationGetRelid(relation))), NameStr(class_form->relname)));
appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table));
appendStringInfoString(ctx->out, ":");
switch (change->action) {

View File

@ -5,10 +5,10 @@
<repoType>Generic</repoType>
<id>
<offering>${offering}</offering>
<version>${BVersion}</version>
<snapshot>Y</snapshot>
<version>${version}</version>
<snapshot>${snapshot}</snapshot>
</id>
<isClear>Y</isClear>
<isClear>N</isClear>
<copies>
<copy>
<source></source>
@ -17,66 +17,61 @@
</copies>
</artifact>
<dependencies>
<dependency>
<dependency>
<versionType>BVersion</versionType>
<repoType>Generic</repoType>
<id>
<offering>Huawei Secure C</offering>
<version>Huawei Secure C V100R001C01SPC010B002</version>
</id>
<isClear>Y</isClear>
<copies>
<copy>
<source></source>
<dest></dest>
</copy>
</copies>
</dependency>
<dependency>
</dependency>
<dependency>
<versionType>BVersion</versionType>
<repoType>Generic</repoType>
<id>
<offering>DOPRA SSP</offering>
<version>DOPRA SSP V300R021C00SPC020B100</version>
<version>DOPRA SSP V300R021C10SPC010B100</version>
</id>
<isClear>Y</isClear>
<copies>
<copy>
<source></source>
<dest></dest>
<dest>dopra_ssp</dest>
</copy>
</copies>
</dependency>
<dependency>
</dependency>
<dependency>
<versionType>BVersion</versionType>
<repoType>Generic</repoType>
<id>
<offering>Cloud Compiler JDK</offering>
<version>Cloud Compiler JDK V100R003C30SPC300B001</version>
<offering>BiSheng JDK Enterprise</offering>
<version>BiSheng JDK Enterprise 2.1.0.320.B001</version>
</id>
<isClear>Y</isClear>
<copies>
<copy>
<source></source>
<dest></dest>
<dest>huaweijdk</dest>
</copy>
</copies>
</dependency>
<dependency>
</dependency>
<dependency>
<versionType>BVersion</versionType>
<repoType>Generic</repoType>
<id>
<offering>KMC</offering>
<version>KMC 21.0.0.B003</version>
<version>KMC 21.1.0.B006</version>
</id>
<isClear>Y</isClear>
<copies>
<copy>
<source></source>
<dest></dest>
<DEL>Y</DEL>
</copy>
</copies>
</dependency>
</dependency>
</dependencies>
</project>
</project>

4964
distribute_errmsg.txt Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,16 @@
<refentry id="SQL-ALTER_GLOBAL_CONFIGURATION">
<refmeta>
<refentrytitle>ALTER GLOBAL CONFIGURATION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER GLOBAL CONFIGURATION</refname>
<refpurpose>add or fix records of gs_global_config</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER GLOBAL CONFIGURATION with(paraname=value, paraname=value...);
</synopsis>
</refsynopsisdiv>
</refentry>
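A hedged illustration of the syntax above; the parameter names are placeholders, since the accepted keys are whatever gs_global_config stores:
-- add or update two records in gs_global_config
ALTER GLOBAL CONFIGURATION with(param_a = 1, param_b = 'on');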

View File

@ -0,0 +1,16 @@
<refentry id="SQL-ALTER_PACKAGE">
<refmeta>
<refentrytitle>ALTER PACKAGE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER PACKAGE</refname>
<refpurpose>change the definition of a package</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER PACKAGE package_name OWNER TO new_owner;
</synopsis>
</refsynopsisdiv>
</refentry>
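For example (package and role names are illustrative only):
-- transfer ownership of a package to another role
ALTER PACKAGE my_pkg OWNER TO pkg_admin;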

View File

@ -22,6 +22,8 @@ ALTER [ LARGE ] SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</repl
[ MAXVALUE <replaceable class="parameter">maxvalue</replaceable> | NO MAXVALUE | NOMAXVALUE ]
[ OWNED BY { <replaceable class="parameter">table_name</replaceable>.<replaceable class="parameter">column_name</replaceable> | NONE } ];
ALTER [ LARGE ] SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>;
NOTICE: '[ LARGE ]' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
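Two illustrative invocations of the syntax shown above (object names are hypothetical; LARGE is accepted only in centralized mode):
ALTER SEQUENCE order_id_seq MAXVALUE 1000000;
ALTER LARGE SEQUENCE big_id_seq OWNER TO seq_owner;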

View File

@ -1,61 +1,69 @@
<refentry id="SQL-ALTER_TABLE_PARTITION">
<refmeta>
<refentrytitle>ALTER TABLE PARTITION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TABLE PARTITION</refname>
<refpurpose>change the definition of a partition</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
action [, ... ];
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
RENAME PARTITION { partition_name | FOR ( partition_value [, ...] ) } TO partition_new_name;
where action can be:
move_clause |
exchange_clause |
row_clause |
merge_clause |
modify_clause |
split_clause |
add_clause |
drop_clause
where move_clause can be:
MOVE PARTITION { partition_name | FOR ( partition_value [, ...] ) } TABLESPACE tablespacename
where exchange_clause can be:
EXCHANGE PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) }
WITH TABLE {[ ONLY ] ordinary_table_name | ordinary_table_name * | ONLY ( ordinary_table_name )}
[ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ]
where row_clause can be:
{ ENABLE | DISABLE } ROW MOVEMENT
where merge_clause can be:
MERGE PARTITIONS { partition_name } [, ...] INTO PARTITION partition_name
[ TABLESPACE tablespacename ]
where modify_clause can be:
MODIFY PARTITION partition_name { UNUSABLE LOCAL INDEXES | REBUILD UNUSABLE LOCAL INDEXES }
where split_clause can be:
SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause }
where split_point_clause can be:
AT ( partition_value ) INTO ( PARTITION partition_name [ TABLESPACE tablespacename ] , PARTITION partition_name [ TABLESPACE tablespacename ] )
where no_split_point_clause can be:
INTO {(partition_less_than_item [, ...] ) | (partition_start_end_item [, ...] )}
where add_clause can be:
ADD {partition_less_than_item | partition_start_end_item}
where partition_less_than_item can be:
PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } [, ...] ) [ TABLESPACE tablespacename ]
where partition_start_end_item can be:
PARTITION partition_name {
{START(partition_value) END (partition_value) EVERY (interval_value)} |
{START(partition_value) END ({partition_value | MAXVALUE})} |
{START(partition_value)} |
{END({partition_value | MAXVALUE})}
} [TABLESPACE tablespace_name]
where drop_clause can be:
DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) }
</synopsis>
</refsynopsisdiv>
<refentry id="SQL-ALTER_TABLE_PARTITION">
<refmeta>
<refentrytitle>ALTER TABLE PARTITION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TABLE PARTITION</refname>
<refpurpose>change the definition of a partition</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
action [, ... ];
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
RENAME PARTITION { partition_name | FOR ( partition_value [, ...] ) } TO partition_new_name;
where action can be:
move_clause |
exchange_clause |
row_clause |
merge_clause |
modify_clause |
split_clause |
add_clause |
drop_clause |
truncate_clause
where move_clause can be:
MOVE PARTITION { partition_name | FOR ( partition_value [, ...] ) } TABLESPACE tablespacename
where exchange_clause can be:
EXCHANGE PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) }
WITH TABLE {[ ONLY ] ordinary_table_name | ordinary_table_name * | ONLY ( ordinary_table_name )}
[ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ]
where row_clause can be:
{ ENABLE | DISABLE } ROW MOVEMENT
where merge_clause can be:
MERGE PARTITIONS { partition_name } [, ...] INTO PARTITION partition_name
[ TABLESPACE tablespacename ]
where modify_clause can be:
MODIFY PARTITION partition_name { UNUSABLE LOCAL INDEXES | REBUILD UNUSABLE LOCAL INDEXES }
where split_clause can be:
SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause }
where split_point_clause can be:
AT ( partition_value ) INTO ( PARTITION partition_name [ TABLESPACE tablespacename ] , PARTITION partition_name [ TABLESPACE tablespacename ] )
where no_split_point_clause can be:
INTO {(partition_less_than_item [, ...] ) | (partition_start_end_item [, ...] )}
where add_clause can be:
ADD PARTITION ( partition_col1_name = partition_col1_value [, partition_col2_name = partition_col2_value ] [, ...] )
[ LOCATION 'location1' ]
[ PARTITION (partition_colA_name = partition_colA_value [, partition_colB_name = partition_colB_value ] [, ...] ) ]
[ LOCATION 'location2' ]
ADD {partition_less_than_item | partition_start_end_item}
where partition_less_than_item can be:
PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } [, ...] ) [ TABLESPACE tablespacename ]
where partition_start_end_item can be:
PARTITION partition_name {
{START(partition_value) END (partition_value) EVERY (interval_value)} |
{START(partition_value) END ({partition_value | MAXVALUE})} |
{START(partition_value)} |
{END({partition_value | MAXVALUE})}
} [TABLESPACE tablespace_name]
where drop_clause can be:
DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) }
where truncate_clause can be:
TRUNCATE PARTITION { partition_name | FOR ( partition_value [, ...] ) } [ UPDATE GLOBAL INDEX ]
NOTICE: 'truncate_clause' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
</refentry>
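As a sketch only (table, partition, and value names below are hypothetical, not from the source), statements matching this synopsis could look like:
    ALTER TABLE sales_range SPLIT PARTITION p_2021 AT ('2022-01-01')
        INTO (PARTITION p_2021_h1, PARTITION p_2021_h2);
    ALTER TABLE sales_range MERGE PARTITIONS p_2019, p_2020 INTO PARTITION p_old;
    ALTER TABLE sales_range TRUNCATE PARTITION p_2021_h1 UPDATE GLOBAL INDEX;  -- truncate_clause: centralized mode only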

View File

@ -0,0 +1,43 @@
<refentry id="SQL-ALTER_TABLE_SUBPARTITION">
<refmeta>
<refentrytitle>ALTER TABLE SUBPARTITION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>ALTER TABLE SUBPARTITION</refname>
<refpurpose>change the definition of a subpartition</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
action [, ... ];
where action can be:
add_clause |
drop_clause |
split_clause |
truncate_clause
where add_clause can be:
ADD { partition_less_than_item | partition_list_item } [ ( subpartition_definition_list ) ]
MODIFY PARTITION partition_name ADD subpartition_definition
where partition_less_than_item can be:
PARTITION partition_name VALUES LESS THAN ( partition_value | MAXVALUE ) [ TABLESPACE tablespacename ]
where partition_list_item can be:
PARTITION partition_name VALUES ( partition_value [, ...] | DEFAULT ) [ TABLESPACE tablespacename ]
where subpartition_definition_list can be:
SUBPARTITION subpartition_name [ VALUES LESS THAN ( partition_value | MAXVALUE ) | VALUES ( partition_value [, ...] | DEFAULT )] [ TABLESPACE tablespace ]
where drop_clause can be:
DROP PARTITION { partition_name | FOR ( partition_value ) } [ UPDATE GLOBAL INDEX ]
DROP SUBPARTITION { subpartition_name | FOR ( partition_value, subpartition_value ) } [ UPDATE GLOBAL INDEX ]
where split_clause can be:
SPLIT SUBPARTITION { subpartition_name } { split_point_clause } [ UPDATE GLOBAL INDEX ]
where split_point_clause can be:
AT ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] ) |
VALUES ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] )
where truncate_clause can be:
TRUNCATE SUBPARTITION { subpartition_name } [ UPDATE GLOBAL INDEX ]
NOTICE: 'ALTER TABLE SUBPARTITION' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
</refentry>
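A hypothetical use of the add and drop clauses (all object names are illustrative; ALTER TABLE SUBPARTITION applies to centralized mode only):
    ALTER TABLE sales_subpart ADD PARTITION p_2023 VALUES LESS THAN ('2024-01-01')
        ( SUBPARTITION p_2023_east VALUES ('east'),
          SUBPARTITION p_2023_other VALUES (DEFAULT) );
    ALTER TABLE sales_subpart DROP SUBPARTITION p_2022_east UPDATE GLOBAL INDEX;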

View File

@ -1,25 +1,29 @@
<refentry id="SQL-CREATE_INDEX">
<refmeta>
<refentrytitle>CREATE INDEX</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE INDEX</refname>
<refpurpose>define a new index</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE [ UNIQUE ] INDEX [ [schema_name.] index_name ] ON table_name [ USING method ]
({ { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] }[, ...] )
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ TABLESPACE tablespace_name ]
[ WHERE predicate ];
CREATE [ UNIQUE ] INDEX [ [schema_name.] index_name ] ON table_name [ USING method ]
( { { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS LAST ] } [, ...] )
[ LOCAL [ ( { PARTITION index_partition_name [ TABLESPACE index_partition_tablespace ] } [, ...] ) ] | GLOBAL ]
[ WITH ( { storage_parameter = value } [, ...] ) ]
[ TABLESPACE tablespace_name ];
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_INDEX">
<refmeta>
<refentrytitle>CREATE INDEX</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE INDEX</refname>
<refpurpose>define a new index</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [schema_name.] index_name ] ON table_name [ USING method ]
({ { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] }[, ...] )
[ INCLUDE ( { column_name | ( expression ) }[, ...] ) ]
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ TABLESPACE tablespace_name ]
[ WHERE predicate ];
CREATE [ UNIQUE ] INDEX [ [schema_name.] index_name ] ON table_name [ USING method ]
( { { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS LAST ] } [, ...] )
[ LOCAL [ ( { PARTITION index_partition_name | SUBPARTITION index_subpartition_name [ TABLESPACE index_partition_tablespace ] } [, ...] ) ] | GLOBAL ]
[ INCLUDE ( { column_name | ( expression ) }[, ...] ) ]
[ WITH ( { storage_parameter = value } [, ...] ) ]
[ TABLESPACE tablespace_name ];
NOTICE: 'SUBPARTITION index_subpartition_name' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
</refentry>
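For illustration (index, table, and column names are made up, and sales_part is assumed to have two partitions), the two forms might be used as follows:
    CREATE INDEX CONCURRENTLY idx_orders_date ON orders USING btree
        (order_date DESC NULLS LAST)
        INCLUDE (customer_id)
        WITH (fillfactor = 80);
    CREATE INDEX idx_sales_region ON sales_part (region)
        LOCAL (PARTITION p_2022_idx, PARTITION p_2023_idx);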

View File

@ -23,6 +23,12 @@ where:
* linear_regression: Compute a linear regression using Gradient Descent
* svm_classification: Compute a support vector machine classifier using Gradient Descent
* kmeans: Compute an unsupervised clustering
* pca: Compute a principal component analysis result using Gradient Descent
* multiclass: Compute a classification result for data with more than two classes
* xgboost_regression_logistic: Compute logistic regression using XGBoost
* xgboost_binary_logistic: Compute logistic regression for binary classification using XGBoost
* xgboost_regression_squarederror: Compute regression with squared loss using XGBoost
* xgboost_regression_gamma: Compute gamma regression with log-link using XGBoost
* select_query is a standard SELECT query
For supervised machine learning algorithms, FEATURES and TARGET clauses are mandatory. For unsupervised machine learning algorithms, FEATURES
@ -54,6 +60,12 @@ For example:
* max_seconds: Maximum number of seconds doing the optimization
* optimizer: Select the optimizer: gd (gradient descent) or ngd (normalized gradient descent)
* tolerance: System stops when the percentage of changes between two iterations is below this percentage
* seed: Seed value for the random number generator
* kernel: Name of the kernel for svm classification; valid values are 'linear' (default), 'gaussian' and 'polynomial'
* components: Number of output dimensions for kernels other than linear; default is MAX(2*features, 128)
* gamma: Gamma parameter for the gaussian kernel; default is 0.5
* degree: Degree parameter for the polynomial kernel, in the range 2 to 9; default is 2
* coef0: Coef0 parameter for the polynomial kernel; must be greater than or equal to zero, default is 1.0
* verbose: 0 (no output), 1 (more output)
# Hyperparameter list for 'kmeans':
@ -67,7 +79,31 @@ For example:
* seeding_function: Algorithm used for initial seeds: 'Random++' or 'Kmeans||'
* verbose: 0 (no output), 1 (less output), or 2 (full output)
# Hyperparameter list for 'xgboost_regression_logistic', 'xgboost_binary_logistic', 'xgboost_regression_gamma' and 'xgboost_regression_squarederror':
* batch_size: Number of tuples in each processing batch
* booster: Which booster to use, e.g., gbtree, gblinear or dart (default: gbtree)
* tree_method: The tree construction algorithm used in XGBoost. Choices: auto, exact, approx, hist, gpu_hist (gpu_hist only supported with GPU)
* eval_metric: Evaluation metric for validation data, default is 'rmse'
* seed: Seed value for the random number generator
* nthread: Number of parallel threads used to run XGBoost
* max_depth: Maximum depth of a tree (default 6; valid only for tree boosters)
* gamma: Minimum loss reduction required to make a further partition on a leaf node of the tree
* eta: Step size shrinkage used in updates to prevent overfitting (default 0.3)
* min_child_weight: Minimum sum of instance weight (hessian) needed in a child (default 1)
* verbosity: Verbosity of printing messages: 0 (silent), 1 (warning), 2 (info), 3 (debug)
# Hyperparameter list for 'pca':
* max_iterations: Maximum iterations until convergence
* batch_size: Number of tuples in each processing batch
* max_seconds: Maximum number of seconds doing the optimization
* number_components: Number of components to keep; must be greater than or equal to 1, default 1
* tolerance: System stops when the percentage of changes between two iterations is below this percentage, default is 0.0005
* seed: Seed value for the random number generator
* verbose: 0 (no output), 1 (more output)
# Hyperparameter list for 'multiclass':
* classifier: Name of the gradient descent binary classifier; currently 'svm_classification' and 'logistic_regression' are supported
* plus all hyperparameters of the selected binary classifier
</synopsis>
</refsynopsisdiv>
</refentry>
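Assuming the CREATE MODEL statement documented earlier in this file (the full synopsis is not shown in this hunk, so the clause order is an assumption), a hypothetical training run of svm_classification with some of the hyperparameters listed above might look like:
    CREATE MODEL spam_filter USING svm_classification
        FEATURES msg_len, num_links TARGET is_spam
        FROM mail_stats
        WITH kernel = 'gaussian', gamma = 0.5, tolerance = 0.001, seed = 1;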

View File

@ -20,6 +20,8 @@ CREATE [ LARGE ] SEQUENCE <replaceable class="parameter">name</replaceable> [ IN
[ MINVALUE <replaceable class="parameter">minvalue</replaceable> | NO MINVALUE | NOMINVALUE] [ MAXVALUE <replaceable class="parameter">maxvalue</replaceable> | NO MAXVALUE | NOMAXVALUE]
[ START [ WITH ] <replaceable class="parameter">start</replaceable> ] [ CACHE <replaceable class="parameter">cache</replaceable> ] [ [ NO ] CYCLE | NOCYCLE]
[ OWNED BY { <replaceable class="parameter">table_name</replaceable>.<replaceable class="parameter">column_name</replaceable> | NONE } ];
NOTICE: '[ LARGE ]' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
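A minimal sketch (sequence, table, and column names are hypothetical; LARGE applies to centralized mode only):
    CREATE LARGE SEQUENCE order_id_seq
        INCREMENT BY 1 START WITH 1 CACHE 100 NOCYCLE
        OWNED BY orders.order_id;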

View File

@ -1,73 +1,55 @@
<refentry id="SQL-CREATE_TABLE">
<refmeta>
<refentrytitle>CREATE TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE TABLE</refname>
<refpurpose>define a new table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name
( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
| table_constraint
| LIKE source_table [ like_option [...] ] }
[, ... ])
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
[ COMPRESS | NOCOMPRESS ]
[ TABLESPACE tablespace_name ]
[ DISTRIBUTE BY { REPLICATION | { HASH ( column_name [,...] )
| RANGE ( column_name [,...] ) range_distribution_rules
| LIST ( column_name [,...] ) list_distribution_rules }
} ]
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ];
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
GENERATED ALWAYS AS ( generation_expr ) STORED |
UNIQUE index_parameters |
PRIMARY KEY index_parameters |
ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where table_constraint can be:
[ CONSTRAINT constraint_name ]
{ CHECK ( expression ) |
UNIQUE ( column_name [, ... ] ) index_parameters |
PRIMARY KEY ( column_name [, ... ] ) index_parameters |
PARTIAL CLUSTER KEY ( column_name [, ... ] ) |
FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
[ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where compress_mode can be:
{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
where like_option can be:
{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | PARTITION | RELOPTIONS | DISTRIBUTION | ALL }
where index_parameters can be:
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
where range_distribution_rules can be:
[ ( SLICE name VALUES LESS THAN (expression | MAXVALUE [, ... ]) [DATANODE datanode_name]
[, ... ] ) |
( SLICE name START (expression) END (expression) EVERY (expression) [DATANODE datanode_name]
[, ... ] ) |
SLICE REFERENCES table_name
]
where list_distribution_rules can be:
[ ( SLICE name VALUES (expression [, ... ]) [DATANODE datanode_name]
[, ... ] ) |
( SLICE name VALUES (DEFAULT) [DATANODE datanode_name] ) |
SLICE REFERENCES table_name
]
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-CREATE_TABLE">
<refmeta>
<refentrytitle>CREATE TABLE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE TABLE</refname>
<refpurpose>define a new table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name
( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
| table_constraint
| LIKE source_table [ like_option [...] ] }
[, ... ])
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
[ COMPRESS | NOCOMPRESS ]
[ TABLESPACE tablespace_name ];
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
GENERATED ALWAYS AS ( generation_expr ) STORED |
UNIQUE index_parameters |
PRIMARY KEY index_parameters |
ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where table_constraint can be:
[ CONSTRAINT constraint_name ]
{ CHECK ( expression ) |
UNIQUE ( column_name [, ... ] ) index_parameters |
PRIMARY KEY ( column_name [, ... ] ) index_parameters |
PARTIAL CLUSTER KEY ( column_name [, ... ] ) |
FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
[ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where compress_mode can be:
{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
where like_option can be:
{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | PARTITION | RELOPTIONS | DISTRIBUTION | ALL }
where index_parameters can be:
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
</synopsis>
</refsynopsisdiv>
</refentry>
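A hypothetical table definition exercising a few of the clauses above (names and the storage parameter value are illustrative):
    CREATE TABLE IF NOT EXISTS staging_orders (
        order_id    bigint PRIMARY KEY,
        customer_id bigint NOT NULL,
        amount      numeric(12,2) DEFAULT 0 CHECK (amount >= 0),
        note        text
    ) WITH (fillfactor = 90);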

View File

@ -0,0 +1,59 @@
<refentry id="SQL-CREATE_TABLE_SUBPARTITION">
<refmeta>
<refentrytitle>CREATE TABLE SUBPARTITION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>CREATE TABLE SUBPARTITION</refname>
<refpurpose>define a new table subpartition</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name
( { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ]
| table_constraint
| LIKE source_table [ like_option [...] ] }
[, ... ]
)
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ COMPRESS | NOCOMPRESS ]
[ TABLESPACE tablespace_name ]
PARTITION BY {RANGE | LIST | HASH} (partition_key) SUBPARTITION BY {RANGE | LIST | HASH} (subpartition_key)
(
PARTITION partition_name1 [ VALUES LESS THAN (val1) | VALUES (val1[, ...]) ] [ TABLESPACE tablespace ]
(
{ SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE tablespace ] } [, ...]
)
[, ...]
) [ { ENABLE | DISABLE } ROW MOVEMENT ];
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
GENERATED ALWAYS AS ( generation_expr ) STORED |
UNIQUE index_parameters |
PRIMARY KEY index_parameters |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where table_constraint can be:
[ CONSTRAINT constraint_name ]
{ CHECK ( expression ) |
UNIQUE ( column_name [, ... ] ) index_parameters |
PRIMARY KEY ( column_name [, ... ] ) index_parameters |
FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
[ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where like_option can be:
{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS | ALL }
where index_parameters can be:
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
NOTICE: 'CREATE TABLE SUBPARTITION' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
</refentry>
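A sketch of a range-list subpartitioned table (all names and values are hypothetical; this form is centralized-mode only):
    CREATE TABLE sales_subpart (
        sale_date date NOT NULL,
        region    text NOT NULL,
        amount    numeric(12,2)
    )
    PARTITION BY RANGE (sale_date) SUBPARTITION BY LIST (region)
    (
        PARTITION p_2022 VALUES LESS THAN ('2023-01-01')
        (
            SUBPARTITION p_2022_east VALUES ('east'),
            SUBPARTITION p_2022_other VALUES (DEFAULT)
        ),
        PARTITION p_2023 VALUES LESS THAN ('2024-01-01')
        (
            SUBPARTITION p_2023_east VALUES ('east'),
            SUBPARTITION p_2023_other VALUES (DEFAULT)
        )
    ) ENABLE ROW MOVEMENT;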

View File

@ -47,6 +47,8 @@ CREATE TYPE <replaceable class="parameter">name</replaceable>
CREATE TYPE name AS ENUM
( [ 'label' [, ... ] ] )
CREATE TYPE name AS TABLE OF data_type
</synopsis>
</refsynopsisdiv>
</refentry>
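Two short, hypothetical examples of the enum and table-of forms shown in this hunk:
    CREATE TYPE order_status AS ENUM ('new', 'paid', 'shipped');
    CREATE TYPE int_list AS TABLE OF integer;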

View File

@ -1,20 +1,28 @@
<refentry id="SQL-DELETE">
<refmeta>
<refentrytitle>DELETE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>DELETE</refname>
<refpurpose>delete rows of a table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
[ WITH [ RECURSIVE ] with_query [, ...] ]
DELETE [/*+ plan_hint */] FROM [ ONLY ] table_name [ * ] [ [ AS ] alias ]
[ USING using_list ]
[ WHERE condition | WHERE CURRENT OF cursor_name ] [ LIMIT row_count ]
[ RETURNING { * | { output_expr [ [ AS ] output_name ] } [, ...] } ];
</synopsis>
</refsynopsisdiv>
</refentry>
<refentry id="SQL-DELETE">
<refmeta>
<refentrytitle>DELETE</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>DELETE</refname>
<refpurpose>delete rows of a table</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
[ WITH [ RECURSIVE ] with_query [, ...] ]
DELETE [/*+ plan_hint */] FROM [ ONLY ] table_name [partition_clause] [ * ] [ [ AS ] alias ]
[ USING using_list ]
[ WHERE condition | WHERE CURRENT OF cursor_name ] [ LIMIT row_count ]
[ RETURNING { * | { output_expr [ [ AS ] output_name ] } [, ...] } ];
where with_query can be:
with_query_name [ ( column_name [, ...] ) ] AS [ [ NOT ] MATERIALIZED ]
( {select | values | insert | update | delete} )
where partition_clause can be:
PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } |
SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] ) }
NOTICE: 'partition_clause' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
</refentry>
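A hypothetical DELETE using the new partition_clause and LIMIT (table, partition, and column names are illustrative; partition_clause is centralized-mode only):
    DELETE FROM orders PARTITION (p_2019)
        WHERE status = 'cancelled'
        LIMIT 1000
        RETURNING order_id;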

View File

@ -0,0 +1,16 @@
<refentry id="SQL-DROP_GLOBAL_CONFIGURATION">
<refmeta>
<refentrytitle>DROP GLOBAL CONFIGURATION</refentrytitle>
<manvolnum>7</manvolnum>
<refmiscinfo>SQL - Language Statements</refmiscinfo>
</refmeta>
<refnamediv>
<refname>DROP GLOBAL CONFIGURATION</refname>
<refpurpose>drop records from gs_global_config</refpurpose>
</refnamediv>
<refsynopsisdiv>
<synopsis>
DROP GLOBAL CONFIGURATION paraname, paraname...;
</synopsis>
</refsynopsisdiv>
</refentry>
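For illustration only (the parameter names are placeholders, not real gs_global_config entries):
    DROP GLOBAL CONFIGURATION my_param1, my_param2;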

View File

@ -17,6 +17,8 @@
<refsynopsisdiv>
<synopsis>
DROP [ LARGE ] SEQUENCE [ IF EXISTS ] { [schema.] sequence_name } [, ...] [ CASCADE | RESTRICT ];
NOTICE: '[ LARGE ]' is only available in CENTRALIZED mode!
</synopsis>
</refsynopsisdiv>
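A one-line sketch (schema and sequence names are hypothetical; LARGE is centralized-mode only):
    DROP LARGE SEQUENCE IF EXISTS sales.order_id_seq CASCADE;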

Some files were not shown because too many files have changed in this diff.