update version to 3.1.0

zhang_xubo 2022-09-03 16:42:53 +08:00
parent b919f404e8
commit ffd35fc5d1
41 changed files with 393 additions and 2994 deletions

View File

@@ -904,4 +904,4 @@
./include/pqcomm.h
./include/pqexpbuffer.h
[version]
3.0.0
3.1.0

View File

@@ -1,2 +1,2 @@
PRODUCT=openGauss
VERSION=3.0.0
VERSION=3.1.0

View File

@@ -903,4 +903,4 @@
./include/pqcomm.h
./include/pqexpbuffer.h
[version]
3.0.0
3.1.0

View File

@@ -198,7 +198,7 @@ ENDMACRO(CHECK_CC_ENABLE)
function(GET_VERSIONSTR_FROMGIT ret)
set(PG_VERSION "9.2.4")
set(OPENGAUSS_VERSION "3.0.0")
set(OPENGAUSS_VERSION "3.1.0")
execute_process(
COMMAND ${CMAKE_SOURCE_DIR}/${openGauss}/cmake/src/buildfunction.sh --s ${PROJECT_TRUNK_DIR} OUTPUT_VARIABLE GS_VERSION_STR)
set(PG_VERSION "${PG_VERSION}" PARENT_SCOPE)

View File

@@ -714,7 +714,7 @@
#define PGXC_VERSION_NUM
/* openGauss version as a number string */
#define OPENGAUSS_VERSION_NUM_STR "3.0.0"
#define OPENGAUSS_VERSION_NUM_STR "3.1.0"
/* A string containing the version number, platform, and C compiler */
#define PG_VERSION_STR "@PG_VERSION_STR@"

View File

@@ -695,7 +695,7 @@
#define PG_VERSION "9.2.4"
/* openGauss version as a string */
#define OPENGAUSS_VERSION "3.0.0"
#define OPENGAUSS_VERSION "3.1.0"
/* Gaussdb version as a string*/
#define DEF_GS_VERSION "(GaussDB A 8.0.0 build 21f07aff) compiled at 2020-03-17 10:59:07 commit 7431 last mr 12039 debug"

View File

@@ -63,7 +63,7 @@ set(NANOMSG_HOME ${DEPENDENCY_PATH}/nng/${LIB_UNIFIED_SUPPORT})
set(NCURSES_HOME ${DEPENDENCY_PATH}/ncurses/${SUPPORT_LLT})
set(OPENSSL_HOME ${DEPENDENCY_PATH}/openssl/${LIB_UNIFIED_SUPPORT})
set(PLJAVA_HOME ${DEPENDENCY_PATH}/pljava/${LIB_UNIFIED_SUPPORT})
if (EXISTS "${3RD_PATH}/platform/openjdk8/${BUILD_TUPLE}/jdk")
if (EXISTS "${PLATFORM_PATH}/openjdk8/${BUILD_TUPLE}/jdk")
set(JAVA_HOME ${PLATFORM_PATH}/openjdk8/${BUILD_TUPLE}/jdk)
else()
set(JAVA_HOME ${PLATFORM_PATH}/huaweijdk8/${BUILD_TUPLE}/jdk)

configure (vendored)
View File

@@ -2195,7 +2195,7 @@ PACKAGE_VERSION='9.2.4'
# Postgres-XC 1.1devel is based on PostgreSQL 9.2.4
PACKAGE_XC_VERSION='1.1'
# openGauss is based on PostgreSQL 9.2.4 and it will be the Kernel of GaussDB database
OPENGAUSS_VERSION='3.0.0'
OPENGAUSS_VERSION='3.1.0'
cat >>confdefs.h <<_ACEOF
#define PG_VERSION "$PACKAGE_VERSION"

View File

@@ -0,0 +1,321 @@
--
-- Test exchange operator for interval partitioned table
--
--
---- create interval partitioned table
--
CREATE TABLE interval_normal_exchange (logdate date not null)
PARTITION BY RANGE (logdate)
INTERVAL ('1 month')
(
PARTITION interval_normal_exchange_p1 VALUES LESS THAN ('2020-03-01'),
PARTITION interval_normal_exchange_p2 VALUES LESS THAN ('2020-04-01'),
PARTITION interval_normal_exchange_p3 VALUES LESS THAN ('2020-05-01')
);
-- check the info of the partitioned table in pg_partition
select relname, parttype, partstrategy, boundaries from pg_partition
where parentid = (select oid from pg_class where relname = 'interval_normal_exchange')
order by relname;
relname | parttype | partstrategy | boundaries
-----------------------------+----------+--------------+--------------
interval_normal_exchange | r | i |
interval_normal_exchange_p1 | p | r | {2020-03-01}
interval_normal_exchange_p2 | p | r | {2020-04-01}
interval_normal_exchange_p3 | p | r | {2020-05-01}
(4 rows)
-- insert records below the first range boundary and beyond the last boundary
insert into interval_normal_exchange values ('2020-02-21');
insert into interval_normal_exchange values ('2020-02-22');
insert into interval_normal_exchange values ('2020-02-23');
insert into interval_normal_exchange values ('2020-5-01');
insert into interval_normal_exchange values ('2020-5-02');
insert into interval_normal_exchange values ('2020-5-03');
-- check the info of the partitioned table in pg_partition
select relname, parttype, partstrategy, boundaries from pg_partition
where parentid = (select oid from pg_class where relname = 'interval_normal_exchange')
order by relname;
relname | parttype | partstrategy | boundaries
-----------------------------+----------+--------------+------------------------------
interval_normal_exchange | r | i |
interval_normal_exchange_p1 | p | r | {2020-03-01}
interval_normal_exchange_p2 | p | r | {2020-04-01}
interval_normal_exchange_p3 | p | r | {2020-05-01}
sys_p1 | p | i | {"Mon Jun 01 00:00:00 2020"}
(5 rows)
--
---- create to be exchanged table and test range partition exchange
--
CREATE TABLE interval_exchange_test (logdate date not null);
insert into interval_exchange_test values ('2020-02-24');
insert into interval_exchange_test values ('2020-02-25');
insert into interval_exchange_test values ('2020-02-26');
-- exchange partition interval_normal_exchange_p1 with table interval_exchange_test
-- The data they hold belongs to the same range.
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (interval_normal_exchange_p1)
WITH TABLE interval_exchange_test;
select * from interval_normal_exchange partition (interval_normal_exchange_p1)order by logdate;
logdate
--------------------------
Mon Feb 24 00:00:00 2020
Tue Feb 25 00:00:00 2020
Wed Feb 26 00:00:00 2020
(3 rows)
select * from interval_exchange_test order by logdate;
logdate
--------------------------
Fri Feb 21 00:00:00 2020
Sat Feb 22 00:00:00 2020
Sun Feb 23 00:00:00 2020
(3 rows)
-- exchange back
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (interval_normal_exchange_p1)
WITH TABLE interval_exchange_test;
select * from interval_normal_exchange partition (interval_normal_exchange_p1)order by logdate;
logdate
--------------------------
Fri Feb 21 00:00:00 2020
Sat Feb 22 00:00:00 2020
Sun Feb 23 00:00:00 2020
(3 rows)
select * from interval_exchange_test order by logdate;
logdate
--------------------------
Mon Feb 24 00:00:00 2020
Tue Feb 25 00:00:00 2020
Wed Feb 26 00:00:00 2020
(3 rows)
-- Insert a new record that does not belong to interval_normal_exchange_p1
insert into interval_exchange_test values ('2020-3-05');
-- default is WITH VALIDATION, so the exchange will fail
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (interval_normal_exchange_p1)
WITH TABLE interval_exchange_test;
ERROR: some rows in table do not qualify for specified partition
-- WITHOUT VALIDATION the exchange will succeed, but some data will end up in the wrong range;
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (interval_normal_exchange_p1)
WITH TABLE interval_exchange_test WITHOUT VALIDATION;
select * from interval_normal_exchange partition (interval_normal_exchange_p1)order by logdate;
logdate
--------------------------
Mon Feb 24 00:00:00 2020
Tue Feb 25 00:00:00 2020
Wed Feb 26 00:00:00 2020
Thu Mar 05 00:00:00 2020
(4 rows)
select * from interval_exchange_test order by logdate;
logdate
--------------------------
Fri Feb 21 00:00:00 2020
Sat Feb 22 00:00:00 2020
Sun Feb 23 00:00:00 2020
(3 rows)
-- does not include '2020-3-05'
select * from interval_normal_exchange where logdate > '2020-03-01' order by logdate;
logdate
--------------------------
Fri May 01 00:00:00 2020
Sat May 02 00:00:00 2020
Sun May 03 00:00:00 2020
(3 rows)
--
---- clean the data and test interval partition exchange
--
truncate table interval_exchange_test;
insert into interval_exchange_test values ('2020-5-04');
insert into interval_exchange_test values ('2020-5-05');
insert into interval_exchange_test values ('2020-5-06');
-- exchange table
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (sys_p1)
WITH TABLE interval_exchange_test;
select * from interval_normal_exchange partition (sys_p1)order by logdate;
logdate
--------------------------
Mon May 04 00:00:00 2020
Tue May 05 00:00:00 2020
Wed May 06 00:00:00 2020
(3 rows)
select * from interval_exchange_test order by logdate;
logdate
--------------------------
Fri May 01 00:00:00 2020
Sat May 02 00:00:00 2020
Sun May 03 00:00:00 2020
(3 rows)
-- exchange back
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (sys_p1)
WITH TABLE interval_exchange_test;
select * from interval_normal_exchange partition (sys_p1)order by logdate;
logdate
--------------------------
Fri May 01 00:00:00 2020
Sat May 02 00:00:00 2020
Sun May 03 00:00:00 2020
(3 rows)
select * from interval_exchange_test order by logdate;
logdate
--------------------------
Mon May 04 00:00:00 2020
Tue May 05 00:00:00 2020
Wed May 06 00:00:00 2020
(3 rows)
insert into interval_exchange_test values ('2020-6-05');
-- default is WITH VALIDATION, so the exchange will fail
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (interval_normal_exchange_p1)
WITH TABLE interval_exchange_test;
ERROR: some rows in table do not qualify for specified partition
-- WITHOUT VALIDATION the exchange will succeed, but some data will end up in the wrong range;
ALTER TABLE interval_normal_exchange EXCHANGE PARTITION (interval_normal_exchange_p1)
WITH TABLE interval_exchange_test WITHOUT VALIDATION;
select * from interval_normal_exchange partition (interval_normal_exchange_p1)order by logdate;
logdate
--------------------------
Mon May 04 00:00:00 2020
Tue May 05 00:00:00 2020
Wed May 06 00:00:00 2020
Fri Jun 05 00:00:00 2020
(4 rows)
select * from interval_exchange_test order by logdate;
logdate
--------------------------
Mon Feb 24 00:00:00 2020
Tue Feb 25 00:00:00 2020
Wed Feb 26 00:00:00 2020
Thu Mar 05 00:00:00 2020
(4 rows)
-- does not include '2020-6-05'
select * from interval_normal_exchange order by logdate;
logdate
--------------------------
Fri May 01 00:00:00 2020
Sat May 02 00:00:00 2020
Sun May 03 00:00:00 2020
Mon May 04 00:00:00 2020
Tue May 05 00:00:00 2020
Wed May 06 00:00:00 2020
Fri Jun 05 00:00:00 2020
(7 rows)
select * from interval_normal_exchange where logdate > '2020-06-01' order by logdate;
logdate
---------
(0 rows)
drop table interval_normal_exchange;
drop table table_001;
ERROR: table "table_001" does not exist
create table table_001(
COL_1 smallint,
COL_2 char(5),
COL_3 int,
COL_4 date,
COL_5 boolean,
COL_6 nchar(5),
COL_7 float
);
drop table partition_table_001;
ERROR: table "partition_table_001" does not exist
create table partition_table_001(
COL_1 smallint,
COL_2 char(5),
COL_3 int,
COL_4 date,
COL_5 boolean,
COL_6 nchar(5),
COL_7 float
)
PARTITION BY RANGE (COL_4)
INTERVAL ('1 month')
(
PARTITION partition_table_001_p1 VALUES LESS THAN ('2020-03-01'),
PARTITION partition_table_001_p2 VALUES LESS THAN ('2020-04-01'),
PARTITION partition_table_001_p3 VALUES LESS THAN ('2020-05-01')
);
insert into partition_table_001 values (1,'aaa',1,'2020-02-23',true,'aaa',1.1);
insert into partition_table_001 values (2,'bbb',2,'2020-03-23',false,'bbb',2.2);
insert into partition_table_001 values (3,'ccc',3,'2020-04-23',true,'ccc',3.3);
insert into partition_table_001 values (4,'ddd',4,'2020-05-23',false,'ddd',4.4);
insert into partition_table_001 values (5,'eee',5,'2020-06-23',true,'eee',5.5);
insert into partition_table_001 values (6,'fff',6,'2020-07-23',false,'fff',6.6);
ALTER TABLE partition_table_001 EXCHANGE PARTITION (sys_p1) WITH TABLE table_001;
select * from table_001 order by 1;
col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7
-------+-------+-------+--------------------------+-------+-------+-------
4 | ddd | 4 | Sat May 23 00:00:00 2020 | f | ddd | 4.4
(1 row)
select * from partition_table_001 order by 1;
col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7
-------+-------+-------+--------------------------+-------+-------+-------
1 | aaa | 1 | Sun Feb 23 00:00:00 2020 | t | aaa | 1.1
2 | bbb | 2 | Mon Mar 23 00:00:00 2020 | f | bbb | 2.2
3 | ccc | 3 | Thu Apr 23 00:00:00 2020 | t | ccc | 3.3
5 | eee | 5 | Tue Jun 23 00:00:00 2020 | t | eee | 5.5
6 | fff | 6 | Thu Jul 23 00:00:00 2020 | f | fff | 6.6
(5 rows)
select relname, parttype, partstrategy, boundaries from pg_partition
where parentid = (select oid from pg_class where relname = 'partition_table_001')
order by relname;
relname | parttype | partstrategy | boundaries
------------------------+----------+--------------+------------------------------
partition_table_001 | r | i |
partition_table_001_p1 | p | r | {2020-03-01}
partition_table_001_p2 | p | r | {2020-04-01}
partition_table_001_p3 | p | r | {2020-05-01}
sys_p1 | p | i | {"Mon Jun 01 00:00:00 2020"}
sys_p2 | p | i | {"Wed Jul 01 00:00:00 2020"}
sys_p3 | p | i | {"Sat Aug 01 00:00:00 2020"}
(7 rows)
ALTER TABLE partition_table_001 EXCHANGE PARTITION (sys_p1) WITH TABLE table_001;
select * from table_001 order by 1;
col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7
-------+-------+-------+-------+-------+-------+-------
(0 rows)
select * from partition_table_001 order by 1;
col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7
-------+-------+-------+--------------------------+-------+-------+-------
1 | aaa | 1 | Sun Feb 23 00:00:00 2020 | t | aaa | 1.1
2 | bbb | 2 | Mon Mar 23 00:00:00 2020 | f | bbb | 2.2
3 | ccc | 3 | Thu Apr 23 00:00:00 2020 | t | ccc | 3.3
4 | ddd | 4 | Sat May 23 00:00:00 2020 | f | ddd | 4.4
5 | eee | 5 | Tue Jun 23 00:00:00 2020 | t | eee | 5.5
6 | fff | 6 | Thu Jul 23 00:00:00 2020 | f | fff | 6.6
(6 rows)
insert into table_001 values (7,'eee',7,'2020-08-23',true,'eee',7.7);
ALTER TABLE partition_table_001 EXCHANGE PARTITION (sys_p1) WITH TABLE table_001 with validation verbose;
select * from table_001 order by 1;
col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7
-------+-------+-------+--------------------------+-------+-------+-------
4 | ddd | 4 | Sat May 23 00:00:00 2020 | f | ddd | 4.4
(1 row)
select * from partition_table_001 order by 1;
col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7
-------+-------+-------+--------------------------+-------+-------+-------
1 | aaa | 1 | Sun Feb 23 00:00:00 2020 | t | aaa | 1.1
2 | bbb | 2 | Mon Mar 23 00:00:00 2020 | f | bbb | 2.2
3 | ccc | 3 | Thu Apr 23 00:00:00 2020 | t | ccc | 3.3
5 | eee | 5 | Tue Jun 23 00:00:00 2020 | t | eee | 5.5
6 | fff | 6 | Thu Jul 23 00:00:00 2020 | f | fff | 6.6
7 | eee | 7 | Sun Aug 23 00:00:00 2020 | t | eee | 7.7
(6 rows)
drop table table_001;
drop table partition_table_001;
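
A condensed sketch of the pattern this test exercises (the names demo_parted and demo_plain below are hypothetical, not part of the commit): rows inserted beyond the last range boundary create sys_pN interval partitions automatically, and EXCHANGE PARTITION swaps a partition's contents with a plain table of the same definition.

-- Hypothetical names; mirrors the ALTER TABLE forms used in the test above.
-- WITH VALIDATION (the default) rejects rows outside the partition's range:
ALTER TABLE demo_parted EXCHANGE PARTITION (demo_parted_p1) WITH TABLE demo_plain;
-- WITHOUT VALIDATION skips that check, so out-of-range rows land in the partition:
ALTER TABLE demo_parted EXCHANGE PARTITION (demo_parted_p1) WITH TABLE demo_plain WITHOUT VALIDATION;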

View File

@@ -336,9 +336,9 @@ endif
PLJAVA_HOME = $(with_3rd)/$(BINARYPATH)/pljava/$(LIB_SUPPORT_LLT)
using_openjdk = $(shell if [ -d "$(with_3rd)/$(PLATFORMPATH)/openjdk8" ]; then echo "yes"; else echo "no"; fi;)
ifeq ($(using_openjdk), yes)
JAVA_HOME = $(with_3rd)/$(PLATFORMPATH)/openjdk8/jdk1.8.0_222
JAVA_HOME = $(with_3rd)/${PLATFORMPATH}/openjdk8/$(shell uname -p)/jdk
else
JAVA_HOME = $(with_3rd)/$(PLATFORMPATH)/huaweijdk8/x86_64/jdk
JAVA_HOME = $(with_3rd)/${PLATFORMPATH}/huaweijdk8/$(shell uname -p)/jdk
endif
MASSTREE_HOME = $(with_3rd)/$(BINARYPATH)/masstree/comm
MYFDW_HOME = $(with_3rd)/$(BINARYPATH)/mysql_fdw

View File

@@ -88,7 +88,7 @@ function logo()
echo " | | | | (_| | | | | ____) | | | | | | | |_| | | (_| | || (_) | | "
echo " |_| |_|\__,_|_| |_| |_____/|_|_| |_| |_|\__,_|_|\__,_|\__|___/|_| "
echo " "
echo " Version 3.0.0"
echo " Version 3.1.0"
echo "--------------------------------------------------------------------------"
}

View File

@@ -297,7 +297,7 @@ static void show_version(int alls)
if (alls) {
printf("OPENGAUSS_VERSION = ");
}
printf("openGauss 3.0.0\n");
printf("openGauss 3.1.0\n");
#endif
}
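
The bumped version is also visible server-side; a minimal, hedged illustration (the exact output string is assumed, not shown in this commit):

-- Assumption: the string returned by version() embeds the openGauss version number.
SELECT version();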

View File

@@ -201,8 +201,7 @@ void extract_graph(const char *edges_of_network, int num_of_edge, int num_nodes,
}
if (!validateParamEdges(node_pos, edge_size, substr, substr_len)) {
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("Incorrect Hyperparameters (edges_of_network unmatch with num_edges), \
parse failed.")));
errmsg("Incorrect Hyperparameters (edges_of_network unmatch with num_edges), parse failed.")));
}
all_edges[node_pos++] = atoi(substr);
start = end + 1;
@@ -219,8 +218,7 @@ void extract_graph(const char *edges_of_network, int num_of_edge, int num_nodes,
substr[substr_len] = '\0';
if (!validateParamEdges(node_pos, edge_size, substr, substr_len) || node_pos != edge_size - 1) {
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("Incorrect Hyperparameters (edges_of_network should match with num_edges), \
parse failed.")));
errmsg("Incorrect Hyperparameters (edges_of_network should match with num_edges), parse failed.")));
}
all_edges[node_pos] = atoi(substr);
if (!validateParamNodes(all_edges, edge_size, num_nodes)) {

View File

@@ -1,496 +0,0 @@
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir() CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir(oid) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_waldir() CASCADE;
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
-----------------------------------------------------------------------------------------------------------------------------------------------------
DROP VIEW IF EXISTS DBE_PERF.local_active_session cascade;
DROP FUNCTION IF EXISTS pg_catalog.get_local_active_session(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text, OUT xact_start_time timestamp with time zone, OUT query_start_time timestamp with time zone, OUT state text) cascade;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5721;
CREATE OR REPLACE FUNCTION pg_catalog.get_local_active_session
(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text)
RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_local_active_session';
CREATE OR REPLACE VIEW DBE_PERF.local_active_session AS
WITH RECURSIVE
las(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid)
AS (select t.* from get_local_active_session() as t),
tt(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid, final_block_sessionid, level, head)
AS(SELECT las.*, las.block_sessionid AS final_block_sessionid, 1 AS level, array_append('{}', las.sessionid) AS head FROM las
UNION ALL
SELECT tt.sampleid, tt.sample_time, tt.need_flush_sample, tt.databaseid, tt.thread_id, tt.sessionid, tt.start_time, tt.event, tt.lwtid, tt.psessionid,
tt.tlevel, tt.smpid, tt.userid, tt.application_name, tt.client_addr, tt.client_hostname, tt.client_port, tt.query_id, tt.unique_query_id,
tt.user_id, tt.cn_id, tt.unique_query, tt.locktag, tt.lockmode, tt.block_sessionid, tt.wait_status, tt.global_sessionid, las.block_sessionid AS final_block_sessionid, tt.level + 1 AS level, array_append(tt.head, las.sessionid) AS head
FROM tt INNER JOIN las ON tt.final_block_sessionid = las.sessionid
WHERE las.sampleid = tt.sampleid AND (las.block_sessionid IS NOT NULL OR las.block_sessionid != 0)
AND las.sessionid != all(head) AND las.sessionid != las.block_sessionid)
SELECT sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, final_block_sessionid, wait_status, global_sessionid FROM tt
WHERE level = (SELECT MAX(level) FROM tt t1 WHERE t1.sampleid = tt.sampleid AND t1.sessionid = tt.sessionid);
end if;
END$DO$;
DO $DO$
DECLARE
ans boolean;
user_name text;
query_str text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
SELECT SESSION_USER INTO user_name;
query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.local_active_session TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE query_str;
GRANT SELECT ON TABLE DBE_PERF.local_active_session TO PUBLIC;
GRANT SELECT ON TABLE pg_catalog.gs_asp TO PUBLIC;
end if;
END$DO$;
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade;
DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade;
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
end if;
END$DO$;
DO $$
DECLARE
query_str text;
ans bool;
BEGIN
select case when count(*)=1 then true else false end as ans from
(select *from pg_class where relname='snapshot_sequence' and relnamespace = 4991) into ans;
if ans = true then
query_str := 'DROP SEQUENCE db4ai.snapshot_sequence;';
EXECUTE IMMEDIATE query_str;
end if;
END$$;DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(pid bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(pid bigint)
RETURNS SETOF text
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9998;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade;
CREATE VIEW pg_catalog.gs_session_cpu_statistics AS
SELECT
S.datid AS datid,
S.usename,
S.pid,
S.query_start AS start_time,
T.min_cpu_time,
T.max_cpu_time,
T.total_cpu_time,
S.query,
S.node_group,
T.top_cpu_dn
FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T
WHERE S.sessionid = T.threadid;
GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC;
do $$DECLARE ans boolean;
BEGIN
for ans in select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_sql_util' limit 1)
LOOP
if ans = true then
DROP FUNCTION IF EXISTS dbe_sql_util.create_hint_sql_patch(name, bigint, text, text, boolean);
DROP FUNCTION IF EXISTS dbe_sql_util.create_abort_sql_patch(name, bigint, text, boolean);
DROP FUNCTION IF EXISTS dbe_sql_util.enable_sql_patch(name);
DROP FUNCTION IF EXISTS dbe_sql_util.disable_sql_patch(name);
DROP FUNCTION IF EXISTS dbe_sql_util.show_sql_patch(name);
DROP FUNCTION IF EXISTS dbe_sql_util.drop_sql_patch(name);
end if;
exit;
END LOOP;
END$$;
DROP SCHEMA IF EXISTS dbe_sql_util cascade;
DROP INDEX IF EXISTS pg_catalog.gs_sql_patch_unique_sql_id_index;
DROP INDEX IF EXISTS pg_catalog.gs_sql_patch_patch_name_index;
DROP TYPE IF EXISTS pg_catalog.gs_sql_patch;
DROP TABLE IF EXISTS pg_catalog.gs_sql_patch;CREATE OR REPLACE FUNCTION pg_catalog.gs_session_memory_detail_tp(OUT sessid TEXT, OUT sesstype TEXT, OUT contextname TEXT, OUT level INT2, OUT parent TEXT, OUT totalsize INT8, OUT freesize INT8, OUT usedsize INT8)
RETURNS setof record
AS $$
DECLARE
enable_threadpool bool;
row_data record;
query_str text;
BEGIN
show enable_thread_pool into enable_threadpool;
IF enable_threadpool THEN
query_str := 'with SM AS
(SELECT
S.sessid AS sessid,
T.thrdtype AS sesstype,
S.contextname AS contextname,
S.level AS level,
S.parent AS parent,
S.totalsize AS totalsize,
S.freesize AS freesize,
S.usedsize AS usedsize
FROM
gs_session_memory_context S
LEFT JOIN
(SELECT DISTINCT thrdtype, tid
FROM gs_thread_memory_context) T
on S.threadid = T.tid
),
TM AS
(SELECT
S.sessid AS Ssessid,
T.thrdtype AS sesstype,
T.threadid AS Tsessid,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM
gs_thread_memory_context T
LEFT JOIN
(SELECT DISTINCT sessid, threadid
FROM gs_session_memory_context) S
ON T.tid = S.threadid
)
SELECT * from SM
UNION
SELECT
Ssessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NOT NULL
UNION
SELECT
Tsessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NULL;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
ELSE
query_str := 'SELECT
T.threadid AS sessid,
T.thrdtype AS sesstype,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM pg_catalog.pv_thread_memory_detail() T;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
END IF;
RETURN;
END; $$
LANGUAGE plpgsql NOT FENCED;DROP FUNCTION IF EXISTS pg_catalog.gs_get_history_memory_detail() cascade;DROP FUNCTION IF EXISTS gs_is_dw_io_blocked() CASCADE;
DROP FUNCTION IF EXISTS gs_block_dw_io(integer, text) CASCADE;
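
For reference, a minimal usage sketch of the recreated DBE_PERF.get_global_slow_sql_by_timestamp function (the one-hour window and the LIMIT are illustrative assumptions, not part of this commit):

-- Illustrative only: slow SQL recorded on all nodes during the last hour.
SELECT node_name, db_name, query, execution_time, lock_wait_time
FROM DBE_PERF.get_global_slow_sql_by_timestamp(now() - interval '1 hour', now())
ORDER BY execution_time DESC
LIMIT 10;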

View File

@@ -1,138 +0,0 @@
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7732;
CREATE OR REPLACE FUNCTION pg_catalog.get_client_info()
RETURNS text
LANGUAGE internal
STABLE STRICT NOT FENCED SHIPPABLE
AS $function$get_client_info$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
DO $$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select * from pg_extension where extname = 'security_plugin' limit 1) into ans;
if ans = true then
drop extension if exists security_plugin cascade;
create extension security_plugin;
end if;
END$$;
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
user_name text;
grant_query text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
-----------------------------------------------------------------------------
-- DROP: pg_catalog.pg_replication_slots
DROP VIEW IF EXISTS pg_catalog.pg_replication_slots CASCADE;
-- DROP: pg_get_replication_slots()
DROP FUNCTION IF EXISTS pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean, OUT confirmed_flush text) CASCADE;
-- DROP: gs_get_parallel_decode_status()
DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text, OUT reader_lsn text, OUT working_txn_cnt int8, OUT working_txn_memory int8) CASCADE;
-----------------------------------------------------------------------------
-- CREATE: pg_get_replication_slots
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3784;
CREATE OR REPLACE FUNCTION pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean) RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'pg_get_replication_slots';
COMMENT ON FUNCTION pg_catalog.pg_get_replication_slots() is 'information about replication slots currently in use';
-- CREATE: pg_catalog.pg_replication_slots
CREATE VIEW pg_catalog.pg_replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby
FROM pg_catalog.pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
-- CREATE: dbe_perf.replication_slots
IF ans = true THEN
CREATE OR REPLACE VIEW dbe_perf.replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby
FROM pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
END IF;
-- CREATE: gs_get_parallel_decode_status
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9377;
CREATE OR REPLACE FUNCTION pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text)
RETURNS SETOF RECORD
LANGUAGE internal
AS $function$gs_get_parallel_decode_status$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
-----------------------------------------------------------------------------
-- privileges
SELECT SESSION_USER INTO user_name;
-- dbe_perf
IF ans = true THEN
grant_query := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON dbe_perf.replication_slots TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE grant_query;
GRANT SELECT ON dbe_perf.replication_slots TO PUBLIC;
END IF;
-- pg_catalog
GRANT SELECT ON pg_catalog.pg_replication_slots TO PUBLIC;
-----------------------------------------------------------------------------
END$DO$;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_zone(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_spaces(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_slot(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_record(bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_xid(xid) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_parsepage_mv(text, bigint, text, boolean) CASCADE;DO
$do$
DECLARE
query_str text;
ans boolean;
old_version boolean;
has_version_proc boolean;
BEGIN
FOR ans in select case when count(*) = 1 then true else false end as ans from (select 1 from pg_catalog.pg_extension where extname = 'hdfs_fdw' limit 1) LOOP
IF ans = false then
select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc;
IF has_version_proc = true then
select working_version_num < 92626 as old_version from working_version_num() into old_version;
IF old_version = true then
raise info 'Processing hdfs extension';
query_str := 'CREATE EXTENSION IF NOT EXISTS hdfs_fdw;';
EXECUTE query_str;
END IF;
END IF;
END IF;
END LOOP;
END
$do$;
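
A minimal usage sketch of the pg_replication_slots view recreated above (purely illustrative; the column list follows the view definition in this script):

-- Illustrative only: inspect replication slots after the upgrade script runs.
SELECT slot_name, plugin, slot_type, database, active, restart_lsn, dummy_standby
FROM pg_catalog.pg_replication_slots;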

View File

@@ -8,3 +8,25 @@ DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(oid, oid, oid, smal
DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_from_remote(oid, oid, oid, smallint, smallint, integer, integer, integer, xid, integer, OUT bytea, OUT xid) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, smallint, smallint, integer, xid, integer, OUT bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.pg_read_binary_file_blocks(IN input text, IN blocknum bigint, IN blockcount bigint, OUT path text, OUT blocknum integer, OUT len integer, OUT algorithm integer, OUT chunk_size integer, OUT data bytea) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5843;
CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote
( int4,
int4,
int4,
int2,
int2,
int4,
xid,
int4,
xid,
boolean,
int4)
RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_read_block_from_remote_compress';
DROP FUNCTION IF EXISTS pg_catalog.pg_read_binary_file_blocks(IN input text, IN blocknum bigint, IN blockcount bigint, OUT path text, OUT blocknum integer, OUT len integer, OUT algorithm integer, OUT chunk_size integer, OUT data bytea) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 8413;
CREATE FUNCTION pg_catalog.pg_read_binary_file_blocks(IN inputpath text, IN startblocknum int8, IN count int8,
OUT path text,
OUT blocknum int4,
OUT len int4,
OUT data bytea)
AS 'pg_read_binary_file_blocks' LANGUAGE INTERNAL IMMUTABLE STRICT;
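
A minimal usage sketch of the recreated pg_read_binary_file_blocks function (the relation file path below is hypothetical, not taken from this commit):

-- Illustrative only: read the first block of a relation file.
SELECT path, blocknum, len, octet_length(data) AS data_bytes
FROM pg_catalog.pg_read_binary_file_blocks('base/15254/16384', 0, 1);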

View File

@@ -1,496 +0,0 @@
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir() CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir(oid) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_waldir() CASCADE;
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
-----------------------------------------------------------------------------------------------------------------------------------------------------
DROP VIEW IF EXISTS DBE_PERF.local_active_session cascade;
DROP FUNCTION IF EXISTS pg_catalog.get_local_active_session(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text, OUT xact_start_time timestamp with time zone, OUT query_start_time timestamp with time zone, OUT state text) cascade;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5721;
CREATE OR REPLACE FUNCTION pg_catalog.get_local_active_session
(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text)
RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_local_active_session';
CREATE OR REPLACE VIEW DBE_PERF.local_active_session AS
WITH RECURSIVE
las(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid)
AS (select t.* from get_local_active_session() as t),
tt(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid, final_block_sessionid, level, head)
AS(SELECT las.*, las.block_sessionid AS final_block_sessionid, 1 AS level, array_append('{}', las.sessionid) AS head FROM las
UNION ALL
SELECT tt.sampleid, tt.sample_time, tt.need_flush_sample, tt.databaseid, tt.thread_id, tt.sessionid, tt.start_time, tt.event, tt.lwtid, tt.psessionid,
tt.tlevel, tt.smpid, tt.userid, tt.application_name, tt.client_addr, tt.client_hostname, tt.client_port, tt.query_id, tt.unique_query_id,
tt.user_id, tt.cn_id, tt.unique_query, tt.locktag, tt.lockmode, tt.block_sessionid, tt.wait_status, tt.global_sessionid, las.block_sessionid AS final_block_sessionid, tt.level + 1 AS level, array_append(tt.head, las.sessionid) AS head
FROM tt INNER JOIN las ON tt.final_block_sessionid = las.sessionid
WHERE las.sampleid = tt.sampleid AND (las.block_sessionid IS NOT NULL OR las.block_sessionid != 0)
AND las.sessionid != all(head) AND las.sessionid != las.block_sessionid)
SELECT sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, final_block_sessionid, wait_status, global_sessionid FROM tt
WHERE level = (SELECT MAX(level) FROM tt t1 WHERE t1.sampleid = tt.sampleid AND t1.sessionid = tt.sessionid);
end if;
END$DO$;
DO $DO$
DECLARE
ans boolean;
user_name text;
query_str text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
SELECT SESSION_USER INTO user_name;
query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.local_active_session TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE query_str;
GRANT SELECT ON TABLE DBE_PERF.local_active_session TO PUBLIC;
GRANT SELECT ON TABLE pg_catalog.gs_asp TO PUBLIC;
end if;
END$DO$;
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade;
DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade;
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
end if;
END$DO$;
DO $$
DECLARE
query_str text;
ans bool;
BEGIN
select case when count(*)=1 then true else false end as ans from
(select *from pg_class where relname='snapshot_sequence' and relnamespace = 4991) into ans;
if ans = true then
query_str := 'DROP SEQUENCE db4ai.snapshot_sequence;';
EXECUTE IMMEDIATE query_str;
end if;
END$$;DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(pid bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(pid bigint)
RETURNS SETOF text
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9998;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade;
CREATE VIEW pg_catalog.gs_session_cpu_statistics AS
SELECT
S.datid AS datid,
S.usename,
S.pid,
S.query_start AS start_time,
T.min_cpu_time,
T.max_cpu_time,
T.total_cpu_time,
S.query,
S.node_group,
T.top_cpu_dn
FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T
WHERE S.sessionid = T.threadid;
GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC;
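-- Usage sketch (commented out): the view can be used to spot CPU-heavy sessions;
-- the ordering and row limit are illustrative choices, not part of the upgrade.
-- SELECT pid, usename, total_cpu_time, query
--   FROM pg_catalog.gs_session_cpu_statistics
--   ORDER BY total_cpu_time DESC LIMIT 10;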
do $$DECLARE ans boolean;
BEGIN
for ans in select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_sql_util' limit 1)
LOOP
if ans = true then
DROP FUNCTION IF EXISTS dbe_sql_util.create_hint_sql_patch(name, bigint, text, text, boolean);
DROP FUNCTION IF EXISTS dbe_sql_util.create_abort_sql_patch(name, bigint, text, boolean);
DROP FUNCTION IF EXISTS dbe_sql_util.enable_sql_patch(name);
DROP FUNCTION IF EXISTS dbe_sql_util.disable_sql_patch(name);
DROP FUNCTION IF EXISTS dbe_sql_util.show_sql_patch(name);
DROP FUNCTION IF EXISTS dbe_sql_util.drop_sql_patch(name);
end if;
exit;
END LOOP;
END$$;
DROP SCHEMA IF EXISTS dbe_sql_util cascade;
DROP INDEX IF EXISTS pg_catalog.gs_sql_patch_unique_sql_id_index;
DROP INDEX IF EXISTS pg_catalog.gs_sql_patch_patch_name_index;
DROP TYPE IF EXISTS pg_catalog.gs_sql_patch;
DROP TABLE IF EXISTS pg_catalog.gs_sql_patch;
CREATE OR REPLACE FUNCTION pg_catalog.gs_session_memory_detail_tp(OUT sessid TEXT, OUT sesstype TEXT, OUT contextname TEXT, OUT level INT2, OUT parent TEXT, OUT totalsize INT8, OUT freesize INT8, OUT usedsize INT8)
RETURNS setof record
AS $$
DECLARE
enable_threadpool bool;
row_data record;
query_str text;
BEGIN
show enable_thread_pool into enable_threadpool;
IF enable_threadpool THEN
query_str := 'with SM AS
(SELECT
S.sessid AS sessid,
T.thrdtype AS sesstype,
S.contextname AS contextname,
S.level AS level,
S.parent AS parent,
S.totalsize AS totalsize,
S.freesize AS freesize,
S.usedsize AS usedsize
FROM
gs_session_memory_context S
LEFT JOIN
(SELECT DISTINCT thrdtype, tid
FROM gs_thread_memory_context) T
on S.threadid = T.tid
),
TM AS
(SELECT
S.sessid AS Ssessid,
T.thrdtype AS sesstype,
T.threadid AS Tsessid,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM
gs_thread_memory_context T
LEFT JOIN
(SELECT DISTINCT sessid, threadid
FROM gs_session_memory_context) S
ON T.tid = S.threadid
)
SELECT * from SM
UNION
SELECT
Ssessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NOT NULL
UNION
SELECT
Tsessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NULL;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
ELSE
query_str := 'SELECT
T.threadid AS sessid,
T.thrdtype AS sesstype,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM pg_catalog.pv_thread_memory_detail() T;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
END IF;
RETURN;
END; $$
LANGUAGE plpgsql NOT FENCED;
DROP FUNCTION IF EXISTS pg_catalog.gs_get_history_memory_detail() cascade;
DROP FUNCTION IF EXISTS gs_is_dw_io_blocked() CASCADE;
DROP FUNCTION IF EXISTS gs_block_dw_io(integer, text) CASCADE;

View File

@ -1,138 +0,0 @@
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7732;
CREATE OR REPLACE FUNCTION pg_catalog.get_client_info()
RETURNS text
LANGUAGE internal
STABLE STRICT NOT FENCED SHIPPABLE
AS $function$get_client_info$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
DO $$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select * from pg_extension where extname = 'security_plugin' limit 1) into ans;
if ans = true then
drop extension if exists security_plugin cascade;
create extension security_plugin;
end if;
END$$;
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
user_name text;
grant_query text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
-----------------------------------------------------------------------------
-- DROP: pg_catalog.pg_replication_slots
DROP VIEW IF EXISTS pg_catalog.pg_replication_slots CASCADE;
-- DROP: pg_get_replication_slots()
DROP FUNCTION IF EXISTS pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean, OUT confirmed_flush text) CASCADE;
-- DROP: gs_get_parallel_decode_status()
DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text, OUT reader_lsn text, OUT working_txn_cnt int8, OUT working_txn_memory int8) CASCADE;
-----------------------------------------------------------------------------
-- CREATE: pg_get_replication_slots
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3784;
CREATE OR REPLACE FUNCTION pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean) RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'pg_get_replication_slots';
COMMENT ON FUNCTION pg_catalog.pg_get_replication_slots() is 'information about replication slots currently in use';
-- CREATE: pg_catalog.pg_replication_slots
CREATE VIEW pg_catalog.pg_replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby
FROM pg_catalog.pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
-- CREATE: dbe_perf.replication_slots
IF ans = true THEN
CREATE OR REPLACE VIEW dbe_perf.replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby
FROM pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
END IF;
-- CREATE: gs_get_parallel_decode_status
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9377;
CREATE OR REPLACE FUNCTION pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text)
RETURNS SETOF RECORD
LANGUAGE internal
AS $function$gs_get_parallel_decode_status$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
-----------------------------------------------------------------------------
-- privileges
SELECT SESSION_USER INTO user_name;
-- dbe_perf
IF ans = true THEN
grant_query := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON dbe_perf.replication_slots TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE grant_query;
GRANT SELECT ON dbe_perf.replication_slots TO PUBLIC;
END IF;
-- pg_catalog
GRANT SELECT ON pg_catalog.pg_replication_slots TO PUBLIC;
-----------------------------------------------------------------------------
END$DO$;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_zone(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_spaces(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_slot(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_record(bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_xid(xid) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_parsepage_mv(text, bigint, text, boolean) CASCADE;
DO
$do$
DECLARE
query_str text;
ans boolean;
old_version boolean;
has_version_proc boolean;
BEGIN
FOR ans in select case when count(*) = 1 then true else false end as ans from (select 1 from pg_catalog.pg_extension where extname = 'hdfs_fdw' limit 1) LOOP
IF ans = false then
select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc;
IF has_version_proc = true then
select working_version_num < 92626 as old_version from working_version_num() into old_version;
IF old_version = true then
raise info 'Processing hdfs extension';
query_str := 'CREATE EXTENSION IF NOT EXISTS hdfs_fdw;';
EXECUTE query_str;
END IF;
END IF;
END IF;
END LOOP;
END
$do$;

View File

@ -8,3 +8,25 @@ DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(oid, oid, oid, smal
DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_from_remote(oid, oid, oid, smallint, smallint, integer, integer, integer, xid, integer, OUT bytea, OUT xid) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, smallint, smallint, integer, xid, integer, OUT bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.pg_read_binary_file_blocks(IN input text, IN blocknum bigint, IN blockcount bigint, OUT path text, OUT blocknum integer, OUT len integer, OUT algorithm integer, OUT chunk_size integer, OUT data bytea) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5843;
CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote
( int4,
int4,
int4,
int2,
int2,
int4,
xid,
int4,
xid,
boolean,
int4)
RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_read_block_from_remote_compress';
DROP FUNCTION IF EXISTS pg_catalog.pg_read_binary_file_blocks(IN input text, IN blocknum bigint, IN blockcount bigint, OUT path text, OUT blocknum integer, OUT len integer, OUT algorithm integer, OUT chunk_size integer, OUT data bytea) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 8413;
CREATE FUNCTION pg_catalog.pg_read_binary_file_blocks(IN inputpath text, IN startblocknum int8, IN count int8,
OUT path text,
OUT blocknum int4,
OUT len int4,
OUT data bytea)
AS 'pg_read_binary_file_blocks' LANGUAGE INTERNAL IMMUTABLE STRICT;
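-- Usage sketch (commented out): reading one block of a relation file through the
-- recreated function; the relative path and block number are placeholders.
-- SELECT path, blocknum, len
--   FROM pg_catalog.pg_read_binary_file_blocks('base/1/12345', 1, 1);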

View File

@ -1,719 +0,0 @@
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7732;
CREATE OR REPLACE FUNCTION pg_catalog.get_client_info()
RETURNS text
LANGUAGE internal
STABLE STRICT NOT FENCED SHIPPABLE
AS $function$get_client_info$function$;
comment on function PG_CATALOG.get_client_info() is 'read current client';
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7732;
CREATE OR REPLACE FUNCTION pg_catalog.get_client_info(OUT sid bigint, OUT client_info text)
RETURNS SETOF record
LANGUAGE internal
STABLE NOT FENCED SHIPPABLE ROWS 100
AS $function$get_client_info$function$;
comment on function PG_CATALOG.get_client_info() is 'read current client';
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
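-- Usage sketch (commented out): the set-returning form created above exposes the
-- client info string per session.
-- SELECT sid, client_info FROM pg_catalog.get_client_info();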
-- pg_ls_tmpdir_noargs
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3354;
CREATE OR REPLACE FUNCTION pg_catalog.pg_ls_tmpdir()
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 10 ROWS 20
AS $function$pg_ls_tmpdir_noargs$function$;
comment on function PG_CATALOG.pg_ls_tmpdir() is 'list of temporary files in the pg_default tablespace\''s pgsql_tmp directory';
-- pg_ls_tmpdir_1arg
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir(oid) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3355;
CREATE OR REPLACE FUNCTION pg_catalog.pg_ls_tmpdir(oid)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 10 ROWS 20
AS $function$pg_ls_tmpdir_1arg$function$;
comment on function PG_CATALOG.pg_ls_tmpdir(oid) is 'list of temporary files in the specified tablespace\''s pgsql_tmp directory';
-- pg_ls_waldir
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_waldir() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3356;
CREATE OR REPLACE FUNCTION pg_catalog.pg_ls_waldir()
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 10 ROWS 20
AS $function$pg_ls_waldir$function$;
comment on function PG_CATALOG.pg_ls_waldir() is 'list of files in the WAL directory';
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
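-- Usage sketch (commented out): listing WAL files through the recreated function;
-- because it is declared RETURNS SETOF record without OUT columns, a column
-- definition list is likely required, and the column names below are assumptions.
-- SELECT * FROM pg_catalog.pg_ls_waldir()
--   AS t(filename text, size bigint, modification timestamp with time zone);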
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
-----------------------------------------------------------------------------------------------------------------------------------------------------
DROP VIEW IF EXISTS DBE_PERF.local_active_session cascade;
DROP FUNCTION IF EXISTS pg_catalog.get_local_active_session(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text) cascade;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5721;
CREATE OR REPLACE FUNCTION pg_catalog.get_local_active_session
(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text, OUT xact_start_time timestamp with time zone, OUT query_start_time timestamp with time zone, OUT state text)
RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_local_active_session';
CREATE OR REPLACE VIEW DBE_PERF.local_active_session AS
WITH RECURSIVE
las(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid, xact_start_time, query_start_time, state)
AS (select t.* from get_local_active_session() as t),
tt(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid, xact_start_time, query_start_time, state, final_block_sessionid, level, head)
AS(SELECT las.*, las.block_sessionid AS final_block_sessionid, 1 AS level, array_append('{}', las.sessionid) AS head FROM las
UNION ALL
SELECT tt.sampleid, tt.sample_time, tt.need_flush_sample, tt.databaseid, tt.thread_id, tt.sessionid, tt.start_time, tt.event, tt.lwtid, tt.psessionid,
tt.tlevel, tt.smpid, tt.userid, tt.application_name, tt.client_addr, tt.client_hostname, tt.client_port, tt.query_id, tt.unique_query_id,
tt.user_id, tt.cn_id, tt.unique_query, tt.locktag, tt.lockmode, tt.block_sessionid, tt.wait_status, tt.global_sessionid, tt.xact_start_time, tt.query_start_time, tt.state, las.block_sessionid AS final_block_sessionid, tt.level + 1 AS level, array_append(tt.head, las.sessionid) AS head
FROM tt INNER JOIN las ON tt.final_block_sessionid = las.sessionid
WHERE las.sampleid = tt.sampleid AND (las.block_sessionid IS NOT NULL OR las.block_sessionid != 0)
AND las.sessionid != all(head) AND las.sessionid != las.block_sessionid)
SELECT sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, final_block_sessionid, wait_status, global_sessionid, xact_start_time, query_start_time, state FROM tt
WHERE level = (SELECT MAX(level) FROM tt t1 WHERE t1.sampleid = tt.sampleid AND t1.sessionid = tt.sessionid);
end if;
END$DO$;
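-- Usage sketch (commented out): inspecting the most recent samples and their blocking
-- chain through the recursive view defined above; the row limit is illustrative.
-- SELECT sampleid, sample_time, sessionid, event, wait_status, final_block_sessionid
--   FROM DBE_PERF.local_active_session
--   ORDER BY sample_time DESC LIMIT 20;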
DO $DO$
DECLARE
ans boolean;
user_name text;
query_str text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
SELECT SESSION_USER INTO user_name;
query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.local_active_session TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE query_str;
GRANT SELECT ON TABLE DBE_PERF.local_active_session TO PUBLIC;
GRANT SELECT ON TABLE pg_catalog.gs_asp TO PUBLIC;
end if;
END$DO$;
DROP EXTENSION IF EXISTS security_plugin CASCADE;
CREATE EXTENSION IF NOT EXISTS security_plugin;
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade;
DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade;
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
end if;
END$DO$;
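-- Usage sketch (commented out): the full-SQL and slow-SQL lookups recreated above take
-- the same time-window arguments; the one-hour window is only an example value.
-- SELECT node_name, unique_query_id, start_time, finish_time, query
--   FROM DBE_PERF.get_global_full_sql_by_timestamp(now() - interval '1 hour', now());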
DO $$
DECLARE
query_str text;
ans bool;
BEGIN
select case when count(*)=1 then true else false end as ans from (select * from pg_class where relname='snapshot_sequence') into ans;
if ans = false then
query_str := 'CREATE SEQUENCE db4ai.snapshot_sequence;';
EXECUTE IMMEDIATE query_str;
end if;
update pg_class set relacl = null where relname = 'snapshot_sequence' and relnamespace = 4991;
query_str := 'GRANT UPDATE ON db4ai.snapshot_sequence TO PUBLIC;';
EXECUTE IMMEDIATE query_str;
END$$;
-- gs_stack_int8
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(INT8)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack_int8$function$;
-- gs_stack_noargs
DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9998;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack()
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack_noargs$function$;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(pid bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(pid bigint)
RETURNS SETOF text
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9998;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
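-- Usage sketch (commented out): with the final definitions above, the no-argument form
-- returns (tid, lwtid, stack) rows and the pid form returns text; 12345 is a
-- placeholder thread id.
-- SELECT tid, lwtid, stack FROM pg_catalog.gs_stack();
-- SELECT * FROM pg_catalog.gs_stack(12345);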
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
user_name text;
grant_query text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
-----------------------------------------------------------------------------
-- DROP: pg_catalog.pg_replication_slots
DROP VIEW IF EXISTS pg_catalog.pg_replication_slots CASCADE;
-- DROP: pg_get_replication_slots()
DROP FUNCTION IF EXISTS pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean) CASCADE;
-- DROP: gs_get_parallel_decode_status()
DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text) CASCADE;
-----------------------------------------------------------------------------
-- CREATE: pg_get_replication_slots
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3784;
CREATE OR REPLACE FUNCTION pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean, OUT confirmed_flush text) RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'pg_get_replication_slots';
COMMENT ON FUNCTION pg_catalog.pg_get_replication_slots() is 'information about replication slots currently in use';
-- CREATE: pg_catalog.pg_replication_slots
CREATE VIEW pg_catalog.pg_replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby,
L.confirmed_flush
FROM pg_catalog.pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
-- CREATE: dbe_perf.replication_slots
IF ans = true THEN
CREATE OR REPLACE VIEW dbe_perf.replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby
FROM pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
END IF;
-- CREATE: gs_get_parallel_decode_status
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9377;
CREATE OR REPLACE FUNCTION pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text, OUT reader_lsn text, OUT working_txn_cnt int8, OUT working_txn_memory int8)
RETURNS SETOF RECORD
LANGUAGE internal
AS $function$gs_get_parallel_decode_status$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
-----------------------------------------------------------------------------
-- privileges
SELECT SESSION_USER INTO user_name;
-- dbe_perf
IF ans = true THEN
grant_query := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON dbe_perf.replication_slots TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE grant_query;
GRANT SELECT ON dbe_perf.replication_slots TO PUBLIC;
END IF;
-- pg_catalog
GRANT SELECT ON pg_catalog.pg_replication_slots TO PUBLIC;
-----------------------------------------------------------------------------
END$DO$;
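-- Usage sketch (commented out): the rebuilt view now exposes confirmed_flush alongside
-- the original columns.
-- SELECT slot_name, plugin, slot_type, active, restart_lsn, confirmed_flush
--   FROM pg_catalog.pg_replication_slots;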
DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade;
CREATE VIEW pg_catalog.gs_session_cpu_statistics AS
SELECT
S.datid AS datid,
S.usename,
S.pid,
S.query_start AS start_time,
T.min_cpu_time,
T.max_cpu_time,
T.total_cpu_time,
S.query,
S.node_group,
T.top_cpu_dn
FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T
WHERE S.sessionid = T.threadid;
GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 9050, 9051, 0, 0;
CREATE TABLE IF NOT EXISTS pg_catalog.gs_sql_patch (
patch_name name NOT NULL,
unique_sql_id bigint NOT NULL,
owner Oid NOT NULL,
enable boolean NOT NULL,
status "char" NOT NULL,
abort boolean NOT NULL,
hint_string text,
hint_node pg_node_tree,
original_query text,
original_query_tree pg_node_tree,
patched_query text,
patched_query_tree pg_node_tree,
description text
);
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9053;
CREATE UNIQUE INDEX pg_catalog.gs_sql_patch_patch_name_index ON pg_catalog.gs_sql_patch USING btree (patch_name);
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9054;
CREATE INDEX pg_catalog.gs_sql_patch_unique_sql_id_index ON pg_catalog.gs_sql_patch USING btree (unique_sql_id);
GRANT SELECT ON TABLE pg_catalog.gs_sql_patch TO PUBLIC;
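-- Usage sketch (commented out): inspecting registered SQL patches once the catalog
-- table exists.
-- SELECT patch_name, unique_sql_id, enable, abort, hint_string
--   FROM pg_catalog.gs_sql_patch;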
DROP SCHEMA IF EXISTS dbe_sql_util cascade;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_NAMESPACE, 9049;
CREATE SCHEMA dbe_sql_util;
GRANT USAGE ON SCHEMA dbe_sql_util TO PUBLIC;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9060;
CREATE OR REPLACE FUNCTION dbe_sql_util.create_hint_sql_patch(name, bigint, text, text DEFAULT NULL::text, boolean DEFAULT true)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$create_sql_patch_by_id_hint$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9064;
CREATE OR REPLACE FUNCTION dbe_sql_util.create_abort_sql_patch(name, bigint, text DEFAULT NULL::text, boolean DEFAULT true)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$create_abort_patch_by_id$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9061;
CREATE OR REPLACE FUNCTION dbe_sql_util.enable_sql_patch(name)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$enable_sql_patch$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9062;
CREATE OR REPLACE FUNCTION dbe_sql_util.disable_sql_patch(name)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$disable_sql_patch$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9063;
CREATE OR REPLACE FUNCTION dbe_sql_util.drop_sql_patch(name)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$drop_sql_patch$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9065;
CREATE OR REPLACE FUNCTION dbe_sql_util.show_sql_patch(patch_name name, OUT unique_sql_id bigint, OUT enable boolean, OUT abort boolean, OUT hint_str text)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE ROWS 1
AS $function$show_sql_patch$function$;
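-- Usage sketch (commented out): typical patch-management calls against the functions
-- created above; the patch name, unique SQL id and hint text are placeholders.
-- SELECT dbe_sql_util.create_hint_sql_patch('p1', 12345, 'indexscan(t1)');
-- SELECT * FROM dbe_sql_util.show_sql_patch('p1');
-- SELECT dbe_sql_util.drop_sql_patch('p1');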
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
CREATE OR REPLACE FUNCTION pg_catalog.gs_session_memory_detail_tp(OUT sessid TEXT, OUT sesstype TEXT, OUT contextname TEXT, OUT level INT2, OUT parent TEXT, OUT totalsize INT8, OUT freesize INT8, OUT usedsize INT8)
RETURNS setof record
AS $$
DECLARE
enable_threadpool bool;
row_data record;
query_str text;
BEGIN
show enable_thread_pool into enable_threadpool;
IF enable_threadpool THEN
query_str := 'with SM AS
(SELECT
S.sessid AS sessid,
T.thrdtype AS sesstype,
S.contextname AS contextname,
S.level AS level,
S.parent AS parent,
S.totalsize AS totalsize,
S.freesize AS freesize,
S.usedsize AS usedsize
FROM
gs_session_memory_context S
LEFT JOIN
(SELECT DISTINCT thrdtype, tid
FROM gs_thread_memory_context) T
on S.threadid = T.tid
),
TM AS
(SELECT
S.sessid AS Ssessid,
T.thrdtype AS sesstype,
T.threadid AS Tsessid,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM
gs_thread_memory_context T
LEFT JOIN
(SELECT DISTINCT sessid, threadid
FROM gs_session_memory_context) S
ON T.tid = S.threadid
)
SELECT * from SM
UNION ALL
SELECT
Ssessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NOT NULL
UNION ALL
SELECT
Tsessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NULL;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
ELSE
query_str := 'SELECT
T.threadid AS sessid,
T.thrdtype AS sesstype,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM pg_catalog.pv_thread_memory_detail() T;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
END IF;
RETURN;
END; $$
LANGUAGE plpgsql NOT FENCED;
DROP FUNCTION IF EXISTS pg_catalog.gs_get_history_memory_detail() cascade;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5257;
CREATE OR REPLACE FUNCTION pg_catalog.gs_get_history_memory_detail(
cstring,
OUT memory_info text) RETURNS SETOF TEXT LANGUAGE INTERNAL as 'gs_get_history_memory_detail';
DROP FUNCTION IF EXISTS gs_is_dw_io_blocked() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4772;
CREATE OR REPLACE FUNCTION pg_catalog.gs_is_dw_io_blocked(OUT result boolean)
RETURNS SETOF boolean
LANGUAGE internal
STABLE STRICT NOT FENCED NOT SHIPPABLE
AS $function$gs_is_dw_io_blocked$function$;
DROP FUNCTION IF EXISTS gs_block_dw_io(integer, text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4773;
CREATE OR REPLACE FUNCTION pg_catalog.gs_block_dw_io(timeout integer, identifier text, OUT result boolean)
RETURNS SETOF boolean
LANGUAGE internal
STABLE STRICT NOT FENCED NOT SHIPPABLE
AS $function$gs_block_dw_io$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
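-- Usage sketch (commented out): checking and setting the double-write IO block with the
-- functions recreated above; the timeout and identifier values are placeholders.
-- SELECT * FROM pg_catalog.gs_is_dw_io_blocked();
-- SELECT * FROM pg_catalog.gs_block_dw_io(60, 'upgrade_check');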

View File

@ -1,59 +0,0 @@
/*------ add sys function gs_undo_meta_dump_zone ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_zone(int4, boolean, OUT zone_id oid, OUT persist_type oid, OUT insert text, OUT discard text, OUT forcediscard text, OUT lsn text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4433;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta_dump_zone(int4, boolean, OUT zone_id oid, OUT persist_type oid, OUT insert text, OUT discard text, OUT forcediscard text, OUT lsn text)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta_dump_zone';
/*------ add sys function gs_undo_meta_dump_spaces ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_spaces(int4, boolean, OUT zone_id oid, OUT undorecord_space_tail text, OUT undorecord_space_head text, OUT undorecord_space_lsn text, OUT undoslot_space_tail text, OUT undoslot_space_head text, OUT undoreslot_space_lsn text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4432;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta_dump_spaces(int4, boolean, OUT zone_id oid, OUT undorecord_space_tail text, OUT undorecord_space_head text, OUT undorecord_space_lsn text, OUT undoslot_space_tail text, OUT undoslot_space_head text, OUT undoreslot_space_lsn text)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta_dump_spaces';
/*------ add sys function gs_undo_meta_dump_slot ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_slot(int4, boolean, OUT zone_id oid, OUT allocate text, OUT recycle text, OUT frozen_xid text, OUT global_frozen_xid text, OUT recycle_xid text, OUT global_recycle_xid text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4437;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta_dump_slot(int4, boolean, OUT zone_id oid, OUT allocate text, OUT recycle text, OUT frozen_xid text, OUT global_frozen_xid text, OUT recycle_xid text, OUT global_recycle_xid text)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta_dump_slot';
/*------ add sys function gs_undo_translot_dump_slot ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4541;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_slot';
/*------ add sys function gs_undo_translot_dump_xid ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid';
/*------ add sys function gs_undo_dump_record ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_record(bigint, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4539;
CREATE FUNCTION pg_catalog.gs_undo_dump_record(bigint, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text)
RETURNS record LANGUAGE INTERNAL as 'gs_undo_dump_record';
/*------ add sys function gs_undo_dump_xid ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_xid(xid, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4540;
CREATE FUNCTION pg_catalog.gs_undo_dump_xid(xid, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text)
RETURNS record LANGUAGE INTERNAL as 'gs_undo_dump_xid';
/*------ add sys function gs_undo_dump_parsepage_mv ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_parsepage_mv(text, bigint, text, boolean) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4542;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_dump_parsepage_mv(relpath text, blkno bigint, reltype text, rmem boolean, OUT output text)
RETURNS text
LANGUAGE internal
STABLE STRICT NOT FENCED NOT SHIPPABLE
AS $function$gs_undo_dump_parsepage_mv$function$;
comment on function PG_CATALOG.gs_undo_dump_parsepage_mv(relpath text, blkno bigint, reltype text, rmem boolean) is 'parse uheap data page and undo to output file based on given filepath';
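-- Usage sketch (commented out): dumping undo metadata for a single zone with the
-- functions recreated above; the zone id and boolean flag are placeholder values.
-- SELECT * FROM pg_catalog.gs_undo_meta_dump_zone(0, true);
-- SELECT * FROM pg_catalog.gs_undo_translot_dump_slot(0, true);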

View File

@ -15,6 +15,7 @@ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1005;
CREATE OR REPLACE FUNCTION pg_catalog.compress_statistic_info(IN input_path text, IN step smallint, OUT path text, OUT extent_count bigint, OUT dispersion_count bigint, OUT void_count bigint) RETURNS record LANGUAGE INTERNAL AS 'compress_statistic_info';
-- compress read page/file from remote
DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(int4, int4, int4, int2, int2, int4, xid, int4, xid, boolean, int4) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5843;
CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, smallint, integer, xid, integer, xid, boolean, integer)
RETURNS bytea
@ -36,5 +37,10 @@ CREATE OR REPLACE FUNCTION pg_catalog.gs_read_file_size_from_remote(oid, oid, oi
NOT FENCED NOT SHIPPABLE
AS 'gs_read_file_size_from_remote';
DROP FUNCTION IF EXISTS pg_catalog.pg_read_binary_file_blocks(IN inputpath text, IN startblocknum int8, IN count int8,
OUT path text,
OUT blocknum int4,
OUT len int4,
OUT data bytea);
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5846;
CREATE OR REPLACE FUNCTION pg_catalog.pg_read_binary_file_blocks(IN input text, IN blocknum bigint, IN blockcount bigint, OUT path text, OUT blocknum integer, OUT len integer, OUT algorithm integer, OUT chunk_size integer, OUT data bytea) RETURNS SETOF record AS 'pg_read_binary_file_blocks' LANGUAGE INTERNAL IMMUTABLE;

View File

@ -1,719 +0,0 @@
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7732;
CREATE OR REPLACE FUNCTION pg_catalog.get_client_info()
RETURNS text
LANGUAGE internal
STABLE STRICT NOT FENCED SHIPPABLE
AS $function$get_client_info$function$;
comment on function PG_CATALOG.get_client_info() is 'read current client';
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
DROP FUNCTION IF EXISTS pg_catalog.get_client_info;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7732;
CREATE OR REPLACE FUNCTION pg_catalog.get_client_info(OUT sid bigint, OUT client_info text)
RETURNS SETOF record
LANGUAGE internal
STABLE NOT FENCED SHIPPABLE ROWS 100
AS $function$get_client_info$function$;
comment on function PG_CATALOG.get_client_info() is 'read current client';
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
-- pg_ls_tmpdir_noargs
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3354;
CREATE OR REPLACE FUNCTION pg_catalog.pg_ls_tmpdir()
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 10 ROWS 20
AS $function$pg_ls_tmpdir_noargs$function$;
comment on function PG_CATALOG.pg_ls_tmpdir() is 'list of temporary files in the pg_default tablespace\''s pgsql_tmp directory';
-- pg_ls_tmpdir_1arg
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_tmpdir(oid) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3355;
CREATE OR REPLACE FUNCTION pg_catalog.pg_ls_tmpdir(oid)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 10 ROWS 20
AS $function$pg_ls_tmpdir_1arg$function$;
comment on function PG_CATALOG.pg_ls_tmpdir(oid) is 'list of temporary files in the specified tablespace\''s pgsql_tmp directory';
-- pg_ls_waldir
DROP FUNCTION IF EXISTS pg_catalog.pg_ls_waldir() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3356;
CREATE OR REPLACE FUNCTION pg_catalog.pg_ls_waldir()
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 10 ROWS 20
AS $function$pg_ls_waldir$function$;
comment on function PG_CATALOG.pg_ls_waldir() is 'list of files in the WAL directory';
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
-----------------------------------------------------------------------------------------------------------------------------------------------------
DROP VIEW IF EXISTS DBE_PERF.local_active_session cascade;
DROP FUNCTION IF EXISTS pg_catalog.get_local_active_session(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text) cascade;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5721;
CREATE OR REPLACE FUNCTION pg_catalog.get_local_active_session
(OUT sampleid bigint, OUT sample_time timestamp with time zone, OUT need_flush_sample boolean, OUT databaseid oid, OUT thread_id bigint, OUT sessionid bigint, OUT start_time timestamp with time zone, OUT event text, OUT lwtid integer, OUT psessionid bigint, OUT tlevel integer, OUT smpid integer, OUT userid oid, OUT application_name text, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT query_id bigint, OUT unique_query_id bigint, OUT user_id oid, OUT cn_id integer, OUT unique_query text, OUT locktag text, OUT lockmode text, OUT block_sessionid bigint, OUT wait_status text, OUT global_sessionid text, OUT xact_start_time timestamp with time zone, OUT query_start_time timestamp with time zone, OUT state text)
RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_local_active_session';
CREATE OR REPLACE VIEW DBE_PERF.local_active_session AS
WITH RECURSIVE
las(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid, xact_start_time, query_start_time, state)
AS (select t.* from get_local_active_session() as t),
tt(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid, xact_start_time, query_start_time, state, final_block_sessionid, level, head)
AS(SELECT las.*, las.block_sessionid AS final_block_sessionid, 1 AS level, array_append('{}', las.sessionid) AS head FROM las
UNION ALL
SELECT tt.sampleid, tt.sample_time, tt.need_flush_sample, tt.databaseid, tt.thread_id, tt.sessionid, tt.start_time, tt.event, tt.lwtid, tt.psessionid,
tt.tlevel, tt.smpid, tt.userid, tt.application_name, tt.client_addr, tt.client_hostname, tt.client_port, tt.query_id, tt.unique_query_id,
tt.user_id, tt.cn_id, tt.unique_query, tt.locktag, tt.lockmode, tt.block_sessionid, tt.wait_status, tt.global_sessionid, tt.xact_start_time, tt.query_start_time, tt.state, las.block_sessionid AS final_block_sessionid, tt.level + 1 AS level, array_append(tt.head, las.sessionid) AS head
FROM tt INNER JOIN las ON tt.final_block_sessionid = las.sessionid
WHERE las.sampleid = tt.sampleid AND (las.block_sessionid IS NOT NULL OR las.block_sessionid != 0)
AND las.sessionid != all(head) AND las.sessionid != las.block_sessionid)
SELECT sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid,
tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id,
user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, final_block_sessionid, wait_status, global_sessionid, xact_start_time, query_start_time, state FROM tt
WHERE level = (SELECT MAX(level) FROM tt t1 WHERE t1.sampleid = tt.sampleid AND t1.sessionid = tt.sessionid);
end if;
END$DO$;
DO $DO$
DECLARE
ans boolean;
user_name text;
query_str text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
SELECT SESSION_USER INTO user_name;
query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.local_active_session TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE query_str;
GRANT SELECT ON TABLE DBE_PERF.local_active_session TO PUBLIC;
GRANT SELECT ON TABLE pg_catalog.gs_asp TO PUBLIC;
end if;
END$DO$;
DROP EXTENSION IF EXISTS security_plugin CASCADE;
CREATE EXTENSION IF NOT EXISTS security_plugin;
DO $DO$
DECLARE
ans boolean;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
if ans = true then
DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade;
DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade;
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
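-- Illustrative usage (a sketch, not executed during the upgrade; the literal timestamps are
-- placeholders): full SQL records for a time window can be fetched across all CN and master
-- DN nodes with
--   SELECT node_name, db_name, unique_query_id, query, start_time, finish_time
--   FROM dbe_perf.get_global_full_sql_by_timestamp('2022-09-01 00:00:00', '2022-09-02 00:00:00');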
CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
(in start_timestamp timestamp with time zone,
in end_timestamp timestamp with time zone,
OUT node_name name,
OUT db_name name,
OUT schema_name name,
OUT origin_node integer,
OUT user_name name,
OUT application_name text,
OUT client_addr text,
OUT client_port integer,
OUT unique_query_id bigint,
OUT debug_query_id bigint,
OUT query text,
OUT start_time timestamp with time zone,
OUT finish_time timestamp with time zone,
OUT slow_sql_threshold bigint,
OUT transaction_id bigint,
OUT thread_id bigint,
OUT session_id bigint,
OUT n_soft_parse bigint,
OUT n_hard_parse bigint,
OUT query_plan text,
OUT n_returned_rows bigint,
OUT n_tuples_fetched bigint,
OUT n_tuples_returned bigint,
OUT n_tuples_inserted bigint,
OUT n_tuples_updated bigint,
OUT n_tuples_deleted bigint,
OUT n_blocks_fetched bigint,
OUT n_blocks_hit bigint,
OUT db_time bigint,
OUT cpu_time bigint,
OUT execution_time bigint,
OUT parse_time bigint,
OUT plan_time bigint,
OUT rewrite_time bigint,
OUT pl_execution_time bigint,
OUT pl_compilation_time bigint,
OUT data_io_time bigint,
OUT net_send_info text,
OUT net_recv_info text,
OUT net_stream_send_info text,
OUT net_stream_recv_info text,
OUT lock_count bigint,
OUT lock_time bigint,
OUT lock_wait_count bigint,
OUT lock_wait_time bigint,
OUT lock_max_count bigint,
OUT lwlock_count bigint,
OUT lwlock_wait_count bigint,
OUT lwlock_time bigint,
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
OUT trace_id text)
RETURNS setof record
AS $$
DECLARE
row_data pg_catalog.statement_history%rowtype;
row_name record;
query_str text;
-- node name
query_str_nodes text;
BEGIN
-- Get all node names(CN + master DN)
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
db_name := row_data.db_name;
schema_name := row_data.schema_name;
origin_node := row_data.origin_node;
user_name := row_data.user_name;
application_name := row_data.application_name;
client_addr := row_data.client_addr;
client_port := row_data.client_port;
unique_query_id := row_data.unique_query_id;
debug_query_id := row_data.debug_query_id;
query := row_data.query;
start_time := row_data.start_time;
finish_time := row_data.finish_time;
slow_sql_threshold := row_data.slow_sql_threshold;
transaction_id := row_data.transaction_id;
thread_id := row_data.thread_id;
session_id := row_data.session_id;
n_soft_parse := row_data.n_soft_parse;
n_hard_parse := row_data.n_hard_parse;
query_plan := row_data.query_plan;
n_returned_rows := row_data.n_returned_rows;
n_tuples_fetched := row_data.n_tuples_fetched;
n_tuples_returned := row_data.n_tuples_returned;
n_tuples_inserted := row_data.n_tuples_inserted;
n_tuples_updated := row_data.n_tuples_updated;
n_tuples_deleted := row_data.n_tuples_deleted;
n_blocks_fetched := row_data.n_blocks_fetched;
n_blocks_hit := row_data.n_blocks_hit;
db_time := row_data.db_time;
cpu_time := row_data.cpu_time;
execution_time := row_data.execution_time;
parse_time := row_data.parse_time;
plan_time := row_data.plan_time;
rewrite_time := row_data.rewrite_time;
pl_execution_time := row_data.pl_execution_time;
pl_compilation_time := row_data.pl_compilation_time;
data_io_time := row_data.data_io_time;
net_send_info := row_data.net_send_info;
net_recv_info := row_data.net_recv_info;
net_stream_send_info := row_data.net_stream_send_info;
net_stream_recv_info := row_data.net_stream_recv_info;
lock_count := row_data.lock_count;
lock_time := row_data.lock_time;
lock_wait_count := row_data.lock_wait_count;
lock_wait_time := row_data.lock_wait_time;
lock_max_count := row_data.lock_max_count;
lwlock_count := row_data.lwlock_count;
lwlock_wait_count := row_data.lwlock_wait_count;
lwlock_time := row_data.lwlock_time;
lwlock_wait_time := row_data.lwlock_wait_time;
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
return next;
END LOOP;
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql' NOT FENCED;
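-- Illustrative usage (a sketch, not executed here): the slow-SQL variant takes the same time
-- window but only returns rows recorded with is_slow_sql = true, e.g.
--   SELECT node_name, unique_query_id, query, execution_time
--   FROM dbe_perf.get_global_slow_sql_by_timestamp('2022-09-01 00:00:00', '2022-09-02 00:00:00');
-- The timestamps are placeholders.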
end if;
END$DO$;
DO $$
DECLARE
query_str text;
ans bool;
BEGIN
select case when count(*)=1 then true else false end as ans from (select * from pg_class where relname='snapshot_sequence') into ans;
if ans = false then
query_str := 'CREATE SEQUENCE db4ai.snapshot_sequence;';
EXECUTE IMMEDIATE query_str;
end if;
update pg_class set relacl = null where relname = 'snapshot_sequence' and relnamespace = 4991;
query_str := 'GRANT UPDATE ON db4ai.snapshot_sequence TO PUBLIC;';
EXECUTE IMMEDIATE query_str;
END$$;
-- gs_stack_int8
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(INT8)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack_int8$function$;
-- gs_stack_noargs
DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9998;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack()
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack_noargs$function$;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(pid bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(pid bigint)
RETURNS SETOF text
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9998;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE COST 1 ROWS 5
AS $function$gs_stack$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
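-- Illustrative usage (not run by this script): the overloads registered above can be called
-- to dump call stacks, e.g.
--   SELECT * FROM pg_catalog.gs_stack();        -- stacks of all threads (tid, lwtid, stack)
--   SELECT pg_catalog.gs_stack(140123456789);   -- stack of a single thread; the id is a placeholder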
------------------------------------------------------------------------------------------------------------------------------------
DO $DO$
DECLARE
ans boolean;
user_name text;
grant_query text;
BEGIN
select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
-----------------------------------------------------------------------------
-- DROP: pg_catalog.pg_replication_slots
DROP VIEW IF EXISTS pg_catalog.pg_replication_slots CASCADE;
-- DROP: pg_get_replication_slot()
DROP FUNCTION IF EXISTS pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean) CASCADE;
-- DROP: gs_get_parallel_decode_status()
DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text) CASCADE;
-----------------------------------------------------------------------------
-- CREATE: pg_get_replication_slots
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3784;
CREATE OR REPLACE FUNCTION pg_catalog.pg_get_replication_slots(OUT slot_name text, OUT plugin text, OUT slot_type text, OUT datoid oid, OUT active boolean, OUT xmin xid, OUT catalog_xmin xid, OUT restart_lsn text, OUT dummy_standby boolean, OUT confirmed_flush text) RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'pg_get_replication_slots';
COMMENT ON FUNCTION pg_catalog.pg_get_replication_slots() is 'information about replication slots currently in use';
-- CREATE: pg_catalog.pg_replication_slots
CREATE VIEW pg_catalog.pg_replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby,
L.confirmed_flush
FROM pg_catalog.pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
-- CREATE: dbe_perf.replication_slots
IF ans = true THEN
CREATE OR REPLACE VIEW dbe_perf.replication_slots AS
SELECT
L.slot_name,
L.plugin,
L.slot_type,
L.datoid,
D.datname AS database,
L.active,
L.xmin,
L.catalog_xmin,
L.restart_lsn,
L.dummy_standby
FROM pg_get_replication_slots() AS L
LEFT JOIN pg_database D ON (L.datoid = D.oid);
END IF;
-- CREATE: gs_get_parallel_decode_status
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9377;
CREATE OR REPLACE FUNCTION pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text, OUT reader_lsn text, OUT working_txn_cnt int8, OUT working_txn_memory int8)
RETURNS SETOF RECORD
LANGUAGE internal
AS $function$gs_get_parallel_decode_status$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
-----------------------------------------------------------------------------
-- privileges
SELECT SESSION_USER INTO user_name;
-- dbe_perf
IF ans = true THEN
grant_query := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON dbe_perf.replication_slots TO ' || quote_ident(user_name) || ';';
EXECUTE IMMEDIATE grant_query;
GRANT SELECT ON dbe_perf.replication_slots TO PUBLIC;
END IF;
-- pg_catalog
GRANT SELECT ON pg_catalog.pg_replication_slots TO PUBLIC;
-----------------------------------------------------------------------------
END$DO$;
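-- Illustrative queries (a sketch, not executed here): the rebuilt views expose the new
-- confirmed_flush column, and decode status can be read from the re-registered function, e.g.
--   SELECT slot_name, plugin, active, restart_lsn, confirmed_flush FROM pg_catalog.pg_replication_slots;
--   SELECT * FROM pg_catalog.gs_get_parallel_decode_status();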
DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade;
CREATE VIEW pg_catalog.gs_session_cpu_statistics AS
SELECT
S.datid AS datid,
S.usename,
S.pid,
S.query_start AS start_time,
T.min_cpu_time,
T.max_cpu_time,
T.total_cpu_time,
S.query,
S.node_group,
T.top_cpu_dn
FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T
WHERE S.sessionid = T.threadid;
GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC;
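-- Illustrative only (not executed here): per-session CPU usage of active statements can then
-- be read with
--   SELECT pid, usename, start_time, total_cpu_time, query FROM pg_catalog.gs_session_cpu_statistics;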
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 9050, 9051, 0, 0;
CREATE TABLE IF NOT EXISTS pg_catalog.gs_sql_patch (
patch_name name NOT NULL,
unique_sql_id bigint NOT NULL,
owner Oid NOT NULL,
enable boolean NOT NULL,
status "char" NOT NULL,
abort boolean NOT NULL,
hint_string text,
hint_node pg_node_tree,
original_query text,
original_query_tree pg_node_tree,
patched_query text,
patched_query_tree pg_node_tree,
description text
);
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9053;
CREATE UNIQUE INDEX pg_catalog.gs_sql_patch_patch_name_index ON pg_catalog.gs_sql_patch USING btree (patch_name);
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9054;
CREATE INDEX pg_catalog.gs_sql_patch_unique_sql_id_index ON pg_catalog.gs_sql_patch USING btree (unique_sql_id);
GRANT SELECT ON TABLE pg_catalog.gs_sql_patch TO PUBLIC;
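-- Illustrative only (not run during the upgrade): patches recorded through the dbe_sql_util
-- functions created below land in this catalog and can be listed with
--   SELECT patch_name, unique_sql_id, enable, status, abort FROM pg_catalog.gs_sql_patch;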
DROP SCHEMA IF EXISTS dbe_sql_util cascade;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_NAMESPACE, 9049;
CREATE SCHEMA dbe_sql_util;
GRANT USAGE ON SCHEMA dbe_sql_util TO PUBLIC;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9060;
CREATE OR REPLACE FUNCTION dbe_sql_util.create_hint_sql_patch(name, bigint, text, text DEFAULT NULL::text, boolean DEFAULT true)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$create_sql_patch_by_id_hint$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9064;
CREATE OR REPLACE FUNCTION dbe_sql_util.create_abort_sql_patch(name, bigint, text DEFAULT NULL::text, boolean DEFAULT true)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$create_abort_patch_by_id$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9061;
CREATE OR REPLACE FUNCTION dbe_sql_util.enable_sql_patch(name)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$enable_sql_patch$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9062;
CREATE OR REPLACE FUNCTION dbe_sql_util.disable_sql_patch(name)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$disable_sql_patch$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9063;
CREATE OR REPLACE FUNCTION dbe_sql_util.drop_sql_patch(name)
RETURNS boolean
LANGUAGE internal
NOT FENCED NOT SHIPPABLE
AS $function$drop_sql_patch$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9065;
CREATE OR REPLACE FUNCTION dbe_sql_util.show_sql_patch(patch_name name, OUT unique_sql_id bigint, OUT enable boolean, OUT abort boolean, OUT hint_str text)
RETURNS SETOF record
LANGUAGE internal
STRICT NOT FENCED NOT SHIPPABLE ROWS 1
AS $function$show_sql_patch$function$;
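-- Illustrative workflow (a sketch with placeholder values, not part of the upgrade): a hint
-- patch is bound to a unique SQL id and can then be inspected, disabled and dropped, e.g.
--   SELECT dbe_sql_util.create_hint_sql_patch('patch_demo', 1234567890, 'indexscan(t1)');
--   SELECT * FROM dbe_sql_util.show_sql_patch('patch_demo');
--   SELECT dbe_sql_util.disable_sql_patch('patch_demo');
--   SELECT dbe_sql_util.drop_sql_patch('patch_demo');
-- 'patch_demo', the id 1234567890 and the hint string are placeholders.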
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
CREATE OR REPLACE FUNCTION pg_catalog.gs_session_memory_detail_tp(OUT sessid TEXT, OUT sesstype TEXT, OUT contextname TEXT, OUT level INT2, OUT parent TEXT, OUT totalsize INT8, OUT freesize INT8, OUT usedsize INT8)
RETURNS setof record
AS $$
DECLARE
enable_threadpool bool;
row_data record;
query_str text;
BEGIN
show enable_thread_pool into enable_threadpool;
IF enable_threadpool THEN
query_str := 'with SM AS
(SELECT
S.sessid AS sessid,
T.thrdtype AS sesstype,
S.contextname AS contextname,
S.level AS level,
S.parent AS parent,
S.totalsize AS totalsize,
S.freesize AS freesize,
S.usedsize AS usedsize
FROM
gs_session_memory_context S
LEFT JOIN
(SELECT DISTINCT thrdtype, tid
FROM gs_thread_memory_context) T
on S.threadid = T.tid
),
TM AS
(SELECT
S.sessid AS Ssessid,
T.thrdtype AS sesstype,
T.threadid AS Tsessid,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM
gs_thread_memory_context T
LEFT JOIN
(SELECT DISTINCT sessid, threadid
FROM gs_session_memory_context) S
ON T.tid = S.threadid
)
SELECT * from SM
UNION ALL
SELECT
Ssessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NOT NULL
UNION ALL
SELECT
Tsessid AS sessid, sesstype, contextname, level, parent, totalsize, freesize, usedsize
FROM TM WHERE Ssessid IS NULL;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
ELSE
query_str := 'SELECT
T.threadid AS sessid,
T.thrdtype AS sesstype,
T.contextname AS contextname,
T.level AS level,
T.parent AS parent,
T.totalsize AS totalsize,
T.freesize AS freesize,
T.usedsize AS usedsize
FROM pg_catalog.pv_thread_memory_detail() T;';
FOR row_data IN EXECUTE(query_str) LOOP
sessid = row_data.sessid;
sesstype = row_data.sesstype;
contextname = row_data.contextname;
level = row_data.level;
parent = row_data.parent;
totalsize = row_data.totalsize;
freesize = row_data.freesize;
usedsize = row_data.usedsize;
return next;
END LOOP;
END IF;
RETURN;
END; $$
LANGUAGE plpgsql NOT FENCED;
DROP FUNCTION IF EXISTS pg_catalog.gs_get_history_memory_detail() cascade;
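-- Illustrative usage of gs_session_memory_detail_tp defined above (not executed here): it
-- reports per-context memory either per session (thread pool on) or per thread, e.g.
--   SELECT sessid, contextname, totalsize, freesize, usedsize
--   FROM pg_catalog.gs_session_memory_detail_tp()
--   ORDER BY totalsize DESC LIMIT 20;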
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5257;
CREATE OR REPLACE FUNCTION pg_catalog.gs_get_history_memory_detail(
cstring,
OUT memory_info text) RETURNS SETOF TEXT LANGUAGE INTERNAL as 'gs_get_history_memory_detail';
DROP FUNCTION IF EXISTS gs_is_dw_io_blocked() CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4772;
CREATE OR REPLACE FUNCTION pg_catalog.gs_is_dw_io_blocked(OUT result boolean)
RETURNS SETOF boolean
LANGUAGE internal
STABLE STRICT NOT FENCED NOT SHIPPABLE
AS $function$gs_is_dw_io_blocked$function$;
DROP FUNCTION IF EXISTS gs_block_dw_io(integer, text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4773;
CREATE OR REPLACE FUNCTION pg_catalog.gs_block_dw_io(timeout integer, identifier text, OUT result boolean)
RETURNS SETOF boolean
LANGUAGE internal
STABLE STRICT NOT FENCED NOT SHIPPABLE
AS $function$gs_block_dw_io$function$;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
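-- Illustrative usage (a sketch, not run by the upgrade; the timeout and identifier are
-- placeholders): double-write I/O can be blocked for a bounded period and the state checked, e.g.
--   SELECT pg_catalog.gs_block_dw_io(60, 'roach_backup');
--   SELECT pg_catalog.gs_is_dw_io_blocked();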
View File
@ -1,59 +0,0 @@
/*------ add sys fuction gs_undo_meta_dump_zone ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_zone(int4, boolean, OUT zone_id oid, OUT persist_type oid, OUT insert text, OUT discard text, OUT forcediscard text, OUT lsn text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4433;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta_dump_zone(int4, boolean, OUT zone_id oid, OUT persist_type oid, OUT insert text, OUT discard text, OUT forcediscard text, OUT lsn text)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta_dump_zone';
/*------ add sys fuction gs_undo_meta_dump_spaces ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_spaces(int4, boolean, OUT zone_id oid, OUT undorecord_space_tail text, OUT undorecord_space_head text, OUT undorecord_space_lsn text, OUT undoslot_space_tail text, OUT undoslot_space_head text, OUT undoreslot_space_lsn text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4432;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta_dump_spaces(int4, boolean, OUT zone_id oid, OUT undorecord_space_tail text, OUT undorecord_space_head text, OUT undorecord_space_lsn text, OUT undoslot_space_tail text, OUT undoslot_space_head text, OUT undoreslot_space_lsn text)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta_dump_spaces';
/*------ add sys fuction gs_undo_meta_dump_slot ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta_dump_slot(int4, boolean, OUT zone_id oid, OUT allocate text, OUT recycle text, OUT frozen_xid text, OUT global_frozen_xid text, OUT recycle_xid text, OUT global_recycle_xid text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4437;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta_dump_slot(int4, boolean, OUT zone_id oid, OUT allocate text, OUT recycle text, OUT frozen_xid text, OUT global_frozen_xid text, OUT recycle_xid text, OUT global_recycle_xid text)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta_dump_slot';
/*------ add sys fuction gs_undo_translot_dump_slot ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4541;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_slot';
/*------ add sys fuction gs_undo_translot_dump_xid ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid)
RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid';
/*------ add sys fuction gs_undo_dump_record ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_record(bigint, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4539;
CREATE FUNCTION pg_catalog.gs_undo_dump_record(bigint, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text)
RETURNS record LANGUAGE INTERNAL as 'gs_undo_dump_record';
/*------ add sys fuction gs_undo_dump_xid ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_xid(xid, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4540;
CREATE FUNCTION pg_catalog.gs_undo_dump_xid(xid, OUT undoptr oid, OUT xactid oid, OUT cid text,
OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text,
OUT prevurp text, OUT payloadlen text, OUT oldxactid text, OUT partitionoid text, OUT tablespace text, OUT alreadyread_bytes text, OUT prev_undorec_len text, OUT td_id text, OUT reserved text, OUT flag text, OUT flag2 text, OUT t_hoff text)
RETURNS record LANGUAGE INTERNAL as 'gs_undo_dump_xid';
/*------ add sys fuction gs_undo_dump_parsepage_mv ------*/
DROP FUNCTION IF EXISTS pg_catalog.gs_undo_dump_parsepage_mv(text, bigint, text, boolean) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4542;
CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_dump_parsepage_mv(relpath text, blkno bigint, reltype text, rmem boolean, OUT output text)
RETURNS text
LANGUAGE internal
STABLE STRICT NOT FENCED NOT SHIPPABLE
AS $function$gs_undo_dump_parsepage_mv$function$;
comment on function PG_CATALOG.gs_undo_dump_parsepage_mv(relpath text, blkno bigint, reltype text, rmem boolean) is 'parse uheap data page and undo to output file based on given filepath';
View File
@ -15,6 +15,7 @@ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1005;
CREATE OR REPLACE FUNCTION pg_catalog.compress_statistic_info(IN input_path text, IN step smallint, OUT path text, OUT extent_count bigint, OUT dispersion_count bigint, OUT void_count bigint) RETURNS record LANGUAGE INTERNAL AS 'compress_statistic_info';
-- compress read page/file from remote
DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(int4, int4, int4, int2, int2, int4, xid, int4, xid, boolean, int4) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5843;
CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, smallint, integer, xid, integer, xid, boolean, integer)
RETURNS bytea
@ -36,5 +37,10 @@ CREATE OR REPLACE FUNCTION pg_catalog.gs_read_file_size_from_remote(oid, oid, oi
NOT FENCED NOT SHIPPABLE
AS 'gs_read_file_size_from_remote';
DROP FUNCTION IF EXISTS pg_catalog.pg_read_binary_file_blocks(IN inputpath text, IN startblocknum int8, IN count int8,
OUT path text,
OUT blocknum int4,
OUT len int4,
OUT data bytea);
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5846;
CREATE OR REPLACE FUNCTION pg_catalog.pg_read_binary_file_blocks(IN input text, IN blocknum bigint, IN blockcount bigint, OUT path text, OUT blocknum integer, OUT len integer, OUT algorithm integer, OUT chunk_size integer, OUT data bytea) RETURNS SETOF record AS 'pg_read_binary_file_blocks' LANGUAGE INTERNAL IMMUTABLE;
View File
@ -1,9 +1,7 @@
create schema hll_cstore;
set current_schema = hll_cstore;
create table t_id(id int);
insert into t_id values(generate_series(1,5000));
--------------CONTENTS--------------------
-- hyperloglog test cases
------------------------------------------
@ -14,7 +12,6 @@ insert into t_id values(generate_series(1,5000));
--4. check cstore case--axx senario
--5. check cstore case--axx extended
------------------------------------------
------------------------------------------
-- 1. create table
------------------------------------------
@ -29,7 +26,6 @@ select a, #b from t_hll order by 1;
(2 rows)
drop table t_hll;
-- row store
create table t_hll(a int, b hll);
insert into t_hll select mod(id, 2) a, hll_add_agg(hll_hash_integer(id)) from t_id group by a;
@ -41,7 +37,6 @@ select a, #b from t_hll order by 1;
(2 rows)
drop table t_hll;
------------------------------------------
-- 2. check cstore case--hello world
------------------------------------------
@ -50,16 +45,12 @@ create table helloworld (
id integer,
set hll
) with (orientation = column);
--- insert an empty hll
insert into helloworld(id, set) values (1, hll_empty());
--- add a hashed integer to the hll
update helloworld set set = hll_add(set, hll_hash_integer(12345)) where id = 1;
--- or add a hashed string to the hll
update helloworld set set = hll_add(set, hll_hash_text('hello world')) where id = 1;
--- get the cardinality of the hll
select hll_cardinality(set) from helloworld where id = 1;
hll_cardinality
@ -67,13 +58,10 @@ select hll_cardinality(set) from helloworld where id = 1;
2
(1 row)
drop table helloworld;
------------------------------------------
-- 3. check cstore case--website traffic static senario
------------------------------------------
-- generate data
create table traffic(weekday int ,id int) with (orientation = column);
insert into traffic select 1, id%1000 from t_id;
@ -83,11 +71,9 @@ insert into traffic select 4, id%4000 from t_id;
insert into traffic select 5, id%5000 from t_id;
insert into traffic select 6, id%6000 from t_id;
insert into traffic select 7, id%7000 from t_id;
-- table to store hll statistics
create table report(weekday int, users hll) with (orientation = column);
insert into report select weekday, hll_add_agg(hll_hash_integer(id)) from traffic group by weekday;
-- 1->1000 2->2000 3->3000 4->4000 5->5000 6->5000 7->5000
select weekday, #hll_add_agg(hll_hash_integer(id)) as unique_users from traffic group by weekday order by weekday;
weekday | unique_users
@ -101,7 +87,6 @@ select weekday, #hll_add_agg(hll_hash_integer(id)) as unique_users from traffic
7 | 5034.89012432268
(7 rows)
-- should be around 5000
select #hll_union_agg(users) from report;
?column?
@ -109,10 +94,8 @@ select #hll_union_agg(users) from report;
5036.22316829936
(1 row)
drop table traffic;
drop table report;
------------------------------------------
-- 4. check cstore case--data warehouse use case
------------------------------------------
@ -121,7 +104,6 @@ create table facts (
date date,
user_id integer
) with (orientation = column);
-- generate date
insert into facts values ('2019-02-20', generate_series(1,100));
insert into facts values ('2019-02-21', generate_series(1,200));
@ -131,19 +113,16 @@ insert into facts values ('2019-02-24', generate_series(1,500));
insert into facts values ('2019-02-25', generate_series(1,600));
insert into facts values ('2019-02-26', generate_series(1,700));
insert into facts values ('2019-02-27', generate_series(1,800));
-- create the destination table
create table daily_uniques (
date date,
users hll
) with (orientation = column);
-- fill it with the aggregated unique statistics
INSERT INTO daily_uniques(date, users)
SELECT date, hll_add_agg(hll_hash_integer(user_id))
FROM facts
GROUP BY 1;
-- ask for the cardinality of the hll for each day
SELECT date, hll_cardinality(users) FROM daily_uniques order by date;
date | hll_cardinality
@ -158,7 +137,6 @@ SELECT date, hll_cardinality(users) FROM daily_uniques order by date;
Wed Feb 27 00:00:00 2019 | 798.111731634412
(8 rows)
-- ask for one week uniques
SELECT hll_cardinality(hll_union_agg(users)) FROM daily_uniques WHERE date >= '2019-02-20'::date AND date <= '2019-02-26'::date;
hll_cardinality
@ -166,7 +144,6 @@ SELECT hll_cardinality(hll_union_agg(users)) FROM daily_uniques WHERE date >= '2
696.602316769498
(1 row)
-- or a sliding window of uniques over the past 6 days
SELECT date, #hll_union_agg(users) OVER seven_days
FROM daily_uniques
@ -183,7 +160,6 @@ WINDOW seven_days AS (ORDER BY date ASC ROWS 6 PRECEDING);
Wed Feb 27 00:00:00 2019 | 798.111731634412
(8 rows)
-- or the number of uniques you saw yesterday that you did not see today
SELECT date, (#hll_union_agg(users) OVER two_days) - #users AS lost_uniques
FROM daily_uniques
@ -200,19 +176,15 @@ WINDOW two_days AS (ORDER BY date ASC ROWS 1 PRECEDING);
Wed Feb 27 00:00:00 2019 | 0
(8 rows)
drop table facts;
drop table daily_uniques;
------------------------------------------
-- 5. check cstore case--aqb test cases
------------------------------------------
create table test_hll(id bigint, name1 text, name2 text) with (orientation = column);
create table test_name1(id bigint, name1 hll) with (orientation = column);
create table test_name1_name2(id bigint, name1_name2 hll) with (orientation = column);
insert into test_hll select id, md5(id::text), md5(id::text) from t_id;
select hll_cardinality(hll_add_agg(hll_text)) , hll_cardinality(hll_add_agg(hll_bigint))
from (
select hll_hash_text(name1) hll_text,hll_hash_bigint(id) hll_bigint
@ -237,7 +209,6 @@ select hll_cardinality(hll_union_agg(hll_add_value))
5001.88168550794
(1 row)
select hll_cardinality(hll_union_agg(hll_add_value))
from (
select hll_add_agg(hll_hash_text(name1 || name2)) hll_add_value
@ -248,7 +219,6 @@ select hll_cardinality(hll_union_agg(hll_add_value))
5012.6502075377
(1 row)
select hll_cardinality(hll_union_agg(hll_add_value))
from (
select hll_add_agg(hll_hash_text(name1 || name2)) hll_add_value
@ -262,53 +232,44 @@ select hll_cardinality(hll_union_agg(hll_add_value))
10010.6991959748
(1 row)
insert into test_name1
select id, hll_add_agg(hll_hash_text(name1))
from test_hll
group by id;
select hll_cardinality(hll_union_agg(name1)) from test_name1;
hll_cardinality
------------------
4995.20839966803
(1 row)
insert into test_name1_name2
select id, hll_add_agg(hll_hash_text(name1 || name2))
from test_hll
group by id;
select hll_cardinality(hll_union_agg(name1_name2)) from test_name1_name2;
hll_cardinality
-----------------
5012.6502075377
(1 row)
drop table test_hll;
drop table test_name1;
drop table test_name1_name2;
------------------------------------------
-- 6. check cstore case--aqb extended test cases
------------------------------------------
create table t_data(a int, b int, c text , d text) with (orientation = column);
insert into t_data select mod(id,2), mod(id,3), id, id from t_id;
--create the dimentinon table
create table t_a_c_hll(a int, c hll) with (orientation = column);
create table t_a_cd_hll(a int, cd hll) with (orientation = column);
create table t_b_c_hll(b int, c hll) with (orientation = column);
create table t_b_cd_hll(b int, cd hll) with (orientation = column);
--insert the agg data
insert into t_a_c_hll select a, hll_add_agg(hll_hash_text(c)) from t_data group by a;
insert into t_a_cd_hll select a, hll_add_agg(hll_hash_text(c||d)) from t_data group by a;
insert into t_b_c_hll select b, hll_add_agg(hll_hash_text(c)) from t_data group by b;
insert into t_b_cd_hll select b, hll_add_agg(hll_hash_text(c||d)) from t_data group by b;
--group a have around 2500
--group b have around 1667
select a, #c from t_a_c_hll order by a;
@ -341,7 +302,6 @@ select b, #cd from t_b_cd_hll order by b;
2 | 1670.38421978935
(3 rows)
--should all be around 5000
select #hll_union_agg(c) from t_a_c_hll;
?column?
@ -367,7 +327,6 @@ select #hll_union_agg(cd) from t_b_cd_hll;
4967.36909301632
(1 row)
--prepare
prepare p1(int) as select a, hll_cardinality( hll_add_agg(hll_hash_text(c)) || hll_add_agg(hll_hash_text(d)) )from t_data where a = $1 group by a order by 1;
execute p1(0);
@ -383,7 +342,6 @@ execute p1(1);
(1 row)
deallocate p1;
prepare p2(int) as select b, hll_cardinality( hll_add_agg(hll_hash_text(c)) || hll_add_agg(hll_hash_text(d)) )from t_data where b = $1 group by b order by 1;
execute p2(0);
b | hll_cardinality
@ -404,7 +362,6 @@ execute p2(2);
(1 row)
deallocate p2;
--transaction
begin;
declare c cursor for select a, hll_cardinality( hll_add_agg(hll_hash_text(c)) || hll_add_agg(hll_hash_text(d)) )from t_data group by a order by 1;
@ -422,7 +379,6 @@ fetch next from c;
close c;
commit;
begin;
declare c cursor for select b, hll_cardinality( hll_add_agg(hll_hash_text(c)) || hll_add_agg(hll_hash_text(d)) )from t_data group by b order by 1;
fetch next from c;
@ -445,14 +401,12 @@ fetch next from c;
close c;
commit;
--cleaning up
drop table t_data;
drop table t_a_c_hll;
drop table t_a_cd_hll;
drop table t_b_c_hll;
drop table t_b_cd_hll;
--final cleaning
drop table t_id;
drop schema hll_cstore cascade;
View File
@ -23,7 +23,6 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "bmsql_order_l
insert into bmsql_order_line(ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_dist_info) values(1, 1, 1, 1, 1, '123');
update bmsql_order_line set ol_dist_info='ss' where ol_w_id =1;
delete from bmsql_order_line;
create table test_partition_for_null_hash_timestamp
(
a timestamp without time zone,
@ -40,7 +39,6 @@ create index idx_test_partition_for_null_hash_timestamp_1 on test_partition_for_
create index idx_test_partition_for_null_hash_timestamp_2 on test_partition_for_null_hash_timestamp(a,b) LOCAL;
create index idx_test_partition_for_null_hash_timestamp_3 on test_partition_for_null_hash_timestamp(c) LOCAL;
create index idx_test_partition_for_null_hash_timestamp_4 on test_partition_for_null_hash_timestamp(b,c,d) LOCAL;
create table test_partition_for_null_hash_text (a text, b varchar(2), c char(1), d varchar(2))
partition by hash (a)
(
@ -54,7 +52,6 @@ create index idx_test_partition_for_null_hash_text_3 on test_partition_for_null_
create index idx_test_partition_for_null_hash_text_4 on test_partition_for_null_hash_text(b,c,d) LOCAL;
create index idx_test_partition_for_null_hash_text_5 on test_partition_for_null_hash_text(b,c,d);
ERROR: Global and local partition index should not be on same column
CREATE TABLE select_partition_table_000_1(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -81,7 +78,6 @@ create index idx_select_partition_table_000_1_2 on select_partition_table_000_1(
create index idx_select_partition_table_000_1_3 on select_partition_table_000_1(C_BIGINT) LOCAL;
create index idx_select_partition_table_000_1_4 on select_partition_table_000_1(C_BIGINT,C_TS_WITH,C_DP) LOCAL;
create index idx_select_partition_table_000_1_5 on select_partition_table_000_1(C_BIGINT,C_NUMERIC,C_TS_WITHOUT);
CREATE TABLE select_partition_table_000_2(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -108,7 +104,6 @@ create index idx_select_partition_table_000_2_2 on select_partition_table_000_2(
create index idx_select_partition_table_000_2_3 on select_partition_table_000_2(C_SMALLINT) LOCAL;
create index idx_select_partition_table_000_2_4 on select_partition_table_000_2(C_SMALLINT,C_TS_WITH,C_DP) LOCAL;
create index idx_select_partition_table_000_2_5 on select_partition_table_000_2(C_SMALLINT,C_NUMERIC,C_TS_WITHOUT);
CREATE TABLE select_partition_table_000_3(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -152,7 +147,6 @@ CREATE TABLE select_partition_table_000_4(
partition select_partition_000_4_2
);
ERROR: column c_float cannot serve as a hash partitioning column because of its datatype
CREATE TABLE select_partition_table_000_5(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
View File
@ -14,11 +14,9 @@ create index delete_test_hash_index_local1 on delete_test_hash (a) local
partition delete_test_hash_p2_index_local tablespace PG_DEFAULT,
partition delete_test_hash_p3_index_local tablespace PG_DEFAULT
);
INSERT INTO delete_test_hash (a) VALUES (10);
INSERT INTO delete_test_hash (a, b) VALUES (50, repeat('x', 10000));
INSERT INTO delete_test_hash (a) VALUES (100);
SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3;
id | a | char_length
----+-----+-------------
@ -27,42 +25,33 @@ SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3;
| 100 |
(3 rows)
-- Pseudo Constant Quals
DELETE FROM delete_test_hash where null;
-- allow an alias to be specified for DELETE's target table
DELETE FROM delete_test_hash AS dt WHERE dt.a > 75;
-- if an alias is specified, don't allow the original table name
-- to be referenced
DELETE FROM delete_test_hash dt WHERE dt.a > 25;
SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3;
id | a | char_length
----+----+-------------
| 10 |
(1 row)
-- delete a row with a TOASTed value
DELETE FROM delete_test_hash WHERE a > 25;
SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3;
id | a | char_length
----+----+-------------
| 10 |
(1 row)
DROP TABLE delete_test_hash;
-- section 2:
create table hw_hash_partition_dml_t1 (id int, name text)partition by hash(id) (
partition hw_hash_partition_dml_t1_p1,
partition hw_hash_partition_dml_t1_p2,
partition hw_hash_partition_dml_t1_p3);
create index hw_hash_partition_dml_t1_index_local1 on hw_hash_partition_dml_t1(id) local
(
partition hw_hash_partition_dml_t1_p1_index_local_1 tablespace PG_DEFAULT,
@ -73,12 +62,10 @@ create table hw_hash_partition_dml_t2 (id int, name text)partition by hash(id) (
partition hw_hash_partition_dml_t2_p1,
partition hw_hash_partition_dml_t2_p2,
partition hw_hash_partition_dml_t2_p3);
create table hw_hash_partition_dml_t3 (id int, name text)partition by hash(id) (
partition hw_hash_partition_dml_t3_p1,
partition hw_hash_partition_dml_t3_p2,
partition hw_hash_partition_dml_t3_p3);
-- section 2.1: two table join, both are partitioned table
insert into hw_hash_partition_dml_t1 values (1, 'li'), (11, 'wang'), (21, 'zhang');
insert into hw_hash_partition_dml_t2 values (1, 'xi'), (11, 'zhao'), (27, 'qi');
@ -138,7 +125,6 @@ select * from hw_hash_partition_dml_t1 order by 1, 2;
24 | DDD
(5 rows)
-- section 2.2: delete from only one table, no joining
-- delete all tupes remaining: 13, 23, 24
delete from hw_hash_partition_dml_t1;
@ -147,7 +133,6 @@ select * from hw_hash_partition_dml_t1 order by 1, 2;
----+------
(0 rows)
-- section 3:
-- section 3.1: two table join, only one is partitioned table
-- and target relation is partitioned
@ -187,7 +172,6 @@ select * from hw_hash_partition_dml_t1 order by 1, 2;
24 | DDD
(3 rows)
-- section 3.2 delete from only one table, no joining
-- delete all tupes remaining: 13, 23, 24
delete from hw_hash_partition_dml_t1;
@ -196,7 +180,6 @@ select * from hw_hash_partition_dml_t1 order by 1, 2;
----+------
(0 rows)
-- section 3.3: two table join, only one is partitioned table
-- and target relation is on-partitioned
-- delete all tuples in hw_hash_partition_dml_t3
@ -224,7 +207,6 @@ select * from hw_hash_partition_dml_t3 order by 1, 2;
----+------
(0 rows)
-- delete all tuples that is less than 11 in hw_hash_partition_dml_t3, that is 3
insert into hw_hash_partition_dml_t3 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD');
delete from hw_hash_partition_dml_t3 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t3.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 11;
@ -236,7 +218,6 @@ select * from hw_hash_partition_dml_t3 order by 1, 2;
24 | DDD
(3 rows)
-- section 3.4 delete from only one table, no joining
-- delete all tuples remaining: 13, 23, 24
delete from hw_hash_partition_dml_t3;
@ -245,12 +226,10 @@ select * from hw_hash_partition_dml_t3 order by 1, 2;
----+------
(0 rows)
-- finally, drop table hw_hash_partition_dml_t1, hw_hash_partition_dml_t2 and hw_hash_partition_dml_t3
drop table hw_hash_partition_dml_t1;
drop table hw_hash_partition_dml_t2;
drop table hw_hash_partition_dml_t3;
create schema fvt_other_cmd;
CREATE TABLE FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001(COL_INT int)
partition by hash (COL_INT)
@ -269,7 +248,6 @@ i:=i+100;
end loop;
end;
/
drop table test_index_ht;
ERROR: table "test_index_ht" does not exist
create table test_index_ht (a int, b int, c int)
View File
@ -7,7 +7,6 @@ partition by hash (a)
partition test_partition_for_null_hash_p2,
partition test_partition_for_null_hash_p3
);
insert into test_partition_for_null_hash values (0, 0, 0, 0);
insert into test_partition_for_null_hash values (1, 1, 1, 1);
insert into test_partition_for_null_hash values (5, 5, 5, 5);
@ -28,7 +27,6 @@ insert into test_partition_for_null_hash values (null, null, null, null);
ERROR: inserted partition key does not map to any table partition
-- success
insert into test_partition_for_null_hash values (0, null, null, null);
CREATE TABLE select_hash_partition_table_000_3(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -50,10 +48,8 @@ CREATE TABLE select_hash_partition_table_000_3(
partition select_hash_partition_000_3_1,
partition select_hash_partition_000_3_2
);
create index select_list_partition_table_index_000_3 ON select_hash_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3);
create view select_list_partition_table_view_000_3 as select * from select_hash_partition_table_000_3;
INSERT INTO select_hash_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01');
INSERT INTO select_hash_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02');
INSERT INTO select_hash_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03');
@ -73,14 +69,12 @@ select count(*) from select_hash_partition_table_000_3;
13
(1 row)
CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT)
partition by hash(ID)
(
partition partition_wise_join_table_001_1_1,
partition partition_wise_join_table_001_1_2
) ;
INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 );
INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 );
select count(*) from partition_wise_join_table_001_1;
@ -89,14 +83,12 @@ select count(*) from partition_wise_join_table_001_1;
153
(1 row)
CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision )
partition by hash(ID)
(
partition partition_wise_join_table_001_1_1,
partition partition_wise_join_table_001_1_2
);
INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000);
INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000);
select count(*) from partition_wise_join_table_001_2;
@ -105,7 +97,6 @@ select count(*) from partition_wise_join_table_001_2;
189
(1 row)
CREATE TABLE select_partition_table_000_3(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -127,10 +118,8 @@ CREATE TABLE select_partition_table_000_3(
partition select_partition_000_3_1,
partition select_partition_000_3_2
);
create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3);
create view select_partition_table_view_000_3 as select * from select_partition_table_000_3;
INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01');
INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02');
INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03');
@ -150,7 +139,6 @@ select count(*) from select_partition_table_000_3;
13
(1 row)
create table test_select_hash_partition (a int, b int)
partition by hash(a)
(
@ -158,7 +146,6 @@ partition by hash(a)
partition test_select_hash_partition_p2,
partition test_select_hash_partition_p3
);
insert into test_select_hash_partition values(1,1);
insert into test_select_hash_partition values(2,2);
insert into test_select_hash_partition values(0,0);
@ -176,7 +163,6 @@ select * from test_select_hash_partition order by a;
7 | 6
(6 rows)
CREATE TABLE hw_partition_select_test(C_INT INTEGER)
partition by hash (C_INT)
(
View File
@ -16,18 +16,15 @@ INTERVAL ('1 month')
PARTITION hw_partition_interval_movement_p1 VALUES LESS THAN ('2020-04-01'),
PARTITION hw_partition_interval_movement_p2 VALUES LESS THAN ('2020-05-01')
) DISABLE ROW MOVEMENT;
create index hw_partition_interval_movement_ind1 on hw_partition_interval_movement(c1) local;
create index hw_partition_interval_movement_ind2 on hw_partition_interval_movement(c2) local;
create index hw_partition_interval_movement_ind3 on hw_partition_interval_movement(c3) local;
--insert into table
insert into hw_partition_interval_movement values(7,2,'2020-02-01');
insert into hw_partition_interval_movement values(3,1,'2020-03-01');
insert into hw_partition_interval_movement values(5,3,'2020-04-01');
insert into hw_partition_interval_movement values(7,5,'2020-05-01');
insert into hw_partition_interval_movement values(1,4,'2020-06-01');
select relname, parttype, partstrategy, boundaries from pg_partition
where parentid = (select oid from pg_class where relname = 'hw_partition_interval_movement')
order by 1;
@ -41,7 +38,6 @@ select relname, parttype, partstrategy, boundaries from pg_partition
sys_p2 | p | i | {"Wed Jul 01 00:00:00 2020"}
(6 rows)
-- fail: update record belongs to a range partition which will be move to other range partition
update hw_partition_interval_movement set C3 = '2020-04-22' where C3 = '2020-03-01';
ERROR: fail to update partitioned table "hw_partition_interval_movement"
@ -62,10 +58,8 @@ DETAIL: disable row movement
update hw_partition_interval_movement set C3 = '2020-07-22' where C3 = '2020-05-01';
ERROR: fail to update partitioned table "hw_partition_interval_movement"
DETAIL: disable row movement
-- enable row movement
alter table hw_partition_interval_movement ENABLE ROW MOVEMENT;
-- succeed: update record belongs to a range partition which will be move to other range partition
update hw_partition_interval_movement set C3 = '2020-04-22' where C3 = '2020-03-01';
-- succeed: update record belongs to a range partition which will be move to an existed interval partition
@ -76,7 +70,6 @@ update hw_partition_interval_movement set C3 = '2020-07-22' where C3 = '2020-04-
update hw_partition_interval_movement set C3 = '2020-03-22' where C3 = '2020-05-01';
-- succeed: update record belongs to a interval partition which will be move to a not existed interval partition
update hw_partition_interval_movement set C3 = '2020-08-22' where C3 = '2020-06-01';
select * from hw_partition_interval_movement;
c1 | c2 | c3
----+----+--------------------------
View File
@ -12,8 +12,6 @@ begin
end loop;
end;
$$ language plpgsql;
select insert_mm('2020-05-1');
insert_mm
-----------
View File
@ -14,8 +14,3 @@ INTERVAL ('1 month')
PARTITION partition_interval_parallel_p2 VALUES LESS THAN ('2020-06-01')
);
CREATE INDEX idx1_partition_interval_parallel on partition_interval_parallel(c3) local ;
View File
@ -23,7 +23,6 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "bmsql_order_l
insert into bmsql_order_line(ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_dist_info) values(1, 1, 1, 1, 1, '123');
update bmsql_order_line set ol_dist_info='ss' where ol_w_id =1;
delete from bmsql_order_line;
create table test_partition_for_null_list_timestamp
(
a timestamp without time zone,
@ -40,7 +39,6 @@ create index idx_test_partition_for_null_list_timestamp_1 on test_partition_for_
create index idx_test_partition_for_null_list_timestamp_2 on test_partition_for_null_list_timestamp(a,b) LOCAL;
create index idx_test_partition_for_null_list_timestamp_3 on test_partition_for_null_list_timestamp(c) LOCAL;
create index idx_test_partition_for_null_list_timestamp_4 on test_partition_for_null_list_timestamp(b,c,d) LOCAL;
create table test_partition_for_null_list_text (a text, b varchar(2), c char(1), d varchar(2))
partition by list (a)
(
@ -59,7 +57,6 @@ create index idx_test_partition_for_null_list_text_4 on test_partition_for_null_
ERROR: relation "test_partition_for_null_list_text" does not exist
create index idx_test_partition_for_null_list_text_5 on test_partition_for_null_list_text(b,c,d);
ERROR: relation "test_partition_for_null_list_text" does not exist
CREATE TABLE select_partition_table_000_1(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -86,7 +83,6 @@ create index idx_select_partition_table_000_1_2 on select_partition_table_000_1(
create index idx_select_partition_table_000_1_3 on select_partition_table_000_1(C_BIGINT) LOCAL;
create index idx_select_partition_table_000_1_4 on select_partition_table_000_1(C_BIGINT,C_TS_WITH,C_DP) LOCAL;
create index idx_select_partition_table_000_1_5 on select_partition_table_000_1(C_BIGINT,C_NUMERIC,C_TS_WITHOUT);
CREATE TABLE select_partition_table_000_2(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -113,7 +109,6 @@ create index idx_select_partition_table_000_2_2 on select_partition_table_000_2(
create index idx_select_partition_table_000_2_3 on select_partition_table_000_2(C_SMALLINT) LOCAL;
create index idx_select_partition_table_000_2_4 on select_partition_table_000_2(C_SMALLINT,C_TS_WITH,C_DP) LOCAL;
create index idx_select_partition_table_000_2_5 on select_partition_table_000_2(C_SMALLINT,C_NUMERIC,C_TS_WITHOUT);
CREATE TABLE select_partition_table_000_3(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -157,7 +152,6 @@ CREATE TABLE select_partition_table_000_4(
partition select_partition_000_4_2 values (5,6,7,8,9)
);
ERROR: column c_dp cannot serve as a list partitioning column because of its datatype
create table test_list_default (a int, b int)
partition by list(a)
(
View File
@ -27,7 +27,6 @@ insert into test_partition_for_null_list values (null, null, null, null);
ERROR: inserted partition key does not map to any table partition
-- success
insert into test_partition_for_null_list values (0, null, null, null);
CREATE TABLE select_list_partition_table_000_3(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -49,10 +48,8 @@ CREATE TABLE select_list_partition_table_000_3(
partition select_list_partition_000_3_1 values (111,222,333,444),
partition select_list_partition_000_3_2 values (555,666,777,888,999,1100,1600)
);
create index select_list_partition_table_index_000_3 ON select_list_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3);
create view select_list_partition_table_view_000_3 as select * from select_list_partition_table_000_3;
INSERT INTO select_list_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01');
INSERT INTO select_list_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02');
INSERT INTO select_list_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03');
@ -72,14 +69,12 @@ select count(*) from select_list_partition_table_000_3;
13
(1 row)
CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT)
partition by list(ID)
(
partition partition_wise_join_table_001_1_1 values (1,42,3,44,5,46,7,48,9),
partition partition_wise_join_table_001_1_2 values (41,2,43,4,45,6,47,8,49)
) ;
INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 );
INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 );
select count(*) from partition_wise_join_table_001_1;
@ -88,14 +83,12 @@ select count(*) from partition_wise_join_table_001_1;
153
(1 row)
CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision )
partition by list(ID)
(
partition partition_wise_join_table_001_1_1 values (71,2,73,4,75,6,77,8,79),
partition partition_wise_join_table_001_1_2 values (1,72,3,74,5,76,7,78,9)
);
INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000);
INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000);
select count(*) from partition_wise_join_table_001_2;
@ -104,7 +97,6 @@ select count(*) from partition_wise_join_table_001_2;
189
(1 row)
CREATE TABLE select_partition_table_000_3(
C_CHAR_1 CHAR(1),
C_CHAR_2 CHAR(10),
@ -126,10 +118,8 @@ CREATE TABLE select_partition_table_000_3(
partition select_partition_000_3_1 values (111,222,333,444),
partition select_partition_000_3_3 values (555,666,777,888,999,1100,1600)
);
create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3);
create view select_partition_table_view_000_3 as select * from select_partition_table_000_3;
INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01');
INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02');
INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03');
@ -151,9 +141,7 @@ select count(*) from select_partition_table_000_3;
13
(1 row)
create table hw_partition_select_ordinary_table (a int, b int);
create table test_select_list_partition (a int, b int)
partition by list(a)
(
@ -161,7 +149,6 @@ partition by list(a)
partition test_select_list_partition_p2 values (1,2,3),
partition test_select_list_partition_p3 values (4,5,6)
);
insert into test_select_list_partition values(1,1);
insert into test_select_list_partition values(2,2);
insert into test_select_list_partition values(0,0);
@ -176,7 +163,6 @@ select count(*) from test_select_list_partition;
4
(1 row)
CREATE TABLE hw_partition_select_test(C_INT INTEGER)
partition by list (C_INT)
(


@ -559,7 +559,6 @@ insert into t_p_mutil_t1 values('201902', '1', '1', 1);
insert into t_p_mutil_t1 values('201903', '2', '1', 1);
insert into t_p_mutil_t1 values('201903', '1', '1', 1);
insert into t_p_mutil_t1 values('201903', '2', '1', 1);
CREATE TABLE t_p_mutil_t2
(
month_code VARCHAR2 ( 30 ) NOT NULL ,
@ -677,7 +676,6 @@ CREATE TRIGGER delete_ar_trigger1
AFTER delete ON test_trigger_src_tbl1
FOR EACH ROW
EXECUTE PROCEDURE tri_delete_func1();
CREATE TRIGGER delete_ar_trigger2
AFTER delete ON test_trigger_src_tbl2
FOR EACH ROW
@ -736,7 +734,6 @@ CREATE TRIGGER delete_ar_trigger1
BEFORE delete ON test_trigger_src_tbl1
FOR EACH ROW
EXECUTE PROCEDURE tri_delete_func1();
CREATE TRIGGER delete_ar_trigger2
BEFORE delete ON test_trigger_src_tbl2
FOR EACH ROW
@ -797,7 +794,6 @@ CREATE TRIGGER delete_ar_trigger1
BEFORE delete ON test_trigger_src_tbl1
FOR EACH STATEMENT
EXECUTE PROCEDURE tri_delete_func1();
CREATE TRIGGER delete_ar_trigger2
BEFORE delete ON test_trigger_src_tbl2
FOR EACH STATEMENT
@ -856,7 +852,6 @@ CREATE TRIGGER delete_ar_trigger1
BEFORE delete ON test_trigger_src_tbl1
FOR EACH STATEMENT
EXECUTE PROCEDURE tri_delete_func1();
CREATE TRIGGER delete_ar_trigger2
BEFORE delete ON test_trigger_src_tbl2
FOR EACH STATEMENT
@ -915,7 +910,6 @@ CREATE TRIGGER delete_ar_trigger1
AFTER delete ON test_trigger_src_tbl1
FOR EACH STATEMENT
EXECUTE PROCEDURE tri_delete_func1();
CREATE TRIGGER delete_ar_trigger2
AFTER delete ON test_trigger_src_tbl2
FOR EACH STATEMENT


@ -674,7 +674,6 @@ insert into t_p_mutil_t1 values('201902', '1', '1', 1);
insert into t_p_mutil_t1 values('201903', '2', '1', 1);
insert into t_p_mutil_t1 values('201903', '1', '1', 1);
insert into t_p_mutil_t1 values('201903', '2', '1', 1);
CREATE TABLE t_p_mutil_t2
(
month_code VARCHAR2 ( 30 ) NOT NULL ,
@ -909,7 +908,6 @@ CREATE TRIGGER update_ar_trigger1
AFTER UPDATE ON test_trigger_src_tbl1
FOR EACH ROW
EXECUTE PROCEDURE tri_update_func1();
CREATE TRIGGER update_ar_trigger2
AFTER UPDATE ON test_trigger_src_tbl2
FOR EACH ROW
@ -972,7 +970,6 @@ CREATE TRIGGER update_ar_trigger1
BEFORE UPDATE ON test_trigger_src_tbl1
FOR EACH ROW
EXECUTE PROCEDURE tri_update_func1();
CREATE TRIGGER update_ar_trigger2
BEFORE UPDATE ON test_trigger_src_tbl2
FOR EACH ROW
@ -1035,7 +1032,6 @@ CREATE TRIGGER update_ar_trigger1
BEFORE UPDATE ON test_trigger_src_tbl1
FOR EACH STATEMENT
EXECUTE PROCEDURE tri_update_func1();
CREATE TRIGGER update_ar_trigger2
BEFORE UPDATE ON test_trigger_src_tbl2
FOR EACH STATEMENT
@ -1098,7 +1094,6 @@ CREATE TRIGGER update_ar_trigger1
BEFORE UPDATE ON test_trigger_src_tbl1
FOR EACH STATEMENT
EXECUTE PROCEDURE tri_update_func1();
CREATE TRIGGER update_ar_trigger2
BEFORE UPDATE ON test_trigger_src_tbl2
FOR EACH STATEMENT
@ -1161,7 +1156,6 @@ CREATE TRIGGER update_ar_trigger1
AFTER UPDATE ON test_trigger_src_tbl1
FOR EACH STATEMENT
EXECUTE PROCEDURE tri_update_func1();
CREATE TRIGGER update_ar_trigger2
AFTER UPDATE ON test_trigger_src_tbl2
FOR EACH STATEMENT
@ -1224,6 +1218,5 @@ insert into settest07 values(1,'开席');
create table settest01(id int,rowid number);
insert into settest01 values(3,5);
update settest01 a,settest07 b set b.c2=4 where a.id>b.c1;
\c regression
drop database multiupdate;


@ -99,7 +99,6 @@ explain (verbose on, costs off) select sum(c1) from t1 where c2=1 limit 1;
Index Cond: (t1.c2 = 1::numeric)
(7 rows)
-- agg fusion
drop index idx2;
-- index t1(c2): indexonlyscan
@ -120,7 +119,6 @@ select sum(c2) from t1 where c2=3;
6
(1 row)
-- index t1(c2): indexscan
explain (verbose on, costs off) select sum(c3) from t1 where c2=3;
QUERY PLAN
@ -139,7 +137,6 @@ select sum(c3) from t1 where c2=3;
4
(1 row)
-- index t1(c3, c2): indexonlyscan
drop index idx1;
create index idx3 on t1(c3, c2);
@ -187,7 +184,6 @@ explain (verbose on, costs off) select sum(c2) from t1 where c2=3;
Index Cond: (t1.c2 = 3::numeric)
(6 rows)
select sum(c3) from t1 where c3=3;
sum
-----
@ -212,7 +208,6 @@ select sum(c2) from t1 where c2=3;
6
(1 row)
-- sort fusion
explain (verbose on, costs off) select c3 from t1 where c3 < 10 order by c2;
QUERY PLAN
@ -235,7 +230,6 @@ select c3 from t1 where c3 < 10 order by c2;
3
(4 rows)
-- nestloop fusion
drop table if exists tn1, tn2;
NOTICE: table "tn1" does not exist, skipping
@ -356,10 +350,8 @@ select tn2.c1, tn1.c1 from tn1,tn2 where tn1.c2 <20 and tn2.c2 <20;
17 | 7
(9 rows)
drop table if exists t1, t2;
drop table if exists tn1, tn2;
-- test opfusion update time
show enable_opfusion;
enable_opfusion
@ -377,14 +369,11 @@ select pg_stat_get_last_data_changed_time(oid) != 0 from pg_class where relname
t
(1 row)
drop table test_opfusion_update;
-- test pbe opfusion when param set null
create table test_bypass (a int, b int);
create index itest on test_bypass(a);
insert into test_bypass values(1,2);
prepare p1 as select * from test_bypass where a = $1;
execute p1(null);
a | b
@ -397,9 +386,7 @@ execute p1(1);
1 | 2
(1 row)
drop table test_bypass;
-- clean
reset enable_seqscan;
reset enable_bitmapscan;


@ -6,8 +6,6 @@ ERROR: database "db_testa" already exists
create database if not exists db_testb;
drop database if exists db_testa;
drop database if exists db_testb;
create user test_user with password 'Abcd.123';
ALTER USER IF EXISTS test_user IDENTIFIED BY 'Abcd.1234';
ALTER USER test_user IDENTIFIED BY 'Abcd.12345';
@ -16,7 +14,6 @@ NOTICE: role "test_user2" does not exist, skipping
ALTER USER test_user2 IDENTIFIED BY 'Abcd.1234';
ERROR: role "test_user2" does not exist
DROP USER test_user;
CREATE SCHEMA sch_name;
CREATE SCHEMA IF NOT EXISTS sch_name;
NOTICE: schema "sch_name" already exists,skipping
@ -25,4 +22,3 @@ CREATE SCHEMA sch_name2;
ERROR: schema "sch_name2" already exists
drop SCHEMA sch_name;
drop SCHEMA sch_name2;


@ -901,7 +901,7 @@ test: hw_cipher_aes128
test: hw_pwd_encryption_sm3
test: rule_test
test: test_auto_increment
test: dump_auto_increment
#test: dump_auto_increment
#delete limit
test: delete_limit
@ -952,7 +952,7 @@ test: procedure_privilege_test
# global temporary table: parallel truncate
test: gtt_trunc_pre
test: gtt_trunc_parallel_dml1 gtt_trunc_parallel_dml2 gtt_trunc_parallel_ddl1 gtt_trunc_parallel_ddl2
test: gtt_trunc_clean
#test: gtt_trunc_clean
test: toomanyparams

0
src/test/regress/security_scripts/post_case_audit.sh Normal file → Executable file