From c7dc7cfeea4d2e2ebab2c7d645f4c1819a4f8fd8 Mon Sep 17 00:00:00 2001 From: MinhLA1410 <36942826+MinhLA1410@users.noreply.github.com> Date: Tue, 21 Dec 2021 13:15:20 +0700 Subject: [PATCH] Release version 0.2.1 Support PostgreSQL 14.0 (#9) Support PostgreSQL 14.0 --- expected/{13.3 => 13.4}/import_local.out | 0 expected/{13.3 => 13.4}/import_server.out | 0 expected/{13.3 => 13.4}/parquet_s3_fdw2.out | 0 .../{13.3 => 13.4}/parquet_s3_fdw_local.out | 54 ++++---- .../parquet_s3_fdw_post_local.out | 109 ++++++++++++---- .../parquet_s3_fdw_post_server.out | 101 ++++++++++---- .../{13.3 => 13.4}/parquet_s3_fdw_server.out | 90 ++++++------- expected/{14beta2 => 14.0}/import_local.out | 0 expected/{14beta2 => 14.0}/import_server.out | 0 .../{14beta2 => 14.0}/parquet_s3_fdw2.out | 0 .../parquet_s3_fdw_local.out | 54 ++++---- .../parquet_s3_fdw_post_local.out | 109 +++++++++++----- .../parquet_s3_fdw_post_server.out | 101 ++++++++++---- .../parquet_s3_fdw_server.out | 90 ++++++------- parquet_s3_fdw.h | 11 +- parquet_s3_fdw_server_option.c | 41 +----- sql/{13.3 => 13.4}/import.sql | 0 sql/{13.3 => 13.4}/import_local.sql | 0 sql/{13.3 => 13.4}/import_server.sql | 0 sql/{13.3 => 13.4}/parquet_s3_fdw.sql | 0 sql/{13.3 => 13.4}/parquet_s3_fdw2.sql | 0 sql/{13.3 => 13.4}/parquet_s3_fdw_local.sql | 0 sql/{13.3 => 13.4}/parquet_s3_fdw_post.sql | 88 ++++++++++--- .../parquet_s3_fdw_post_local.sql | 0 .../parquet_s3_fdw_post_server.sql | 0 sql/{13.3 => 13.4}/parquet_s3_fdw_server.sql | 0 sql/{14beta2 => 14.0}/import.sql | 0 sql/{14beta2 => 14.0}/import_local.sql | 0 sql/{14beta2 => 14.0}/import_server.sql | 0 sql/{14beta2 => 14.0}/parquet_s3_fdw.sql | 0 sql/{14beta2 => 14.0}/parquet_s3_fdw2.sql | 0 .../parquet_s3_fdw_local.sql | 0 sql/{14beta2 => 14.0}/parquet_s3_fdw_post.sql | 88 ++++++++++--- .../parquet_s3_fdw_post_local.sql | 0 .../parquet_s3_fdw_post_server.sql | 0 .../parquet_s3_fdw_server.sql | 0 src/parquet_fdw.c | 123 +++++++++--------- src/parquet_impl.cpp | 4 +- 38 files 
changed, 673 insertions(+), 390 deletions(-) rename expected/{13.3 => 13.4}/import_local.out (100%) rename expected/{13.3 => 13.4}/import_server.out (100%) rename expected/{13.3 => 13.4}/parquet_s3_fdw2.out (100%) rename expected/{13.3 => 13.4}/parquet_s3_fdw_local.out (93%) rename expected/{14beta2 => 13.4}/parquet_s3_fdw_post_local.out (98%) rename expected/{14beta2 => 13.4}/parquet_s3_fdw_post_server.out (98%) rename expected/{13.3 => 13.4}/parquet_s3_fdw_server.out (89%) rename expected/{14beta2 => 14.0}/import_local.out (100%) rename expected/{14beta2 => 14.0}/import_server.out (100%) rename expected/{14beta2 => 14.0}/parquet_s3_fdw2.out (100%) rename expected/{14beta2 => 14.0}/parquet_s3_fdw_local.out (90%) rename expected/{13.3 => 14.0}/parquet_s3_fdw_post_local.out (98%) rename expected/{13.3 => 14.0}/parquet_s3_fdw_post_server.out (98%) rename expected/{14beta2 => 14.0}/parquet_s3_fdw_server.out (82%) rename sql/{13.3 => 13.4}/import.sql (100%) rename sql/{13.3 => 13.4}/import_local.sql (100%) rename sql/{13.3 => 13.4}/import_server.sql (100%) rename sql/{13.3 => 13.4}/parquet_s3_fdw.sql (100%) rename sql/{13.3 => 13.4}/parquet_s3_fdw2.sql (100%) rename sql/{13.3 => 13.4}/parquet_s3_fdw_local.sql (100%) rename sql/{13.3 => 13.4}/parquet_s3_fdw_post.sql (98%) rename sql/{13.3 => 13.4}/parquet_s3_fdw_post_local.sql (100%) rename sql/{13.3 => 13.4}/parquet_s3_fdw_post_server.sql (100%) rename sql/{13.3 => 13.4}/parquet_s3_fdw_server.sql (100%) rename sql/{14beta2 => 14.0}/import.sql (100%) rename sql/{14beta2 => 14.0}/import_local.sql (100%) rename sql/{14beta2 => 14.0}/import_server.sql (100%) rename sql/{14beta2 => 14.0}/parquet_s3_fdw.sql (100%) rename sql/{14beta2 => 14.0}/parquet_s3_fdw2.sql (100%) rename sql/{14beta2 => 14.0}/parquet_s3_fdw_local.sql (100%) rename sql/{14beta2 => 14.0}/parquet_s3_fdw_post.sql (98%) rename sql/{14beta2 => 14.0}/parquet_s3_fdw_post_local.sql (100%) rename sql/{14beta2 => 14.0}/parquet_s3_fdw_post_server.sql (100%) rename 
sql/{14beta2 => 14.0}/parquet_s3_fdw_server.sql (100%) diff --git a/expected/13.3/import_local.out b/expected/13.4/import_local.out similarity index 100% rename from expected/13.3/import_local.out rename to expected/13.4/import_local.out diff --git a/expected/13.3/import_server.out b/expected/13.4/import_server.out similarity index 100% rename from expected/13.3/import_server.out rename to expected/13.4/import_server.out diff --git a/expected/13.3/parquet_s3_fdw2.out b/expected/13.4/parquet_s3_fdw2.out similarity index 100% rename from expected/13.3/parquet_s3_fdw2.out rename to expected/13.4/parquet_s3_fdw2.out diff --git a/expected/13.3/parquet_s3_fdw_local.out b/expected/13.4/parquet_s3_fdw_local.out similarity index 93% rename from expected/13.3/parquet_s3_fdw_local.out rename to expected/13.4/parquet_s3_fdw_local.out index 0224c20..4683d99 100644 --- a/expected/13.3/parquet_s3_fdw_local.out +++ b/expected/13.4/parquet_s3_fdw_local.out @@ -85,15 +85,15 @@ EXPLAIN (COSTS OFF) SELECT * FROM example1 ORDER BY three; SET client_min_messages = DEBUG1; --Testcase 12: SELECT * FROM example1 WHERE one < 1; -psql:sql/13.3/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 13: SELECT * FROM example1 WHERE one <= 1; -psql:sql/13.3/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -101,15 +101,15 @@ psql:sql/13.3/parquet_s3_fdw.sql:51: DEBUG: 
parquet_s3_fdw: skip rowgroup 2 --Testcase 14: SELECT * FROM example1 WHERE one > 6; -psql:sql/13.3/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 15: SELECT * FROM example1 WHERE one >= 6; -psql:sql/13.3/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 6 | {16,17,18} | tres | 2018-01-06 00:00:00 | 2018-01-06 | f | 2 @@ -117,7 +117,7 @@ psql:sql/13.3/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 --Testcase 16: SELECT * FROM example1 WHERE one = 2; -psql:sql/13.3/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -125,15 +125,15 @@ psql:sql/13.3/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 --Testcase 17: SELECT * FROM example1 WHERE one = 7; -psql:sql/13.3/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 18: SELECT * FROM example1 WHERE six = true; 
-psql:sql/13.3/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -152,7 +152,7 @@ SELECT * FROM example1 WHERE six = false; --Testcase 20: SELECT * FROM example1 WHERE seven < 1.5; -psql:sql/13.3/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -170,7 +170,7 @@ SELECT * FROM example1 WHERE seven <= 1.5; --Testcase 22: SELECT * FROM example1 WHERE seven = 1.5; -psql:sql/13.3/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -178,7 +178,7 @@ psql:sql/13.3/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 --Testcase 23: SELECT * FROM example1 WHERE seven > 1; -psql:sql/13.3/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -207,7 +207,7 @@ SELECT * FROM example1 WHERE seven IS NULL; prepare prep(date) as select * from example1 where five < $1; --Testcase 27: execute prep('2018-01-03'); -psql:sql/13.3/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 
+psql:sql/13.4/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -216,8 +216,8 @@ psql:sql/13.3/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 --Testcase 28: execute prep('2018-01-01'); -psql:sql/13.3/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) @@ -227,18 +227,18 @@ SET client_min_messages = WARNING; --Testcase 29: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv; -psql:sql/13.3/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required +psql:sql/13.4/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required --Testcase 30: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename 'nonexistent.parquet', some_option '123'); -psql:sql/13.3/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory +psql:sql/13.4/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 31: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename :'var', some_option '123'); -psql:sql/13.3/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option "some_option" +psql:sql/13.4/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option "some_option" -- type mismatch \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 32: @@ -247,10 +247,10 
@@ SERVER parquet_s3_srv OPTIONS (filename :'var', sorted 'one'); --Testcase 33: SELECT one FROM example_fail; -psql:sql/13.3/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found +psql:sql/13.4/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found --Testcase 34: SELECT two FROM example_fail; -psql:sql/13.3/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' +psql:sql/13.4/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' -- files_func --Testcase 35: CREATE FUNCTION list_parquet_s3_files(args JSONB) @@ -299,17 +299,17 @@ LANGUAGE SQL; CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'int_array_func'); -psql:sql/13.3/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] +psql:sql/13.4/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] --Testcase 41: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'no_args_func'); -psql:sql/13.3/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist +psql:sql/13.4/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist --Testcase 42: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'list_parquet_s3_files', files_func_arg 'invalid json'); -psql:sql/13.3/parquet_s3_fdw.sql:152: ERROR: invalid input syntax for type json +psql:sql/13.4/parquet_s3_fdw.sql:152: ERROR: invalid input syntax for type json DETAIL: Token "invalid" is invalid. CONTEXT: JSON data, line 1: invalid... 
--Testcase 43: @@ -603,7 +603,7 @@ SELECT * FROM example3; --Testcase 68: SELECT * FROM example3 WHERE three = 3; -psql:sql/13.3/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three -----+-----+------- (0 rows) @@ -627,14 +627,14 @@ SET client_min_messages = WARNING; SELECT * FROM public.parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 71: SELECT parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 72: diff --git a/expected/14beta2/parquet_s3_fdw_post_local.out b/expected/13.4/parquet_s3_fdw_post_local.out similarity index 98% rename from expected/14beta2/parquet_s3_fdw_post_local.out rename to expected/13.4/parquet_s3_fdw_post_local.out index cd0ea29..bb16da9 100644 --- a/expected/14beta2/parquet_s3_fdw_post_local.out +++ b/expected/13.4/parquet_s3_fdw_post_local.out @@ -172,7 +172,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work (1 row) ALTER SERVER parquet_s3_srv OPTIONS (SET use_minio 'false'); -psql:sql/14beta2/parquet_s3_fdw_post.sql:188: ERROR: option "use_minio" not found +psql:sql/13.4/parquet_s3_fdw_post.sql:188: ERROR: option "use_minio" not found --Testcase 17: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option use_minio is useless. 
c3 | c5 @@ -186,7 +186,7 @@ DO $d$ OPTIONS (SET use_minio 'true')$$; END; $d$; -psql:sql/14beta2/parquet_s3_fdw_post.sql:196: ERROR: option "use_minio" not found +psql:sql/13.4/parquet_s3_fdw_post.sql:196: ERROR: option "use_minio" not found --Testcase 18: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again c3 | c5 @@ -197,7 +197,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again -- Test that alteration of user mapping options causes reconnection ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'no such user'); -psql:sql/14beta2/parquet_s3_fdw_post.sql:202: ERROR: option "user" not found +psql:sql/13.4/parquet_s3_fdw_post.sql:202: ERROR: option "user" not found --Testcase 19: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option user is useless. c3 | c5 @@ -207,7 +207,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'minioadmin'); -psql:sql/14beta2/parquet_s3_fdw_post.sql:206: ERROR: option "user" not found +psql:sql/13.4/parquet_s3_fdw_post.sql:206: ERROR: option "user" not found --Testcase 20: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again c3 | c5 @@ -2145,7 +2145,8 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL 20 | 0 | AAA020 (10 rows) -SET enable_resultcache TO off; +SET enable_memoize TO off; +psql:sql/13.4/parquet_s3_fdw_post.sql:576: ERROR: unrecognized configuration parameter "enable_memoize" -- right outer join + left outer join --Testcase 128: EXPLAIN (VERBOSE, COSTS OFF) @@ -2194,7 +2195,8 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT 20 | 0 | AAA020 (10 rows) -RESET enable_resultcache; +RESET enable_memoize; +psql:sql/13.4/parquet_s3_fdw_post.sql:583: ERROR: unrecognized configuration parameter "enable_memoize" -- left 
outer join + right outer join --Testcase 130: EXPLAIN (VERBOSE, COSTS OFF) @@ -5431,14 +5433,14 @@ DROP FUNCTION f_test(int); --Testcase 341: CREATE FOREIGN TABLE reindex_foreign (c1 int, c2 int) SERVER parquet_s3_srv_2 OPTIONS (filename :'var', sorted 'c1'); -psql:sql/14beta2/parquet_s3_fdw_post.sql:1411: ERROR: parquet_s3_fdw: No such file or directory +psql:sql/13.4/parquet_s3_fdw_post.sql:1411: ERROR: parquet_s3_fdw: No such file or directory REINDEX TABLE reindex_foreign; -- error -psql:sql/14beta2/parquet_s3_fdw_post.sql:1412: ERROR: relation "reindex_foreign" does not exist +psql:sql/13.4/parquet_s3_fdw_post.sql:1412: ERROR: relation "reindex_foreign" does not exist REINDEX TABLE CONCURRENTLY reindex_foreign; -- error -psql:sql/14beta2/parquet_s3_fdw_post.sql:1413: ERROR: relation "reindex_foreign" does not exist +psql:sql/13.4/parquet_s3_fdw_post.sql:1413: ERROR: relation "reindex_foreign" does not exist --Testcase 342: DROP FOREIGN TABLE reindex_foreign; -psql:sql/14beta2/parquet_s3_fdw_post.sql:1415: ERROR: foreign table "reindex_foreign" does not exist +psql:sql/13.4/parquet_s3_fdw_post.sql:1415: ERROR: foreign table "reindex_foreign" does not exist -- partitions and foreign tables -- CREATE TABLE reind_fdw_parent (c1 int) PARTITION BY RANGE (c1); -- CREATE TABLE reind_fdw_0_10 PARTITION OF reind_fdw_parent @@ -5453,9 +5455,11 @@ psql:sql/14beta2/parquet_s3_fdw_post.sql:1415: ERROR: foreign table "reindex_fo -- conversion error -- =================================================================== -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int; --- SELECT * FROM ft1 WHERE c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR +-- SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND 
ftx.x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR -- SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE text; -- =================================================================== @@ -6222,7 +6226,7 @@ Options: check_option=cascaded -- SELECT * FROM foreign_tbl; --Testcase 389: DROP FOREIGN TABLE foreign_tbl CASCADE; -psql:sql/14beta2/parquet_s3_fdw_post.sql:1786: NOTICE: drop cascades to view rw_view +psql:sql/13.4/parquet_s3_fdw_post.sql:1788: NOTICE: drop cascades to view rw_view --Testcase 390: DROP TRIGGER row_before_insupd_trigger ON base_tbl; --Testcase 391: @@ -6277,7 +6281,7 @@ DROP FOREIGN TABLE foreign_tbl CASCADE; DROP TRIGGER row_before_insupd_trigger ON child_tbl; --Testcase 400: DROP TABLE parent_tbl CASCADE; -psql:sql/14beta2/parquet_s3_fdw_post.sql:1834: NOTICE: drop cascades to view rw_view +psql:sql/13.4/parquet_s3_fdw_post.sql:1836: NOTICE: drop cascades to view rw_view --Testcase 401: DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== @@ -6290,16 +6294,40 @@ DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== -- test generated columns -- =================================================================== --- create table gloc1 (a int, b int); +-- create table gloc1 ( +-- a int, +-- b int generated always as (a * 2) stored); -- alter table gloc1 set (autovacuum_enabled = 'false'); -- create foreign table grem1 ( -- a int, -- b int generated always as (a * 2) stored) -- server loopback options(table_name 'gloc1'); +-- explain (verbose, costs off) -- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- explain (verbose, costs off) -- update grem1 set a = 22 where a = 2; +-- update grem1 set a = 22 where a = 2; +-- select * from gloc1; +-- 
select * from grem1; +-- delete from grem1; +-- -- test copy from +-- copy grem1 from stdin; +-- 1 +-- 2 +-- \. -- select * from gloc1; -- select * from grem1; +-- delete from grem1; +-- -- test batch insert +-- alter server loopback options (add batch_size '10'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- alter server loopback options (drop batch_size); -- =================================================================== -- test local triggers -- =================================================================== @@ -6488,6 +6516,10 @@ DROP FUNCTION row_before_insupd_trigfunc; -- DROP TRIGGER trig_row_after ON rem1; -- DROP TRIGGER trig_local_before ON loc1; -- -- Test direct foreign table modification functionality +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1; -- can be pushed down +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1 WHERE false; -- currently can't be pushed down -- -- Test with statement-level triggers -- CREATE TRIGGER trig_stmt_before -- BEFORE DELETE OR INSERT OR UPDATE ON rem1 @@ -7261,7 +7293,7 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- \d import_dest2.* -- CREATE SCHEMA import_dest3; -- IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 --- OPTIONS (import_collate 'false', import_not_null 'false'); +-- OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false'); -- \det+ import_dest3.* -- \d import_dest3.* -- Check LIMIT TO and EXCEPT @@ -7441,7 +7473,7 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- c7 char(10) default 'ft1', -- c8 user_enum -- ) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- If we add a password to the connstr it'll fail, because we 
don't allow passwords -- in connstrs only in user mappings. -- DO $d$ @@ -7454,16 +7486,16 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- -- This won't work with installcheck, but neither will most of the FDW checks. -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- Unpriv user cannot make the mapping passwordless -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- RESET ROLE; -- But the superuser can -- ALTER USER MAPPING FOR regress_nosuper SERVER loopback_nopw OPTIONS (ADD password_required 'false'); -- SET ROLE regress_nosuper; -- Should finally work now --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- unpriv user also cannot set sslcert / sslkey on the user mapping -- first set password_required so we see the right error messages -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (SET password_required 'true'); @@ -7497,11 +7529,11 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- Change application_name of remote connection to special one -- so that we can easily terminate the connection later. -- ALTER SERVER parquet_s3_srv OPTIONS (application_name 'fdw_retry_check'); --- If debug_invalidate_system_caches_always is active, it results in +-- If debug_discard_caches is active, it results in -- dropping remote connections after every transaction, making it -- impossible to test termination meaningfully. So turn that off -- for this test. --- SET debug_invalidate_system_caches_always = 0; +-- SET debug_discard_caches = 0; -- Make sure we have a remote connection. -- SELECT 1 FROM ft1 LIMIT 1; -- Terminate the remote connection and wait for the termination to complete. 
@@ -7522,7 +7554,7 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- SELECT 1 FROM ft1 LIMIT 1; -- should fail -- \set VERBOSITY default -- COMMIT; --- RESET debug_invalidate_system_caches_always; +-- RESET debug_discard_caches; -- ============================================================================= -- test connection invalidation cases and parquet_s3_fdw_get_connections function -- with local parquet file (not on minio/s3 servers). It haven't server and connection. @@ -7573,7 +7605,7 @@ SELECT server_name FROM parquet_s3_fdw_get_connections() ORDER BY 1; -- return ALTER SERVER parquet_s3_srv OPTIONS (ADD use_remote_estimate 'off'); --Testcase 410: DROP SERVER parquet_s3_srv_3 CASCADE; -psql:sql/14beta2/parquet_s3_fdw_post.sql:3241: NOTICE: drop cascades to 2 other objects +psql:sql/13.4/parquet_s3_fdw_post.sql:3273: NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to user mapping for public on server parquet_s3_srv_3 drop cascades to foreign table ft7 -- List all the existing cached connections. parquet_s3_srv and parquet_s3_srv_3 @@ -7669,7 +7701,7 @@ SELECT parquet_s3_fdw_disconnect('parquet_s3_srv_2'); -- Return an error as there is no foreign server with given name. --Testcase 420: SELECT parquet_s3_fdw_disconnect('unknownserver'); -psql:sql/14beta2/parquet_s3_fdw_post.sql:3294: ERROR: server "unknownserver" does not exist +psql:sql/13.4/parquet_s3_fdw_post.sql:3326: ERROR: server "unknownserver" does not exist -- Let's ensure to close all the existing cached connections. 
--Testcase 421: SELECT 1 FROM parquet_s3_fdw_disconnect_all(); @@ -8001,14 +8033,20 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; +-- CREATE TABLE local_tbl (a int, b int, c text); +-- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); +-- ANALYZE local_tbl; +-- EXPLAIN (VERBOSE, COSTS OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- Check with foreign modify --- CREATE TABLE local_tbl (a int, b int, c text); --- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); -- CREATE TABLE base_tbl3 (a int, b int, c text); -- CREATE FOREIGN TABLE remote_tbl (a int, b int, c text) -- SERVER loopback OPTIONS (table_name 'base_tbl3'); @@ -8056,6 +8094,21 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- DROP TABLE join_tbl; -- ALTER SERVER loopback OPTIONS (DROP async_capable); -- ALTER SERVER loopback2 OPTIONS (DROP async_capable); +-- =================================================================== +-- test invalid server and foreign table options +-- =================================================================== +-- Invalid 
fdw_startup_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_startup_cost '100$%$#$#'); +-- -- Invalid fdw_tuple_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_tuple_cost '100$%$#$#'); +-- -- Invalid fetch_size option +-- CREATE FOREIGN TABLE inv_fsz (c1 int ) +-- SERVER loopback OPTIONS (fetch_size '100$%$#$#'); +-- -- Invalid batch_size option +-- CREATE FOREIGN TABLE inv_bsz (c1 int ) +-- SERVER loopback OPTIONS (batch_size '100$%$#$#'); -- Clean-up SET client_min_messages TO WARNING; --Testcase 438: diff --git a/expected/14beta2/parquet_s3_fdw_post_server.out b/expected/13.4/parquet_s3_fdw_post_server.out similarity index 98% rename from expected/14beta2/parquet_s3_fdw_post_server.out rename to expected/13.4/parquet_s3_fdw_post_server.out index 56ace2c..8bedea4 100644 --- a/expected/14beta2/parquet_s3_fdw_post_server.out +++ b/expected/13.4/parquet_s3_fdw_post_server.out @@ -174,7 +174,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work ALTER SERVER parquet_s3_srv OPTIONS (SET use_minio 'false'); --Testcase 17: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option use_minio is useless. -psql:sql/14beta2/parquet_s3_fdw_post.sql:190: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed +psql:sql/13.4/parquet_s3_fdw_post.sql:190: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed DO $d$ BEGIN EXECUTE $$ALTER SERVER parquet_s3_srv @@ -193,7 +193,7 @@ ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'no such user'); --Testcase 19: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option user is useless. 
-psql:sql/14beta2/parquet_s3_fdw_post.sql:204: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed +psql:sql/13.4/parquet_s3_fdw_post.sql:204: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'minioadmin'); --Testcase 20: @@ -2133,7 +2133,8 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL 20 | 0 | AAA020 (10 rows) -SET enable_resultcache TO off; +SET enable_memoize TO off; +psql:sql/13.4/parquet_s3_fdw_post.sql:576: ERROR: unrecognized configuration parameter "enable_memoize" -- right outer join + left outer join --Testcase 128: EXPLAIN (VERBOSE, COSTS OFF) @@ -2182,7 +2183,8 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT 20 | 0 | AAA020 (10 rows) -RESET enable_resultcache; +RESET enable_memoize; +psql:sql/13.4/parquet_s3_fdw_post.sql:583: ERROR: unrecognized configuration parameter "enable_memoize" -- left outer join + right outer join --Testcase 130: EXPLAIN (VERBOSE, COSTS OFF) @@ -5420,9 +5422,9 @@ DROP FUNCTION f_test(int); CREATE FOREIGN TABLE reindex_foreign (c1 int, c2 int) SERVER parquet_s3_srv_2 OPTIONS (filename :'var', sorted 'c1'); REINDEX TABLE reindex_foreign; -- error -psql:sql/14beta2/parquet_s3_fdw_post.sql:1412: ERROR: "reindex_foreign" is not a table or materialized view +psql:sql/13.4/parquet_s3_fdw_post.sql:1412: ERROR: "reindex_foreign" is not a table or materialized view REINDEX TABLE CONCURRENTLY reindex_foreign; -- error -psql:sql/14beta2/parquet_s3_fdw_post.sql:1413: ERROR: "reindex_foreign" is not a table or materialized view +psql:sql/13.4/parquet_s3_fdw_post.sql:1413: ERROR: "reindex_foreign" is not a table or materialized view --Testcase 342: DROP FOREIGN TABLE reindex_foreign; -- partitions and foreign tables @@ -5439,9 +5441,11 @@ DROP FOREIGN TABLE reindex_foreign; -- conversion error -- 
=================================================================== -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int; --- SELECT * FROM ft1 WHERE c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR +-- SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR -- SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE text; -- =================================================================== @@ -6208,7 +6212,7 @@ Options: check_option=cascaded -- SELECT * FROM foreign_tbl; --Testcase 389: DROP FOREIGN TABLE foreign_tbl CASCADE; -psql:sql/14beta2/parquet_s3_fdw_post.sql:1786: NOTICE: drop cascades to view rw_view +psql:sql/13.4/parquet_s3_fdw_post.sql:1788: NOTICE: drop cascades to view rw_view --Testcase 390: DROP TRIGGER row_before_insupd_trigger ON base_tbl; --Testcase 391: @@ -6263,7 +6267,7 @@ DROP FOREIGN TABLE foreign_tbl CASCADE; DROP TRIGGER row_before_insupd_trigger ON child_tbl; --Testcase 400: DROP TABLE parent_tbl CASCADE; -psql:sql/14beta2/parquet_s3_fdw_post.sql:1834: NOTICE: drop cascades to view rw_view +psql:sql/13.4/parquet_s3_fdw_post.sql:1836: NOTICE: drop cascades to view rw_view --Testcase 401: DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== @@ -6276,16 +6280,40 @@ DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== -- test generated columns -- =================================================================== --- create table gloc1 (a int, b int); +-- 
create table gloc1 ( +-- a int, +-- b int generated always as (a * 2) stored); -- alter table gloc1 set (autovacuum_enabled = 'false'); -- create foreign table grem1 ( -- a int, -- b int generated always as (a * 2) stored) -- server loopback options(table_name 'gloc1'); +-- explain (verbose, costs off) -- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- explain (verbose, costs off) -- update grem1 set a = 22 where a = 2; +-- update grem1 set a = 22 where a = 2; +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- -- test copy from +-- copy grem1 from stdin; +-- 1 +-- 2 +-- \. -- select * from gloc1; -- select * from grem1; +-- delete from grem1; +-- -- test batch insert +-- alter server loopback options (add batch_size '10'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- alter server loopback options (drop batch_size); -- =================================================================== -- test local triggers -- =================================================================== @@ -6474,6 +6502,10 @@ DROP FUNCTION row_before_insupd_trigfunc; -- DROP TRIGGER trig_row_after ON rem1; -- DROP TRIGGER trig_local_before ON loc1; -- -- Test direct foreign table modification functionality +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1; -- can be pushed down +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1 WHERE false; -- currently can't be pushed down -- -- Test with statement-level triggers -- CREATE TRIGGER trig_stmt_before -- BEFORE DELETE OR INSERT OR UPDATE ON rem1 @@ -7247,7 +7279,7 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- \d import_dest2.* -- CREATE SCHEMA import_dest3; -- IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 --- OPTIONS (import_collate 'false', import_not_null 
'false'); +-- OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false'); -- \det+ import_dest3.* -- \d import_dest3.* -- Check LIMIT TO and EXCEPT @@ -7427,7 +7459,7 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- c7 char(10) default 'ft1', -- c8 user_enum -- ) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- If we add a password to the connstr it'll fail, because we don't allow passwords -- in connstrs only in user mappings. -- DO $d$ @@ -7440,16 +7472,16 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- -- This won't work with installcheck, but neither will most of the FDW checks. -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- Unpriv user cannot make the mapping passwordless -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- RESET ROLE; -- But the superuser can -- ALTER USER MAPPING FOR regress_nosuper SERVER loopback_nopw OPTIONS (ADD password_required 'false'); -- SET ROLE regress_nosuper; -- Should finally work now --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- unpriv user also cannot set sslcert / sslkey on the user mapping -- first set password_required so we see the right error messages -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (SET password_required 'true'); @@ -7483,11 +7515,11 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- Change application_name of remote connection to special one -- so that we can easily terminate the connection later. 
-- ALTER SERVER parquet_s3_srv OPTIONS (application_name 'fdw_retry_check'); --- If debug_invalidate_system_caches_always is active, it results in +-- If debug_discard_caches is active, it results in -- dropping remote connections after every transaction, making it -- impossible to test termination meaningfully. So turn that off -- for this test. --- SET debug_invalidate_system_caches_always = 0; +-- SET debug_discard_caches = 0; -- Make sure we have a remote connection. -- SELECT 1 FROM ft1 LIMIT 1; -- Terminate the remote connection and wait for the termination to complete. @@ -7508,7 +7540,7 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- SELECT 1 FROM ft1 LIMIT 1; -- should fail -- \set VERBOSITY default -- COMMIT; --- RESET debug_invalidate_system_caches_always; +-- RESET debug_discard_caches; -- ============================================================================= -- test connection invalidation cases and parquet_s3_fdw_get_connections function -- with local parquet file (not on minio/s3 servers). It haven't server and connection. @@ -7561,7 +7593,7 @@ SELECT server_name FROM parquet_s3_fdw_get_connections() ORDER BY 1; -- return ALTER SERVER parquet_s3_srv OPTIONS (ADD use_remote_estimate 'off'); --Testcase 410: DROP SERVER parquet_s3_srv_3 CASCADE; -psql:sql/14beta2/parquet_s3_fdw_post.sql:3241: NOTICE: drop cascades to 2 other objects +psql:sql/13.4/parquet_s3_fdw_post.sql:3273: NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to user mapping for public on server parquet_s3_srv_3 drop cascades to foreign table ft7 -- List all the existing cached connections. parquet_s3_srv and parquet_s3_srv_3 @@ -7663,7 +7695,7 @@ SELECT parquet_s3_fdw_disconnect('parquet_s3_srv_2'); -- Return an error as there is no foreign server with given name. 
--Testcase 420: SELECT parquet_s3_fdw_disconnect('unknownserver'); -psql:sql/14beta2/parquet_s3_fdw_post.sql:3294: ERROR: server "unknownserver" does not exist +psql:sql/13.4/parquet_s3_fdw_post.sql:3326: ERROR: server "unknownserver" does not exist -- Let's ensure to close all the existing cached connections. --Testcase 421: SELECT 1 FROM parquet_s3_fdw_disconnect_all(); @@ -7997,14 +8029,20 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; +-- CREATE TABLE local_tbl (a int, b int, c text); +-- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); +-- ANALYZE local_tbl; +-- EXPLAIN (VERBOSE, COSTS OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- Check with foreign modify --- CREATE TABLE local_tbl (a int, b int, c text); --- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); -- CREATE TABLE base_tbl3 (a int, b int, c text); -- CREATE FOREIGN TABLE remote_tbl (a int, b int, c text) -- SERVER loopback OPTIONS (table_name 'base_tbl3'); @@ -8052,6 +8090,21 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- DROP TABLE join_tbl; -- ALTER SERVER 
loopback OPTIONS (DROP async_capable); -- ALTER SERVER loopback2 OPTIONS (DROP async_capable); +-- =================================================================== +-- test invalid server and foreign table options +-- =================================================================== +-- Invalid fdw_startup_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_startup_cost '100$%$#$#'); +-- -- Invalid fdw_tuple_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_tuple_cost '100$%$#$#'); +-- -- Invalid fetch_size option +-- CREATE FOREIGN TABLE inv_fsz (c1 int ) +-- SERVER loopback OPTIONS (fetch_size '100$%$#$#'); +-- -- Invalid batch_size option +-- CREATE FOREIGN TABLE inv_bsz (c1 int ) +-- SERVER loopback OPTIONS (batch_size '100$%$#$#'); -- Clean-up SET client_min_messages TO WARNING; --Testcase 438: diff --git a/expected/13.3/parquet_s3_fdw_server.out b/expected/13.4/parquet_s3_fdw_server.out similarity index 89% rename from expected/13.3/parquet_s3_fdw_server.out rename to expected/13.4/parquet_s3_fdw_server.out index 3213304..afaef00 100644 --- a/expected/13.3/parquet_s3_fdw_server.out +++ b/expected/13.4/parquet_s3_fdw_server.out @@ -85,17 +85,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM example1 ORDER BY three; SET client_min_messages = DEBUG1; --Testcase 12: SELECT * FROM example1 WHERE one < 1; -psql:sql/13.3/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 13: SELECT * FROM example1 WHERE one <= 1; -psql:sql/13.3/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -103,17 +103,17 @@ psql:sql/13.3/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 14: SELECT * FROM example1 WHERE one > 6; -psql:sql/13.3/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 15: SELECT * FROM example1 WHERE one >= 6; -psql:sql/13.3/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 6 | {16,17,18} | tres | 2018-01-06 00:00:00 | 2018-01-06 | f | 2 @@ -121,8 +121,8 @@ psql:sql/13.3/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 16: SELECT * FROM example1 WHERE one = 2; -psql:sql/13.3/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -130,17 +130,17 @@ psql:sql/13.3/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 17: SELECT * FROM example1 WHERE one = 7; -psql:sql/13.3/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 18: SELECT * FROM example1 WHERE six = true; -psql:sql/13.3/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -149,7 +149,7 @@ psql:sql/13.3/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 19: SELECT * FROM example1 WHERE six = false; -psql:sql/13.3/parquet_s3_fdw.sql:63: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:63: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -160,8 +160,8 @@ psql:sql/13.3/parquet_s3_fdw.sql:63: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 20: SELECT * FROM example1 WHERE seven < 1.5; -psql:sql/13.3/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -170,7 +170,7 @@ psql:sql/13.3/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 21: SELECT * FROM example1 WHERE seven <= 1.5; -psql:sql/13.3/parquet_s3_fdw.sql:67: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:67: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -180,8 +180,8 @@ psql:sql/13.3/parquet_s3_fdw.sql:67: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 22: SELECT * FROM example1 WHERE seven = 1.5; -psql:sql/13.3/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -189,8 +189,8 @@ psql:sql/13.3/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 23: SELECT * FROM example1 WHERE seven > 1; -psql:sql/13.3/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -199,7 +199,7 @@ psql:sql/13.3/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 24: SELECT * FROM example1 WHERE seven >= 1; -psql:sql/13.3/parquet_s3_fdw.sql:73: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:73: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 3 | {7,8,9} | baz | 2018-01-03 00:00:00 | 2018-01-03 | t | 1 @@ -209,7 +209,7 @@ psql:sql/13.3/parquet_s3_fdw.sql:73: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 25: SELECT * FROM example1 WHERE seven IS NULL; -psql:sql/13.3/parquet_s3_fdw.sql:75: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:75: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -221,8 +221,8 @@ psql:sql/13.3/parquet_s3_fdw.sql:75: DEBUG: parquet_s3_fdw: open Parquet file o prepare prep(date) as select * from example1 where five < $1; --Testcase 27: execute prep('2018-01-03'); -psql:sql/13.3/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -231,9 +231,9 @@ psql:sql/13.3/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: open Parquet file o --Testcase 28: execute prep('2018-01-01'); -psql:sql/13.3/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/13.3/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/13.4/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/13.4/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) @@ -243,18 +243,18 @@ SET client_min_messages = WARNING; --Testcase 29: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv; -psql:sql/13.3/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required +psql:sql/13.4/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required --Testcase 30: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename 'nonexistent.parquet', some_option '123'); -psql:sql/13.3/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory +psql:sql/13.4/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 31: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename :'var', some_option '123'); -psql:sql/13.3/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option "some_option" 
+psql:sql/13.4/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option "some_option" -- type mismatch \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 32: @@ -263,10 +263,10 @@ SERVER parquet_s3_srv OPTIONS (filename :'var', sorted 'one'); --Testcase 33: SELECT one FROM example_fail; -psql:sql/13.3/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found +psql:sql/13.4/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found --Testcase 34: SELECT two FROM example_fail; -psql:sql/13.3/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' +psql:sql/13.4/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' -- files_func --Testcase 35: CREATE FUNCTION list_parquet_s3_files(args JSONB) @@ -315,17 +315,17 @@ LANGUAGE SQL; CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'int_array_func'); -psql:sql/13.3/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] +psql:sql/13.4/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] --Testcase 41: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'no_args_func'); -psql:sql/13.3/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist +psql:sql/13.4/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist --Testcase 42: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'list_parquet_s3_files', files_func_arg 'invalid json'); -psql:sql/13.3/parquet_s3_fdw.sql:152: ERROR: 
invalid input syntax for type json +psql:sql/13.4/parquet_s3_fdw.sql:152: ERROR: invalid input syntax for type json DETAIL: Token "invalid" is invalid. CONTEXT: JSON data, line 1: invalid... --Testcase 43: @@ -611,7 +611,7 @@ SERVER parquet_s3_srv OPTIONS (filename :'var', sorted 'one'); --Testcase 67: SELECT * FROM example3; -psql:sql/13.3/parquet_s3_fdw.sql:267: DEBUG: parquet_s3_fdw: open Parquet file on S3. datacomplex/example3.parquet +psql:sql/13.4/parquet_s3_fdw.sql:267: DEBUG: parquet_s3_fdw: open Parquet file on S3. datacomplex/example3.parquet one | two | three --------------------------------------+--------------------------------------+------- {"1": "foo", "2": "bar", "3": "baz"} | {"2018-01-01": 10, "2018-01-02": 15} | 1 @@ -620,8 +620,8 @@ psql:sql/13.3/parquet_s3_fdw.sql:267: DEBUG: parquet_s3_fdw: open Parquet file --Testcase 68: SELECT * FROM example3 WHERE three = 3; -psql:sql/13.3/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/13.3/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: open Parquet file on S3. datacomplex/example3.parquet +psql:sql/13.4/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/13.4/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datacomplex/example3.parquet one | two | three -----+-----+------- (0 rows) @@ -645,14 +645,14 @@ SET client_min_messages = WARNING; SELECT * FROM public.parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 71: SELECT parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 72: diff --git a/expected/14beta2/import_local.out b/expected/14.0/import_local.out similarity index 100% rename from expected/14beta2/import_local.out rename to expected/14.0/import_local.out diff --git a/expected/14beta2/import_server.out b/expected/14.0/import_server.out similarity index 100% rename from expected/14beta2/import_server.out rename to expected/14.0/import_server.out diff --git a/expected/14beta2/parquet_s3_fdw2.out b/expected/14.0/parquet_s3_fdw2.out similarity index 100% rename from expected/14beta2/parquet_s3_fdw2.out rename to expected/14.0/parquet_s3_fdw2.out diff --git a/expected/14beta2/parquet_s3_fdw_local.out b/expected/14.0/parquet_s3_fdw_local.out similarity index 90% rename from expected/14beta2/parquet_s3_fdw_local.out rename to expected/14.0/parquet_s3_fdw_local.out index 93ed17a..1525cf3 100644 --- a/expected/14beta2/parquet_s3_fdw_local.out +++ b/expected/14.0/parquet_s3_fdw_local.out @@ -85,15 +85,15 @@ EXPLAIN (COSTS OFF) SELECT * FROM example1 ORDER BY three; SET client_min_messages = DEBUG1; --Testcase 12: SELECT * FROM example1 WHERE one < 1; -psql:sql/14beta2/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 13: SELECT * FROM example1 WHERE one <= 1; -psql:sql/14beta2/parquet_s3_fdw.sql:51: DEBUG: 
parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -101,15 +101,15 @@ psql:sql/14beta2/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 --Testcase 14: SELECT * FROM example1 WHERE one > 6; -psql:sql/14beta2/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 15: SELECT * FROM example1 WHERE one >= 6; -psql:sql/14beta2/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 6 | {16,17,18} | tres | 2018-01-06 00:00:00 | 2018-01-06 | f | 2 @@ -117,7 +117,7 @@ psql:sql/14beta2/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 --Testcase 16: SELECT * FROM example1 WHERE one = 2; -psql:sql/14beta2/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -125,15 +125,15 @@ psql:sql/14beta2/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 --Testcase 17: SELECT * FROM example1 WHERE one = 7; -psql:sql/14beta2/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 
-psql:sql/14beta2/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 18: SELECT * FROM example1 WHERE six = true; -psql:sql/14beta2/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -152,7 +152,7 @@ SELECT * FROM example1 WHERE six = false; --Testcase 20: SELECT * FROM example1 WHERE seven < 1.5; -psql:sql/14beta2/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -170,7 +170,7 @@ SELECT * FROM example1 WHERE seven <= 1.5; --Testcase 22: SELECT * FROM example1 WHERE seven = 1.5; -psql:sql/14beta2/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -178,7 +178,7 @@ psql:sql/14beta2/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 --Testcase 23: SELECT * FROM example1 WHERE seven > 1; -psql:sql/14beta2/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three | four | five | 
six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -207,7 +207,7 @@ SELECT * FROM example1 WHERE seven IS NULL; prepare prep(date) as select * from example1 where five < $1; --Testcase 27: execute prep('2018-01-03'); -psql:sql/14beta2/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -216,8 +216,8 @@ psql:sql/14beta2/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 --Testcase 28: execute prep('2018-01-01'); -psql:sql/14beta2/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) @@ -227,18 +227,18 @@ SET client_min_messages = WARNING; --Testcase 29: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv; -psql:sql/14beta2/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required +psql:sql/14.0/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required --Testcase 30: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename 'nonexistent.parquet', some_option '123'); -psql:sql/14beta2/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory +psql:sql/14.0/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 31: CREATE 
FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename :'var', some_option '123'); -psql:sql/14beta2/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option "some_option" +psql:sql/14.0/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option "some_option" -- type mismatch \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 32: @@ -247,10 +247,10 @@ SERVER parquet_s3_srv OPTIONS (filename :'var', sorted 'one'); --Testcase 33: SELECT one FROM example_fail; -psql:sql/14beta2/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found +psql:sql/14.0/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found --Testcase 34: SELECT two FROM example_fail; -psql:sql/14beta2/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' +psql:sql/14.0/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' -- files_func --Testcase 35: CREATE FUNCTION list_parquet_s3_files(args JSONB) @@ -299,17 +299,17 @@ LANGUAGE SQL; CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'int_array_func'); -psql:sql/14beta2/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] +psql:sql/14.0/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] --Testcase 41: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'no_args_func'); -psql:sql/14beta2/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist +psql:sql/14.0/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist 
--Testcase 42: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'list_parquet_s3_files', files_func_arg 'invalid json'); -psql:sql/14beta2/parquet_s3_fdw.sql:152: ERROR: invalid input syntax for type json +psql:sql/14.0/parquet_s3_fdw.sql:152: ERROR: invalid input syntax for type json DETAIL: Token "invalid" is invalid. CONTEXT: JSON data, line 1: invalid... --Testcase 43: @@ -603,7 +603,7 @@ SELECT * FROM example3; --Testcase 68: SELECT * FROM example3 WHERE three = 3; -psql:sql/14beta2/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 one | two | three -----+-----+------- (0 rows) @@ -627,14 +627,14 @@ SET client_min_messages = WARNING; SELECT * FROM public.parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 71: SELECT parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 72: diff --git a/expected/13.3/parquet_s3_fdw_post_local.out b/expected/14.0/parquet_s3_fdw_post_local.out similarity index 98% rename from expected/13.3/parquet_s3_fdw_post_local.out rename to expected/14.0/parquet_s3_fdw_post_local.out index e0997eb..77900c9 100644 --- a/expected/13.3/parquet_s3_fdw_post_local.out +++ b/expected/14.0/parquet_s3_fdw_post_local.out @@ -172,7 +172,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work (1 row) ALTER SERVER parquet_s3_srv OPTIONS (SET use_minio 'false'); -psql:sql/13.3/parquet_s3_fdw_post.sql:188: ERROR: option "use_minio" not found +psql:sql/14.0/parquet_s3_fdw_post.sql:188: ERROR: option "use_minio" not found --Testcase 17: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option use_minio is useless. 
c3 | c5 @@ -186,7 +186,7 @@ DO $d$ OPTIONS (SET use_minio 'true')$$; END; $d$; -psql:sql/13.3/parquet_s3_fdw_post.sql:196: ERROR: option "use_minio" not found +psql:sql/14.0/parquet_s3_fdw_post.sql:196: ERROR: option "use_minio" not found --Testcase 18: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again c3 | c5 @@ -197,7 +197,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again -- Test that alteration of user mapping options causes reconnection ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'no such user'); -psql:sql/13.3/parquet_s3_fdw_post.sql:202: ERROR: option "user" not found +psql:sql/14.0/parquet_s3_fdw_post.sql:202: ERROR: option "user" not found --Testcase 19: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option user is useless. c3 | c5 @@ -207,7 +207,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'minioadmin'); -psql:sql/13.3/parquet_s3_fdw_post.sql:206: ERROR: option "user" not found +psql:sql/14.0/parquet_s3_fdw_post.sql:206: ERROR: option "user" not found --Testcase 20: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again c3 | c5 @@ -2145,8 +2145,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL 20 | 0 | AAA020 (10 rows) -SET enable_resultcache TO off; -psql:sql/13.3/parquet_s3_fdw_post.sql:576: ERROR: unrecognized configuration parameter "enable_resultcache" +SET enable_memoize TO off; -- right outer join + left outer join --Testcase 128: EXPLAIN (VERBOSE, COSTS OFF) @@ -2195,8 +2194,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT 20 | 0 | AAA020 (10 rows) -RESET enable_resultcache; -psql:sql/13.3/parquet_s3_fdw_post.sql:583: ERROR: unrecognized configuration parameter "enable_resultcache" +RESET enable_memoize; -- left 
outer join + right outer join --Testcase 130: EXPLAIN (VERBOSE, COSTS OFF) @@ -5433,14 +5431,14 @@ DROP FUNCTION f_test(int); --Testcase 341: CREATE FOREIGN TABLE reindex_foreign (c1 int, c2 int) SERVER parquet_s3_srv_2 OPTIONS (filename :'var', sorted 'c1'); -psql:sql/13.3/parquet_s3_fdw_post.sql:1411: ERROR: parquet_s3_fdw: No such file or directory +psql:sql/14.0/parquet_s3_fdw_post.sql:1411: ERROR: parquet_s3_fdw: No such file or directory REINDEX TABLE reindex_foreign; -- error -psql:sql/13.3/parquet_s3_fdw_post.sql:1412: ERROR: relation "reindex_foreign" does not exist +psql:sql/14.0/parquet_s3_fdw_post.sql:1412: ERROR: relation "reindex_foreign" does not exist REINDEX TABLE CONCURRENTLY reindex_foreign; -- error -psql:sql/13.3/parquet_s3_fdw_post.sql:1413: ERROR: relation "reindex_foreign" does not exist +psql:sql/14.0/parquet_s3_fdw_post.sql:1413: ERROR: relation "reindex_foreign" does not exist --Testcase 342: DROP FOREIGN TABLE reindex_foreign; -psql:sql/13.3/parquet_s3_fdw_post.sql:1415: ERROR: foreign table "reindex_foreign" does not exist +psql:sql/14.0/parquet_s3_fdw_post.sql:1415: ERROR: foreign table "reindex_foreign" does not exist -- partitions and foreign tables -- CREATE TABLE reind_fdw_parent (c1 int) PARTITION BY RANGE (c1); -- CREATE TABLE reind_fdw_0_10 PARTITION OF reind_fdw_parent @@ -5455,9 +5453,11 @@ psql:sql/13.3/parquet_s3_fdw_post.sql:1415: ERROR: foreign table "reindex_forei -- conversion error -- =================================================================== -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int; --- SELECT * FROM ft1 WHERE c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR +-- SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; 
-- ERROR +-- SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR -- SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE text; -- =================================================================== @@ -6224,7 +6224,7 @@ Options: check_option=cascaded -- SELECT * FROM foreign_tbl; --Testcase 389: DROP FOREIGN TABLE foreign_tbl CASCADE; -psql:sql/13.3/parquet_s3_fdw_post.sql:1786: NOTICE: drop cascades to view rw_view +psql:sql/14.0/parquet_s3_fdw_post.sql:1788: NOTICE: drop cascades to view rw_view --Testcase 390: DROP TRIGGER row_before_insupd_trigger ON base_tbl; --Testcase 391: @@ -6279,7 +6279,7 @@ DROP FOREIGN TABLE foreign_tbl CASCADE; DROP TRIGGER row_before_insupd_trigger ON child_tbl; --Testcase 400: DROP TABLE parent_tbl CASCADE; -psql:sql/13.3/parquet_s3_fdw_post.sql:1834: NOTICE: drop cascades to view rw_view +psql:sql/14.0/parquet_s3_fdw_post.sql:1836: NOTICE: drop cascades to view rw_view --Testcase 401: DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== @@ -6292,16 +6292,40 @@ DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== -- test generated columns -- =================================================================== --- create table gloc1 (a int, b int); +-- create table gloc1 ( +-- a int, +-- b int generated always as (a * 2) stored); -- alter table gloc1 set (autovacuum_enabled = 'false'); -- create foreign table grem1 ( -- a int, -- b int generated always as (a * 2) stored) -- server loopback options(table_name 'gloc1'); +-- explain (verbose, costs off) -- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- explain (verbose, costs off) -- update grem1 set a = 22 where a = 2; +-- update grem1 set a = 22 where a = 2; +-- select * from gloc1; +-- select * from grem1; 
+-- delete from grem1; +-- -- test copy from +-- copy grem1 from stdin; +-- 1 +-- 2 +-- \. -- select * from gloc1; -- select * from grem1; +-- delete from grem1; +-- -- test batch insert +-- alter server loopback options (add batch_size '10'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- alter server loopback options (drop batch_size); -- =================================================================== -- test local triggers -- =================================================================== @@ -6490,6 +6514,10 @@ DROP FUNCTION row_before_insupd_trigfunc; -- DROP TRIGGER trig_row_after ON rem1; -- DROP TRIGGER trig_local_before ON loc1; -- -- Test direct foreign table modification functionality +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1; -- can be pushed down +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1 WHERE false; -- currently can't be pushed down -- -- Test with statement-level triggers -- CREATE TRIGGER trig_stmt_before -- BEFORE DELETE OR INSERT OR UPDATE ON rem1 @@ -7263,7 +7291,7 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- \d import_dest2.* -- CREATE SCHEMA import_dest3; -- IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 --- OPTIONS (import_collate 'false', import_not_null 'false'); +-- OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false'); -- \det+ import_dest3.* -- \d import_dest3.* -- Check LIMIT TO and EXCEPT @@ -7443,7 +7471,7 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- c7 char(10) default 'ft1', -- c8 user_enum -- ) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- If we add a password to the connstr it'll fail, because we don't allow passwords 
-- in connstrs only in user mappings. -- DO $d$ @@ -7456,16 +7484,16 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- -- This won't work with installcheck, but neither will most of the FDW checks. -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- Unpriv user cannot make the mapping passwordless -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- RESET ROLE; -- But the superuser can -- ALTER USER MAPPING FOR regress_nosuper SERVER loopback_nopw OPTIONS (ADD password_required 'false'); -- SET ROLE regress_nosuper; -- Should finally work now --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- unpriv user also cannot set sslcert / sslkey on the user mapping -- first set password_required so we see the right error messages -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (SET password_required 'true'); @@ -7499,11 +7527,11 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- Change application_name of remote connection to special one -- so that we can easily terminate the connection later. -- ALTER SERVER parquet_s3_srv OPTIONS (application_name 'fdw_retry_check'); --- If debug_invalidate_system_caches_always is active, it results in +-- If debug_discard_caches is active, it results in -- dropping remote connections after every transaction, making it -- impossible to test termination meaningfully. So turn that off -- for this test. --- SET debug_invalidate_system_caches_always = 0; +-- SET debug_discard_caches = 0; -- Make sure we have a remote connection. -- SELECT 1 FROM ft1 LIMIT 1; -- Terminate the remote connection and wait for the termination to complete. 
@@ -7524,7 +7552,7 @@ FDW options: (filename '/tmp/data_local/ported_postgres/loct_empty.parquet', sor -- SELECT 1 FROM ft1 LIMIT 1; -- should fail -- \set VERBOSITY default -- COMMIT; --- RESET debug_invalidate_system_caches_always; +-- RESET debug_discard_caches; -- ============================================================================= -- test connection invalidation cases and parquet_s3_fdw_get_connections function -- with local parquet file (not on minio/s3 servers). It haven't server and connection. @@ -7575,7 +7603,7 @@ SELECT server_name FROM parquet_s3_fdw_get_connections() ORDER BY 1; -- return ALTER SERVER parquet_s3_srv OPTIONS (ADD use_remote_estimate 'off'); --Testcase 410: DROP SERVER parquet_s3_srv_3 CASCADE; -psql:sql/13.3/parquet_s3_fdw_post.sql:3241: NOTICE: drop cascades to 2 other objects +psql:sql/14.0/parquet_s3_fdw_post.sql:3273: NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to user mapping for public on server parquet_s3_srv_3 drop cascades to foreign table ft7 -- List all the existing cached connections. parquet_s3_srv and parquet_s3_srv_3 @@ -7671,7 +7699,7 @@ SELECT parquet_s3_fdw_disconnect('parquet_s3_srv_2'); -- Return an error as there is no foreign server with given name. --Testcase 420: SELECT parquet_s3_fdw_disconnect('unknownserver'); -psql:sql/13.3/parquet_s3_fdw_post.sql:3294: ERROR: server "unknownserver" does not exist +psql:sql/14.0/parquet_s3_fdw_post.sql:3326: ERROR: server "unknownserver" does not exist -- Let's ensure to close all the existing cached connections. 
--Testcase 421: SELECT 1 FROM parquet_s3_fdw_disconnect_all(); @@ -8003,14 +8031,20 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; +-- CREATE TABLE local_tbl (a int, b int, c text); +-- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); +-- ANALYZE local_tbl; +-- EXPLAIN (VERBOSE, COSTS OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- Check with foreign modify --- CREATE TABLE local_tbl (a int, b int, c text); --- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); -- CREATE TABLE base_tbl3 (a int, b int, c text); -- CREATE FOREIGN TABLE remote_tbl (a int, b int, c text) -- SERVER loopback OPTIONS (table_name 'base_tbl3'); @@ -8058,6 +8092,21 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- DROP TABLE join_tbl; -- ALTER SERVER loopback OPTIONS (DROP async_capable); -- ALTER SERVER loopback2 OPTIONS (DROP async_capable); +-- =================================================================== +-- test invalid server and foreign table options +-- =================================================================== +-- Invalid 
fdw_startup_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_startup_cost '100$%$#$#'); +-- -- Invalid fdw_tuple_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_tuple_cost '100$%$#$#'); +-- -- Invalid fetch_size option +-- CREATE FOREIGN TABLE inv_fsz (c1 int ) +-- SERVER loopback OPTIONS (fetch_size '100$%$#$#'); +-- -- Invalid batch_size option +-- CREATE FOREIGN TABLE inv_bsz (c1 int ) +-- SERVER loopback OPTIONS (batch_size '100$%$#$#'); -- Clean-up SET client_min_messages TO WARNING; --Testcase 438: diff --git a/expected/13.3/parquet_s3_fdw_post_server.out b/expected/14.0/parquet_s3_fdw_post_server.out similarity index 98% rename from expected/13.3/parquet_s3_fdw_post_server.out rename to expected/14.0/parquet_s3_fdw_post_server.out index 08fd660..7f0b979 100644 --- a/expected/13.3/parquet_s3_fdw_post_server.out +++ b/expected/14.0/parquet_s3_fdw_post_server.out @@ -174,7 +174,7 @@ SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work ALTER SERVER parquet_s3_srv OPTIONS (SET use_minio 'false'); --Testcase 17: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option use_minio is useless. -psql:sql/13.3/parquet_s3_fdw_post.sql:190: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed +psql:sql/14.0/parquet_s3_fdw_post.sql:190: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed DO $d$ BEGIN EXECUTE $$ALTER SERVER parquet_s3_srv @@ -193,7 +193,7 @@ ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'no such user'); --Testcase 19: SELECT c3, c5 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail if only when we use minio/s3. With local file, option user is useless. 
-psql:sql/13.3/parquet_s3_fdw_post.sql:204: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed +psql:sql/14.0/parquet_s3_fdw_post.sql:204: ERROR: parquet_s3_fdw: failed to open Parquet file HeadObject failed ALTER USER MAPPING FOR CURRENT_USER SERVER parquet_s3_srv OPTIONS (SET user 'minioadmin'); --Testcase 20: @@ -2133,8 +2133,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL 20 | 0 | AAA020 (10 rows) -SET enable_resultcache TO off; -psql:sql/13.3/parquet_s3_fdw_post.sql:576: ERROR: unrecognized configuration parameter "enable_resultcache" +SET enable_memoize TO off; -- right outer join + left outer join --Testcase 128: EXPLAIN (VERBOSE, COSTS OFF) @@ -2183,8 +2182,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT 20 | 0 | AAA020 (10 rows) -RESET enable_resultcache; -psql:sql/13.3/parquet_s3_fdw_post.sql:583: ERROR: unrecognized configuration parameter "enable_resultcache" +RESET enable_memoize; -- left outer join + right outer join --Testcase 130: EXPLAIN (VERBOSE, COSTS OFF) @@ -5422,9 +5420,9 @@ DROP FUNCTION f_test(int); CREATE FOREIGN TABLE reindex_foreign (c1 int, c2 int) SERVER parquet_s3_srv_2 OPTIONS (filename :'var', sorted 'c1'); REINDEX TABLE reindex_foreign; -- error -psql:sql/13.3/parquet_s3_fdw_post.sql:1412: ERROR: "reindex_foreign" is not a table or materialized view +psql:sql/14.0/parquet_s3_fdw_post.sql:1412: ERROR: "reindex_foreign" is not a table or materialized view REINDEX TABLE CONCURRENTLY reindex_foreign; -- error -psql:sql/13.3/parquet_s3_fdw_post.sql:1413: ERROR: "reindex_foreign" is not a table or materialized view +psql:sql/14.0/parquet_s3_fdw_post.sql:1413: ERROR: "reindex_foreign" is not a table or materialized view --Testcase 342: DROP FOREIGN TABLE reindex_foreign; -- partitions and foreign tables @@ -5441,9 +5439,11 @@ DROP FOREIGN TABLE reindex_foreign; -- conversion error -- 
=================================================================== -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int; --- SELECT * FROM ft1 WHERE c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR +-- SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR -- SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE text; -- =================================================================== @@ -6210,7 +6210,7 @@ Options: check_option=cascaded -- SELECT * FROM foreign_tbl; --Testcase 389: DROP FOREIGN TABLE foreign_tbl CASCADE; -psql:sql/13.3/parquet_s3_fdw_post.sql:1786: NOTICE: drop cascades to view rw_view +psql:sql/14.0/parquet_s3_fdw_post.sql:1788: NOTICE: drop cascades to view rw_view --Testcase 390: DROP TRIGGER row_before_insupd_trigger ON base_tbl; --Testcase 391: @@ -6265,7 +6265,7 @@ DROP FOREIGN TABLE foreign_tbl CASCADE; DROP TRIGGER row_before_insupd_trigger ON child_tbl; --Testcase 400: DROP TABLE parent_tbl CASCADE; -psql:sql/13.3/parquet_s3_fdw_post.sql:1834: NOTICE: drop cascades to view rw_view +psql:sql/14.0/parquet_s3_fdw_post.sql:1836: NOTICE: drop cascades to view rw_view --Testcase 401: DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== @@ -6278,16 +6278,40 @@ DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== -- test generated columns -- =================================================================== --- create table gloc1 (a int, b int); +-- create 
table gloc1 ( +-- a int, +-- b int generated always as (a * 2) stored); -- alter table gloc1 set (autovacuum_enabled = 'false'); -- create foreign table grem1 ( -- a int, -- b int generated always as (a * 2) stored) -- server loopback options(table_name 'gloc1'); +-- explain (verbose, costs off) -- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- explain (verbose, costs off) -- update grem1 set a = 22 where a = 2; +-- update grem1 set a = 22 where a = 2; +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- -- test copy from +-- copy grem1 from stdin; +-- 1 +-- 2 +-- \. -- select * from gloc1; -- select * from grem1; +-- delete from grem1; +-- -- test batch insert +-- alter server loopback options (add batch_size '10'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- alter server loopback options (drop batch_size); -- =================================================================== -- test local triggers -- =================================================================== @@ -6476,6 +6500,10 @@ DROP FUNCTION row_before_insupd_trigfunc; -- DROP TRIGGER trig_row_after ON rem1; -- DROP TRIGGER trig_local_before ON loc1; -- -- Test direct foreign table modification functionality +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1; -- can be pushed down +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1 WHERE false; -- currently can't be pushed down -- -- Test with statement-level triggers -- CREATE TRIGGER trig_stmt_before -- BEFORE DELETE OR INSERT OR UPDATE ON rem1 @@ -7249,7 +7277,7 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- \d import_dest2.* -- CREATE SCHEMA import_dest3; -- IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 --- OPTIONS (import_collate 'false', import_not_null 'false'); 
+-- OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false'); -- \det+ import_dest3.* -- \d import_dest3.* -- Check LIMIT TO and EXCEPT @@ -7429,7 +7457,7 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- c7 char(10) default 'ft1', -- c8 user_enum -- ) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- If we add a password to the connstr it'll fail, because we don't allow passwords -- in connstrs only in user mappings. -- DO $d$ @@ -7442,16 +7470,16 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- -- This won't work with installcheck, but neither will most of the FDW checks. -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- Unpriv user cannot make the mapping passwordless -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- RESET ROLE; -- But the superuser can -- ALTER USER MAPPING FOR regress_nosuper SERVER loopback_nopw OPTIONS (ADD password_required 'false'); -- SET ROLE regress_nosuper; -- Should finally work now --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- unpriv user also cannot set sslcert / sslkey on the user mapping -- first set password_required so we see the right error messages -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (SET password_required 'true'); @@ -7485,11 +7513,11 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- Change application_name of remote connection to special one -- so that we can easily terminate the connection later. 
-- ALTER SERVER parquet_s3_srv OPTIONS (application_name 'fdw_retry_check'); --- If debug_invalidate_system_caches_always is active, it results in +-- If debug_discard_caches is active, it results in -- dropping remote connections after every transaction, making it -- impossible to test termination meaningfully. So turn that off -- for this test. --- SET debug_invalidate_system_caches_always = 0; +-- SET debug_discard_caches = 0; -- Make sure we have a remote connection. -- SELECT 1 FROM ft1 LIMIT 1; -- Terminate the remote connection and wait for the termination to complete. @@ -7510,7 +7538,7 @@ FDW options: (filename 's3://ported_postgres/loct_empty.parquet', sorted 'c1') -- SELECT 1 FROM ft1 LIMIT 1; -- should fail -- \set VERBOSITY default -- COMMIT; --- RESET debug_invalidate_system_caches_always; +-- RESET debug_discard_caches; -- ============================================================================= -- test connection invalidation cases and parquet_s3_fdw_get_connections function -- with local parquet file (not on minio/s3 servers). It haven't server and connection. @@ -7563,7 +7591,7 @@ SELECT server_name FROM parquet_s3_fdw_get_connections() ORDER BY 1; -- return ALTER SERVER parquet_s3_srv OPTIONS (ADD use_remote_estimate 'off'); --Testcase 410: DROP SERVER parquet_s3_srv_3 CASCADE; -psql:sql/13.3/parquet_s3_fdw_post.sql:3241: NOTICE: drop cascades to 2 other objects +psql:sql/14.0/parquet_s3_fdw_post.sql:3273: NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to user mapping for public on server parquet_s3_srv_3 drop cascades to foreign table ft7 -- List all the existing cached connections. parquet_s3_srv and parquet_s3_srv_3 @@ -7665,7 +7693,7 @@ SELECT parquet_s3_fdw_disconnect('parquet_s3_srv_2'); -- Return an error as there is no foreign server with given name. 
--Testcase 420: SELECT parquet_s3_fdw_disconnect('unknownserver'); -psql:sql/13.3/parquet_s3_fdw_post.sql:3294: ERROR: server "unknownserver" does not exist +psql:sql/14.0/parquet_s3_fdw_post.sql:3326: ERROR: server "unknownserver" does not exist -- Let's ensure to close all the existing cached connections. --Testcase 421: SELECT 1 FROM parquet_s3_fdw_disconnect_all(); @@ -7999,14 +8027,20 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; +-- CREATE TABLE local_tbl (a int, b int, c text); +-- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); +-- ANALYZE local_tbl; +-- EXPLAIN (VERBOSE, COSTS OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- Check with foreign modify --- CREATE TABLE local_tbl (a int, b int, c text); --- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); -- CREATE TABLE base_tbl3 (a int, b int, c text); -- CREATE FOREIGN TABLE remote_tbl (a int, b int, c text) -- SERVER loopback OPTIONS (table_name 'base_tbl3'); @@ -8054,6 +8088,21 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- DROP TABLE join_tbl; -- ALTER SERVER 
loopback OPTIONS (DROP async_capable); -- ALTER SERVER loopback2 OPTIONS (DROP async_capable); +-- =================================================================== +-- test invalid server and foreign table options +-- =================================================================== +-- Invalid fdw_startup_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_startup_cost '100$%$#$#'); +-- -- Invalid fdw_tuple_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_tuple_cost '100$%$#$#'); +-- -- Invalid fetch_size option +-- CREATE FOREIGN TABLE inv_fsz (c1 int ) +-- SERVER loopback OPTIONS (fetch_size '100$%$#$#'); +-- -- Invalid batch_size option +-- CREATE FOREIGN TABLE inv_bsz (c1 int ) +-- SERVER loopback OPTIONS (batch_size '100$%$#$#'); -- Clean-up SET client_min_messages TO WARNING; --Testcase 438: diff --git a/expected/14beta2/parquet_s3_fdw_server.out b/expected/14.0/parquet_s3_fdw_server.out similarity index 82% rename from expected/14beta2/parquet_s3_fdw_server.out rename to expected/14.0/parquet_s3_fdw_server.out index 52cda39..eea3b8a 100644 --- a/expected/14beta2/parquet_s3_fdw_server.out +++ b/expected/14.0/parquet_s3_fdw_server.out @@ -85,17 +85,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM example1 ORDER BY three; SET client_min_messages = DEBUG1; --Testcase 12: SELECT * FROM example1 WHERE one < 1; -psql:sql/14beta2/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:49: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 13: SELECT * FROM example1 WHERE one <= 1; -psql:sql/14beta2/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -103,17 +103,17 @@ psql:sql/14beta2/parquet_s3_fdw.sql:51: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 14: SELECT * FROM example1 WHERE one > 6; -psql:sql/14beta2/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:53: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 15: SELECT * FROM example1 WHERE one >= 6; -psql:sql/14beta2/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 6 | {16,17,18} | tres | 2018-01-06 00:00:00 | 2018-01-06 | f | 2 @@ -121,8 +121,8 @@ psql:sql/14beta2/parquet_s3_fdw.sql:55: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 16: SELECT * FROM example1 WHERE one = 2; -psql:sql/14beta2/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -130,17 +130,17 @@ psql:sql/14beta2/parquet_s3_fdw.sql:57: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 17: SELECT * FROM example1 WHERE one = 7; -psql:sql/14beta2/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:59: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) --Testcase 18: SELECT * FROM example1 WHERE six = true; -psql:sql/14beta2/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -149,7 +149,7 @@ psql:sql/14beta2/parquet_s3_fdw.sql:61: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 19: SELECT * FROM example1 WHERE six = false; -psql:sql/14beta2/parquet_s3_fdw.sql:63: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:63: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -160,8 +160,8 @@ psql:sql/14beta2/parquet_s3_fdw.sql:63: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 20: SELECT * FROM example1 WHERE seven < 1.5; -psql:sql/14beta2/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+---------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -170,7 +170,7 @@ psql:sql/14beta2/parquet_s3_fdw.sql:65: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 21: SELECT * FROM example1 WHERE seven <= 1.5; -psql:sql/14beta2/parquet_s3_fdw.sql:67: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:67: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -180,8 +180,8 @@ psql:sql/14beta2/parquet_s3_fdw.sql:67: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 22: SELECT * FROM example1 WHERE seven = 1.5; -psql:sql/14beta2/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -189,8 +189,8 @@ psql:sql/14beta2/parquet_s3_fdw.sql:69: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 23: SELECT * FROM example1 WHERE seven > 1; -psql:sql/14beta2/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 4 | {10,11,12} | uno | 2018-01-04 00:00:00 | 2018-01-04 | f | 1.5 @@ -199,7 +199,7 @@ psql:sql/14beta2/parquet_s3_fdw.sql:71: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 24: SELECT * FROM example1 WHERE seven >= 1; -psql:sql/14beta2/parquet_s3_fdw.sql:73: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:73: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 3 | {7,8,9} | baz | 2018-01-03 00:00:00 | 2018-01-03 | t | 1 @@ -209,7 +209,7 @@ psql:sql/14beta2/parquet_s3_fdw.sql:73: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 25: SELECT * FROM example1 WHERE seven IS NULL; -psql:sql/14beta2/parquet_s3_fdw.sql:75: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:75: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 2 | {NULL,5,6} | bar | 2018-01-02 00:00:00 | 2018-01-02 | f | @@ -221,8 +221,8 @@ psql:sql/14beta2/parquet_s3_fdw.sql:75: DEBUG: parquet_s3_fdw: open Parquet fil prepare prep(date) as select * from example1 where five < $1; --Testcase 27: execute prep('2018-01-03'); -psql:sql/14beta2/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datasimple/example1.parquet one | two | three | four | five | six | seven -----+------------+-------+---------------------+------------+-----+------- 1 | {1,2,3} | foo | 2018-01-01 00:00:00 | 2018-01-01 | t | 0.5 @@ -231,9 +231,9 @@ psql:sql/14beta2/parquet_s3_fdw.sql:81: DEBUG: parquet_s3_fdw: open Parquet fil --Testcase 28: execute prep('2018-01-01'); -psql:sql/14beta2/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 -psql:sql/14beta2/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet +psql:sql/14.0/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: skip rowgroup 2 +psql:sql/14.0/parquet_s3_fdw.sql:83: DEBUG: parquet_s3_fdw: open Parquet file on S3. datasimple/example1.parquet one | two | three | four | five | six | seven -----+-----+-------+------+------+-----+------- (0 rows) @@ -243,18 +243,18 @@ SET client_min_messages = WARNING; --Testcase 29: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv; -psql:sql/14beta2/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required +psql:sql/14.0/parquet_s3_fdw.sql:89: ERROR: parquet_s3_fdw: filename or function is required --Testcase 30: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename 'nonexistent.parquet', some_option '123'); -psql:sql/14beta2/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory +psql:sql/14.0/parquet_s3_fdw.sql:93: ERROR: parquet_s3_fdw: No such file or directory \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 31: CREATE FOREIGN TABLE example_fail (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (filename :'var', some_option '123'); -psql:sql/14beta2/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option 
"some_option" +psql:sql/14.0/parquet_s3_fdw.sql:98: ERROR: parquet_s3_fdw: invalid option "some_option" -- type mismatch \set var :PATH_FILENAME'/data/simple/example1.parquet' --Testcase 32: @@ -263,10 +263,10 @@ SERVER parquet_s3_srv OPTIONS (filename :'var', sorted 'one'); --Testcase 33: SELECT one FROM example_fail; -psql:sql/14beta2/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found +psql:sql/14.0/parquet_s3_fdw.sql:107: ERROR: parquet_s3_fdw: parquet_s3_fdw: cast function to bigint[] ('one' column) is not found --Testcase 34: SELECT two FROM example_fail; -psql:sql/14beta2/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' +psql:sql/14.0/parquet_s3_fdw.sql:109: ERROR: parquet_s3_fdw: parquet_s3_fdw: cannot convert parquet column of type LIST to scalar type of postgres column 'two' -- files_func --Testcase 35: CREATE FUNCTION list_parquet_s3_files(args JSONB) @@ -315,17 +315,17 @@ LANGUAGE SQL; CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'int_array_func'); -psql:sql/14beta2/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] +psql:sql/14.0/parquet_s3_fdw.sql:144: ERROR: parquet_s3_fdw: return type of 'int_array_func' is integer[]; expected text[] --Testcase 41: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'no_args_func'); -psql:sql/14beta2/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist +psql:sql/14.0/parquet_s3_fdw.sql:148: ERROR: function no_args_func(jsonb) does not exist --Testcase 42: CREATE FOREIGN TABLE example_inv_func (one INT8, two INT8[], three TEXT) SERVER parquet_s3_srv OPTIONS (files_func 'list_parquet_s3_files', files_func_arg 'invalid json'); 
-psql:sql/14beta2/parquet_s3_fdw.sql:152: ERROR: invalid input syntax for type json +psql:sql/14.0/parquet_s3_fdw.sql:152: ERROR: invalid input syntax for type json DETAIL: Token "invalid" is invalid. CONTEXT: JSON data, line 1: invalid... --Testcase 43: @@ -611,7 +611,7 @@ SERVER parquet_s3_srv OPTIONS (filename :'var', sorted 'one'); --Testcase 67: SELECT * FROM example3; -psql:sql/14beta2/parquet_s3_fdw.sql:267: DEBUG: parquet_s3_fdw: open Parquet file on S3. datacomplex/example3.parquet +psql:sql/14.0/parquet_s3_fdw.sql:267: DEBUG: parquet_s3_fdw: open Parquet file on S3. datacomplex/example3.parquet one | two | three --------------------------------------+--------------------------------------+------- {"1": "foo", "2": "bar", "3": "baz"} | {"2018-01-01": 10, "2018-01-02": 15} | 1 @@ -620,8 +620,8 @@ psql:sql/14beta2/parquet_s3_fdw.sql:267: DEBUG: parquet_s3_fdw: open Parquet fi --Testcase 68: SELECT * FROM example3 WHERE three = 3; -psql:sql/14beta2/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 -psql:sql/14beta2/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: open Parquet file on S3. datacomplex/example3.parquet +psql:sql/14.0/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: skip rowgroup 1 +psql:sql/14.0/parquet_s3_fdw.sql:269: DEBUG: parquet_s3_fdw: open Parquet file on S3. 
datacomplex/example3.parquet one | two | three -----+-----+------- (0 rows) @@ -645,14 +645,14 @@ SET client_min_messages = WARNING; SELECT * FROM public.parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 71: SELECT parquet_s3_fdw_version(); parquet_s3_fdw_version ------------------------ - 200 + 201 (1 row) --Testcase 72: diff --git a/parquet_s3_fdw.h b/parquet_s3_fdw.h index b1a9eaf..edff8d0 100644 --- a/parquet_s3_fdw.h +++ b/parquet_s3_fdw.h @@ -36,17 +36,17 @@ #define parquet_fdw_use_threads parquet_s3_fdw_use_threads #define SingleFileExecutionState SingleFileExecutionStateS3 -#define CODE_VERSION 200 +#define CODE_VERSION 201 /* Structure to store option information. */ typedef struct parquet_s3_server_opt { - bool use_minio; /* Connect to MinIO instead of Amazon S3. */ + bool use_minio; /* Connect to MinIO instead of Amazon S3. */ bool keep_connections; /* setting value of keep_connections - * server option */ + * server option */ } parquet_s3_server_opt; -bool parquet_s3_is_valid_server_option(DefElem *def); +bool parquet_s3_is_valid_server_option(DefElem *def); parquet_s3_server_opt *parquet_s3_get_options(Oid foreignoid); parquet_s3_server_opt *parquet_s3_get_server_options(Oid serverid); @@ -54,5 +54,4 @@ parquet_s3_server_opt *parquet_s3_get_server_options(Oid serverid); #define SERVER_OPTION_USE_MINIO "use_minio" #define SERVER_OPTION_KEEP_CONNECTIONS "keep_connections" -#endif /* __PARQUET_S3_FDW_H__ */ - +#endif /* __PARQUET_S3_FDW_H__ */ diff --git a/parquet_s3_fdw_server_option.c b/parquet_s3_fdw_server_option.c index c67a808..0676d2b 100644 --- a/parquet_s3_fdw_server_option.c +++ b/parquet_s3_fdw_server_option.c @@ -25,35 +25,6 @@ #include "utils/builtins.h" #include "utils/lsyscache.h" -/* - * Describes the valid options for server that use this wrapper. 
- */ -typedef struct ParquetS3FdwServerOption -{ - const char *optname; - Oid optcontext; /* Oid of catalog in which option may appear */ -} ParquetS3FdwServerOption; - - -/* - * Valid options for parquet_s3_fdw. - * - */ -static ParquetS3FdwServerOption parquet_s3_server_options[] = -{ - /* Connection options */ - { - SERVER_OPTION_USE_MINIO, ForeignServerRelationId - }, - /* Keep Connections options */ - { - SERVER_OPTION_KEEP_CONNECTIONS, ForeignServerRelationId - }, - /* Sentinel */ - { - NULL, InvalidOid - } -}; /* * Check if the provided option is one of the valid options. @@ -61,18 +32,16 @@ static ParquetS3FdwServerOption parquet_s3_server_options[] = bool parquet_s3_is_valid_server_option(DefElem *def) { - struct ParquetS3FdwServerOption *opt; - if (strcmp(def->defname, SERVER_OPTION_USE_MINIO) == 0 || strcmp(def->defname, SERVER_OPTION_KEEP_CONNECTIONS) == 0) { /* Check that bool value is valid */ - bool check_bool_valid; + bool check_bool_valid; if (!parse_bool(defGetString(def), &check_bool_valid)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("parquet_s3_fdw: invalid value for boolean option \"%s\": %s", + errmsg("parquet_s3_fdw: invalid value for boolean option \"%s\": %s", def->defname, defGetString(def)))); return true; } @@ -84,7 +53,7 @@ parquet_s3_is_valid_server_option(DefElem *def) * Extract listed option information into parquet_s3_server_opt structure. */ static void -extract_options(List *options, parquet_s3_server_opt *opt) +parquet_s3_extract_options(List *options, parquet_s3_server_opt * opt) { ListCell *lc; @@ -145,7 +114,7 @@ parquet_s3_get_options(Oid foreignoid) options = list_concat(options, f_mapping->options); /* Store option information into the structure. */ - extract_options(options, opt); + parquet_s3_extract_options(options, opt); return opt; } @@ -173,7 +142,7 @@ parquet_s3_get_server_options(Oid serverid) options = f_server->options; /* Store option information into the structure. 
*/ - extract_options(options, opt); + parquet_s3_extract_options(options, opt); return opt; } diff --git a/sql/13.3/import.sql b/sql/13.4/import.sql similarity index 100% rename from sql/13.3/import.sql rename to sql/13.4/import.sql diff --git a/sql/13.3/import_local.sql b/sql/13.4/import_local.sql similarity index 100% rename from sql/13.3/import_local.sql rename to sql/13.4/import_local.sql diff --git a/sql/13.3/import_server.sql b/sql/13.4/import_server.sql similarity index 100% rename from sql/13.3/import_server.sql rename to sql/13.4/import_server.sql diff --git a/sql/13.3/parquet_s3_fdw.sql b/sql/13.4/parquet_s3_fdw.sql similarity index 100% rename from sql/13.3/parquet_s3_fdw.sql rename to sql/13.4/parquet_s3_fdw.sql diff --git a/sql/13.3/parquet_s3_fdw2.sql b/sql/13.4/parquet_s3_fdw2.sql similarity index 100% rename from sql/13.3/parquet_s3_fdw2.sql rename to sql/13.4/parquet_s3_fdw2.sql diff --git a/sql/13.3/parquet_s3_fdw_local.sql b/sql/13.4/parquet_s3_fdw_local.sql similarity index 100% rename from sql/13.3/parquet_s3_fdw_local.sql rename to sql/13.4/parquet_s3_fdw_local.sql diff --git a/sql/13.3/parquet_s3_fdw_post.sql b/sql/13.4/parquet_s3_fdw_post.sql similarity index 98% rename from sql/13.3/parquet_s3_fdw_post.sql rename to sql/13.4/parquet_s3_fdw_post.sql index 520aa75..d5e2040 100644 --- a/sql/13.3/parquet_s3_fdw_post.sql +++ b/sql/13.4/parquet_s3_fdw_post.sql @@ -573,14 +573,14 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; --Testcase 127: SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; -SET enable_resultcache TO off; +SET enable_memoize TO off; -- right outer join + left outer join --Testcase 128: EXPLAIN (VERBOSE, COSTS OFF) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 
LIMIT 10; --Testcase 129: SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; -RESET enable_resultcache; +RESET enable_memoize; -- left outer join + right outer join --Testcase 130: EXPLAIN (VERBOSE, COSTS OFF) @@ -1428,9 +1428,11 @@ DROP FOREIGN TABLE reindex_foreign; -- conversion error -- =================================================================== -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int; --- SELECT * FROM ft1 WHERE c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR +-- SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR -- SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE text; @@ -1848,16 +1850,42 @@ DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== -- test generated columns -- =================================================================== --- create table gloc1 (a int, b int); +-- create table gloc1 ( +-- a int, +-- b int generated always as (a * 2) stored); -- alter table gloc1 set (autovacuum_enabled = 'false'); -- create foreign table grem1 ( -- a int, -- b int generated always as (a * 2) stored) -- server loopback options(table_name 'gloc1'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); -- insert into grem1 (a) values (1), (2); +-- explain (verbose, costs off) +-- update grem1 set a = 22 where a = 2; -- update grem1 set a = 22 where a = 2; -- select * from gloc1; -- select * from grem1; 
+-- delete from grem1; + +-- -- test copy from +-- copy grem1 from stdin; +-- 1 +-- 2 +-- \. +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; + +-- -- test batch insert +-- alter server loopback options (add batch_size '10'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- alter server loopback options (drop batch_size); -- =================================================================== -- test local triggers @@ -2104,6 +2132,10 @@ DROP FUNCTION row_before_insupd_trigfunc; -- -- Test direct foreign table modification functionality +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1; -- can be pushed down +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1 WHERE false; -- currently can't be pushed down -- -- Test with statement-level triggers -- CREATE TRIGGER trig_stmt_before @@ -2874,7 +2906,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- \d import_dest2.* -- CREATE SCHEMA import_dest3; -- IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 --- OPTIONS (import_collate 'false', import_not_null 'false'); +-- OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false'); -- \det+ import_dest3.* -- \d import_dest3.* @@ -3102,7 +3134,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- c8 user_enum -- ) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- If we add a password to the connstr it'll fail, because we don't allow passwords -- in connstrs only in user mappings. 
@@ -3120,13 +3152,13 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- Unpriv user cannot make the mapping passwordless -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- RESET ROLE; @@ -3136,7 +3168,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- SET ROLE regress_nosuper; -- Should finally work now --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- unpriv user also cannot set sslcert / sslkey on the user mapping -- first set password_required so we see the right error messages @@ -3181,11 +3213,11 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- so that we can easily terminate the connection later. -- ALTER SERVER parquet_s3_srv OPTIONS (application_name 'fdw_retry_check'); --- If debug_invalidate_system_caches_always is active, it results in +-- If debug_discard_caches is active, it results in -- dropping remote connections after every transaction, making it -- impossible to test termination meaningfully. So turn that off -- for this test. --- SET debug_invalidate_system_caches_always = 0; +-- SET debug_discard_caches = 0; -- Make sure we have a remote connection. -- SELECT 1 FROM ft1 LIMIT 1; @@ -3211,7 +3243,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- \set VERBOSITY default -- COMMIT; --- RESET debug_invalidate_system_caches_always; +-- RESET debug_discard_caches; -- ============================================================================= -- test connection invalidation cases and parquet_s3_fdw_get_connections function -- with local parquet file (not on minio/s3 servers). 
It haven't server and connection. @@ -3660,6 +3692,16 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; +-- CREATE TABLE local_tbl (a int, b int, c text); +-- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); +-- ANALYZE local_tbl; + +-- EXPLAIN (VERBOSE, COSTS OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; + -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) @@ -3667,8 +3709,6 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- Check with foreign modify --- CREATE TABLE local_tbl (a int, b int, c text); --- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); -- CREATE TABLE base_tbl3 (a int, b int, c text); -- CREATE FOREIGN TABLE remote_tbl (a int, b int, c text) @@ -3730,6 +3770,22 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- ALTER SERVER loopback OPTIONS (DROP async_capable); -- ALTER SERVER loopback2 OPTIONS (DROP async_capable); +-- =================================================================== +-- test invalid server and foreign table options +-- =================================================================== +-- Invalid fdw_startup_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_startup_cost 
'100$%$#$#'); +-- -- Invalid fdw_tuple_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_tuple_cost '100$%$#$#'); +-- -- Invalid fetch_size option +-- CREATE FOREIGN TABLE inv_fsz (c1 int ) +-- SERVER loopback OPTIONS (fetch_size '100$%$#$#'); +-- -- Invalid batch_size option +-- CREATE FOREIGN TABLE inv_bsz (c1 int ) +-- SERVER loopback OPTIONS (batch_size '100$%$#$#'); + -- Clean-up SET client_min_messages TO WARNING; --Testcase 438: diff --git a/sql/13.3/parquet_s3_fdw_post_local.sql b/sql/13.4/parquet_s3_fdw_post_local.sql similarity index 100% rename from sql/13.3/parquet_s3_fdw_post_local.sql rename to sql/13.4/parquet_s3_fdw_post_local.sql diff --git a/sql/13.3/parquet_s3_fdw_post_server.sql b/sql/13.4/parquet_s3_fdw_post_server.sql similarity index 100% rename from sql/13.3/parquet_s3_fdw_post_server.sql rename to sql/13.4/parquet_s3_fdw_post_server.sql diff --git a/sql/13.3/parquet_s3_fdw_server.sql b/sql/13.4/parquet_s3_fdw_server.sql similarity index 100% rename from sql/13.3/parquet_s3_fdw_server.sql rename to sql/13.4/parquet_s3_fdw_server.sql diff --git a/sql/14beta2/import.sql b/sql/14.0/import.sql similarity index 100% rename from sql/14beta2/import.sql rename to sql/14.0/import.sql diff --git a/sql/14beta2/import_local.sql b/sql/14.0/import_local.sql similarity index 100% rename from sql/14beta2/import_local.sql rename to sql/14.0/import_local.sql diff --git a/sql/14beta2/import_server.sql b/sql/14.0/import_server.sql similarity index 100% rename from sql/14beta2/import_server.sql rename to sql/14.0/import_server.sql diff --git a/sql/14beta2/parquet_s3_fdw.sql b/sql/14.0/parquet_s3_fdw.sql similarity index 100% rename from sql/14beta2/parquet_s3_fdw.sql rename to sql/14.0/parquet_s3_fdw.sql diff --git a/sql/14beta2/parquet_s3_fdw2.sql b/sql/14.0/parquet_s3_fdw2.sql similarity index 100% rename from sql/14beta2/parquet_s3_fdw2.sql rename to sql/14.0/parquet_s3_fdw2.sql diff --git 
a/sql/14beta2/parquet_s3_fdw_local.sql b/sql/14.0/parquet_s3_fdw_local.sql similarity index 100% rename from sql/14beta2/parquet_s3_fdw_local.sql rename to sql/14.0/parquet_s3_fdw_local.sql diff --git a/sql/14beta2/parquet_s3_fdw_post.sql b/sql/14.0/parquet_s3_fdw_post.sql similarity index 98% rename from sql/14beta2/parquet_s3_fdw_post.sql rename to sql/14.0/parquet_s3_fdw_post.sql index 520aa75..d5e2040 100644 --- a/sql/14beta2/parquet_s3_fdw_post.sql +++ b/sql/14.0/parquet_s3_fdw_post.sql @@ -573,14 +573,14 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; --Testcase 127: SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; -SET enable_resultcache TO off; +SET enable_memoize TO off; -- right outer join + left outer join --Testcase 128: EXPLAIN (VERBOSE, COSTS OFF) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; --Testcase 129: SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; -RESET enable_resultcache; +RESET enable_memoize; -- left outer join + right outer join --Testcase 130: EXPLAIN (VERBOSE, COSTS OFF) @@ -1428,9 +1428,11 @@ DROP FOREIGN TABLE reindex_foreign; -- conversion error -- =================================================================== -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int; --- SELECT * FROM ft1 WHERE c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR --- SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR +-- SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND 
ftx.x1 = 1; -- ERROR +-- SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2 +-- WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR -- SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR -- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE text; @@ -1848,16 +1850,42 @@ DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== -- test generated columns -- =================================================================== --- create table gloc1 (a int, b int); +-- create table gloc1 ( +-- a int, +-- b int generated always as (a * 2) stored); -- alter table gloc1 set (autovacuum_enabled = 'false'); -- create foreign table grem1 ( -- a int, -- b int generated always as (a * 2) stored) -- server loopback options(table_name 'gloc1'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); -- insert into grem1 (a) values (1), (2); +-- explain (verbose, costs off) +-- update grem1 set a = 22 where a = 2; -- update grem1 set a = 22 where a = 2; -- select * from gloc1; -- select * from grem1; +-- delete from grem1; + +-- -- test copy from +-- copy grem1 from stdin; +-- 1 +-- 2 +-- \. 
+-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; + +-- -- test batch insert +-- alter server loopback options (add batch_size '10'); +-- explain (verbose, costs off) +-- insert into grem1 (a) values (1), (2); +-- insert into grem1 (a) values (1), (2); +-- select * from gloc1; +-- select * from grem1; +-- delete from grem1; +-- alter server loopback options (drop batch_size); -- =================================================================== -- test local triggers @@ -2104,6 +2132,10 @@ DROP FUNCTION row_before_insupd_trigfunc; -- -- Test direct foreign table modification functionality +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1; -- can be pushed down +-- EXPLAIN (verbose, costs off) +-- DELETE FROM rem1 WHERE false; -- currently can't be pushed down -- -- Test with statement-level triggers -- CREATE TRIGGER trig_stmt_before @@ -2874,7 +2906,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- \d import_dest2.* -- CREATE SCHEMA import_dest3; -- IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 --- OPTIONS (import_collate 'false', import_not_null 'false'); +-- OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false'); -- \det+ import_dest3.* -- \d import_dest3.* @@ -3102,7 +3134,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- c8 user_enum -- ) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- If we add a password to the connstr it'll fail, because we don't allow passwords -- in connstrs only in user mappings. 
@@ -3120,13 +3152,13 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- Unpriv user cannot make the mapping passwordless -- ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false'); --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- RESET ROLE; @@ -3136,7 +3168,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- SET ROLE regress_nosuper; -- Should finally work now --- SELECT * FROM ft1_nopw LIMIT 1; +-- SELECT 1 FROM ft1_nopw LIMIT 1; -- unpriv user also cannot set sslcert / sslkey on the user mapping -- first set password_required so we see the right error messages @@ -3181,11 +3213,11 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- so that we can easily terminate the connection later. -- ALTER SERVER parquet_s3_srv OPTIONS (application_name 'fdw_retry_check'); --- If debug_invalidate_system_caches_always is active, it results in +-- If debug_discard_caches is active, it results in -- dropping remote connections after every transaction, making it -- impossible to test termination meaningfully. So turn that off -- for this test. --- SET debug_invalidate_system_caches_always = 0; +-- SET debug_discard_caches = 0; -- Make sure we have a remote connection. -- SELECT 1 FROM ft1 LIMIT 1; @@ -3211,7 +3243,7 @@ IMPORT FOREIGN SCHEMA :var FROM SERVER parquet_s3_srv INTO import_dest1 OPTIONS -- \set VERBOSITY default -- COMMIT; --- RESET debug_invalidate_system_caches_always; +-- RESET debug_discard_caches; -- ============================================================================= -- test connection invalidation cases and parquet_s3_fdw_get_connections function -- with local parquet file (not on minio/s3 servers). 
It haven't server and connection. @@ -3660,6 +3692,16 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; -- SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505; +-- CREATE TABLE local_tbl (a int, b int, c text); +-- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); +-- ANALYZE local_tbl; + +-- EXPLAIN (VERBOSE, COSTS OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; +-- SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a; + -- EXPLAIN (VERBOSE, COSTS OFF) -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) @@ -3667,8 +3709,6 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1; -- Check with foreign modify --- CREATE TABLE local_tbl (a int, b int, c text); --- INSERT INTO local_tbl VALUES (1505, 505, 'foo'); -- CREATE TABLE base_tbl3 (a int, b int, c text); -- CREATE FOREIGN TABLE remote_tbl (a int, b int, c text) @@ -3730,6 +3770,22 @@ ALTER SERVER parquet_s3_srv OPTIONS (SET keep_connections 'on'); -- ALTER SERVER loopback OPTIONS (DROP async_capable); -- ALTER SERVER loopback2 OPTIONS (DROP async_capable); +-- =================================================================== +-- test invalid server and foreign table options +-- =================================================================== +-- Invalid fdw_startup_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_startup_cost 
'100$%$#$#'); +-- -- Invalid fdw_tuple_cost option +-- CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw +-- OPTIONS(fdw_tuple_cost '100$%$#$#'); +-- -- Invalid fetch_size option +-- CREATE FOREIGN TABLE inv_fsz (c1 int ) +-- SERVER loopback OPTIONS (fetch_size '100$%$#$#'); +-- -- Invalid batch_size option +-- CREATE FOREIGN TABLE inv_bsz (c1 int ) +-- SERVER loopback OPTIONS (batch_size '100$%$#$#'); + -- Clean-up SET client_min_messages TO WARNING; --Testcase 438: diff --git a/sql/14beta2/parquet_s3_fdw_post_local.sql b/sql/14.0/parquet_s3_fdw_post_local.sql similarity index 100% rename from sql/14beta2/parquet_s3_fdw_post_local.sql rename to sql/14.0/parquet_s3_fdw_post_local.sql diff --git a/sql/14beta2/parquet_s3_fdw_post_server.sql b/sql/14.0/parquet_s3_fdw_post_server.sql similarity index 100% rename from sql/14beta2/parquet_s3_fdw_post_server.sql rename to sql/14.0/parquet_s3_fdw_post_server.sql diff --git a/sql/14beta2/parquet_s3_fdw_server.sql b/sql/14.0/parquet_s3_fdw_server.sql similarity index 100% rename from sql/14beta2/parquet_s3_fdw_server.sql rename to sql/14.0/parquet_s3_fdw_server.sql diff --git a/src/parquet_fdw.c b/src/parquet_fdw.c index 091ceac..e79d78e 100644 --- a/src/parquet_fdw.c +++ b/src/parquet_fdw.c @@ -29,49 +29,49 @@ PG_MODULE_MAGIC; -void _PG_init(void); +void _PG_init(void); extern void parquet_s3_init(); extern void parquet_s3_shutdown(); /* FDW routines */ extern void parquetGetForeignRelSize(PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid); + RelOptInfo *baserel, + Oid foreigntableid); extern void parquetGetForeignPaths(PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid); + RelOptInfo *baserel, + Oid foreigntableid); extern ForeignScan *parquetGetForeignPlan(PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid, - ForeignPath *best_path, - List *tlist, - List *scan_clauses, - Plan *outer_plan); + RelOptInfo *baserel, + Oid foreigntableid, + ForeignPath *best_path, + List *tlist, + 
List *scan_clauses, + Plan *outer_plan); extern TupleTableSlot *parquetIterateForeignScan(ForeignScanState *node); extern void parquetBeginForeignScan(ForeignScanState *node, int eflags); extern void parquetEndForeignScan(ForeignScanState *node); extern void parquetReScanForeignScan(ForeignScanState *node); -extern int parquetAcquireSampleRowsFunc(Relation relation, int elevel, - HeapTuple *rows, int targrows, - double *totalrows, - double *totaldeadrows); -extern bool parquetAnalyzeForeignTable (Relation relation, - AcquireSampleRowsFunc *func, - BlockNumber *totalpages); +extern int parquetAcquireSampleRowsFunc(Relation relation, int elevel, + HeapTuple *rows, int targrows, + double *totalrows, + double *totaldeadrows); +extern bool parquetAnalyzeForeignTable(Relation relation, + AcquireSampleRowsFunc *func, + BlockNumber *totalpages); extern void parquetExplainForeignScan(ForeignScanState *node, ExplainState *es); extern bool parquetIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte); + RangeTblEntry *rte); extern Size parquetEstimateDSMForeignScan(ForeignScanState *node, - ParallelContext *pcxt); + ParallelContext *pcxt); extern void parquetInitializeDSMForeignScan(ForeignScanState *node, - ParallelContext *pcxt, - void *coordinate); + ParallelContext *pcxt, + void *coordinate); extern void parquetReInitializeDSMForeignScan(ForeignScanState *node, - ParallelContext *pcxt, - void *coordinate); + ParallelContext *pcxt, + void *coordinate); extern void parquetInitializeWorkerForeignScan(ForeignScanState *node, - shm_toc *toc, - void *coordinate); + shm_toc *toc, + void *coordinate); extern void parquetShutdownForeignScan(ForeignScanState *node); extern List *parquetImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid); extern Datum parquet_fdw_validator_impl(PG_FUNCTION_ARGS); @@ -83,19 +83,19 @@ void _PG_init(void) { DefineCustomBoolVariable("parquet_s3_fdw.use_threads", - "Enables use_thread option", - NULL, - 
&parquet_fdw_use_threads, - true, - PGC_USERSET, - 0, - NULL, - NULL, - NULL); - - parquet_s3_init(); - - on_proc_exit(&parquet_s3_shutdown, PointerGetDatum(NULL)); + "Enables use_thread option", + NULL, + &parquet_fdw_use_threads, + true, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + + parquet_s3_init(); + + on_proc_exit(&parquet_s3_shutdown, PointerGetDatum(NULL)); } PG_FUNCTION_INFO_V1(parquet_s3_fdw_validator); @@ -104,13 +104,13 @@ PG_FUNCTION_INFO_V1(parquet_s3_fdw_version); Datum parquet_s3_fdw_version(PG_FUNCTION_ARGS) { - PG_RETURN_INT32(CODE_VERSION); + PG_RETURN_INT32(CODE_VERSION); } Datum parquet_s3_fdw_validator(PG_FUNCTION_ARGS) { - return parquet_fdw_validator_impl(fcinfo); + return parquet_fdw_validator_impl(fcinfo); } PG_FUNCTION_INFO_V1(parquet_s3_fdw_handler); @@ -118,25 +118,24 @@ PG_FUNCTION_INFO_V1(parquet_s3_fdw_handler); Datum parquet_s3_fdw_handler(PG_FUNCTION_ARGS) { - FdwRoutine *fdwroutine = makeNode(FdwRoutine); - - fdwroutine->GetForeignRelSize = parquetGetForeignRelSize; - fdwroutine->GetForeignPaths = parquetGetForeignPaths; - fdwroutine->GetForeignPlan = parquetGetForeignPlan; - fdwroutine->BeginForeignScan = parquetBeginForeignScan; - fdwroutine->IterateForeignScan = parquetIterateForeignScan; - fdwroutine->ReScanForeignScan = parquetReScanForeignScan; - fdwroutine->EndForeignScan = parquetEndForeignScan; - fdwroutine->AnalyzeForeignTable = parquetAnalyzeForeignTable; - fdwroutine->ExplainForeignScan = parquetExplainForeignScan; - fdwroutine->IsForeignScanParallelSafe = parquetIsForeignScanParallelSafe; - fdwroutine->EstimateDSMForeignScan = parquetEstimateDSMForeignScan; - fdwroutine->InitializeDSMForeignScan = parquetInitializeDSMForeignScan; - fdwroutine->ReInitializeDSMForeignScan = parquetReInitializeDSMForeignScan; - fdwroutine->InitializeWorkerForeignScan = parquetInitializeWorkerForeignScan; - fdwroutine->ShutdownForeignScan = parquetShutdownForeignScan; - fdwroutine->ImportForeignSchema = parquetImportForeignSchema; - - 
PG_RETURN_POINTER(fdwroutine); + FdwRoutine *fdwroutine = makeNode(FdwRoutine); + + fdwroutine->GetForeignRelSize = parquetGetForeignRelSize; + fdwroutine->GetForeignPaths = parquetGetForeignPaths; + fdwroutine->GetForeignPlan = parquetGetForeignPlan; + fdwroutine->BeginForeignScan = parquetBeginForeignScan; + fdwroutine->IterateForeignScan = parquetIterateForeignScan; + fdwroutine->ReScanForeignScan = parquetReScanForeignScan; + fdwroutine->EndForeignScan = parquetEndForeignScan; + fdwroutine->AnalyzeForeignTable = parquetAnalyzeForeignTable; + fdwroutine->ExplainForeignScan = parquetExplainForeignScan; + fdwroutine->IsForeignScanParallelSafe = parquetIsForeignScanParallelSafe; + fdwroutine->EstimateDSMForeignScan = parquetEstimateDSMForeignScan; + fdwroutine->InitializeDSMForeignScan = parquetInitializeDSMForeignScan; + fdwroutine->ReInitializeDSMForeignScan = parquetReInitializeDSMForeignScan; + fdwroutine->InitializeWorkerForeignScan = parquetInitializeWorkerForeignScan; + fdwroutine->ShutdownForeignScan = parquetShutdownForeignScan; + fdwroutine->ImportForeignSchema = parquetImportForeignSchema; + + PG_RETURN_POINTER(fdwroutine); } - diff --git a/src/parquet_impl.cpp b/src/parquet_impl.cpp index ae7187f..6f14531 100644 --- a/src/parquet_impl.cpp +++ b/src/parquet_impl.cpp @@ -1317,7 +1317,7 @@ parquetGetForeignPlan(PlannerInfo * /* root */, if (fdw_private->s3client) { if(fdw_private->dirname == NULL) - params = lappend(params, makeString("")); + params = lappend(params, makeString((char *) "")); else params = lappend(params, makeString(fdw_private->dirname)); params = lappend(params, makeInteger(foreigntableid)); @@ -1341,7 +1341,7 @@ parquetGetForeignPlan(PlannerInfo * /* root */, extern "C" void parquetBeginForeignScan(ForeignScanState *node, int /* eflags */) { - ParquetS3FdwExecutionState *festate; + ParquetS3FdwExecutionState *festate = NULL; MemoryContextCallback *callback; MemoryContext reader_cxt; ForeignScan *plan = (ForeignScan *) node->ss.ps.plan;