diff --git a/src/query/catalog/src/plan/datasource/datasource_info/parquet.rs b/src/query/catalog/src/plan/datasource/datasource_info/parquet.rs
index fa41bb025eab..407d26ce6b54 100644
--- a/src/query/catalog/src/plan/datasource/datasource_info/parquet.rs
+++ b/src/query/catalog/src/plan/datasource/datasource_info/parquet.rs
@@ -71,12 +71,11 @@ pub struct ParquetTableInfo {
     pub files_to_read: Option>,
     pub schema_from: String,
     pub compression_ratio: f64,
+    pub leaf_fields: Arc>,
 
     // These fields are only used in coordinator node of the cluster,
     // so we don't need to serialize them.
     #[serde(skip)]
-    pub leaf_fields: Arc>,
-    #[serde(skip)]
     pub parquet_metas: Arc>>>,
     #[serde(skip)]
     pub need_stats_provider: bool,
diff --git a/src/query/storages/stage/src/append/parquet_file/writer_processor.rs b/src/query/storages/stage/src/append/parquet_file/writer_processor.rs
index ff8aad1d6fdd..40101d668edc 100644
--- a/src/query/storages/stage/src/append/parquet_file/writer_processor.rs
+++ b/src/query/storages/stage/src/append/parquet_file/writer_processor.rs
@@ -81,7 +81,7 @@ fn create_writer(
         .set_max_row_group_size(MAX_ROW_GROUP_SIZE)
         .set_encoding(Encoding::PLAIN)
         .set_dictionary_enabled(false)
-        .set_statistics_enabled(EnabledStatistics::None)
+        .set_statistics_enabled(EnabledStatistics::Chunk)
         .set_bloom_filter_enabled(false)
         .set_created_by(format!("Databend {}", *DATABEND_SEMVER))
         .build();
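The writer change above switches stage parquet unloads from writing no statistics to writing column-chunk-level min/max statistics (EnabledStatistics::Chunk sits between None and Page in the parquet crate), which is why nearly every expected file size in the test updates below grows, e.g. 356 to 374 bytes for a one-row, one-column file. A rough way to observe the effect end to end, sketched against the helpers in tests/shell_env.sh; the stage name is illustrative:

. tests/shell_env.sh    # provides $BENDSQL_CLIENT_CONNECT

echo "drop stage if exists stats_demo" | $BENDSQL_CLIENT_CONNECT
echo "create stage stats_demo" | $BENDSQL_CLIENT_CONNECT
# the reported byte count now includes the chunk-level min/max stats
echo "copy into @stats_demo from (select 1 as a)" | $BENDSQL_CLIENT_CONNECT
# stats let filtered scans over the stage skip whole row groups
echo "select count() from @stats_demo where a >= 0" | $BENDSQL_CLIENT_CONNECT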
"insert into fuse_test_flashback values (5)" | $BENDSQL_CLIENT_CONNECT +echo "insert into fuse_test_flashback values (4)" | $BENDSQL_CLIENT_OUTPUT_NULL +echo "insert into fuse_test_flashback values (5)" | $BENDSQL_CLIENT_OUTPUT_NULL # the table now contains five rows {1,2,3,4,5}, and 5 snapshots: # s1 {1}, s2 {1,2}, s3 {1,2,3}, s4 {1,2,3,4}, s5 {1,2,3,4,5} @@ -121,7 +121,7 @@ echo "suite: mixed versioned segment compaction test" # creation of s5: #--------------- # insert another row, which will produce a new snapshot s5 {1,2,3,4}, of version 3 -echo "insert into t2 values (4)" | $BENDSQL_CLIENT_CONNECT +echo "insert into t2 values (4)" | $BENDSQL_CLIENT_OUTPUT_NULL # s5 now contains 3 segments, 2 of version 3, and 1 of version 4 # - v2 segment_1: {1,2}, v2 segment_2: {3}, v3 segment_3: {4} diff --git a/tests/databend-test b/tests/databend-test index 32971a03de81..b4b5dc41688b 100755 --- a/tests/databend-test +++ b/tests/databend-test @@ -353,7 +353,7 @@ def run_tests_array(all_tests_with_params): result_is_different = subprocess.call( ['diff', '-q', result_file, stdout_file], stdout=PIPE) - if result_is_different: + if not args.complete and result_is_different: diff = Popen( [ 'diff', '-U', @@ -369,6 +369,15 @@ def run_tests_array(all_tests_with_params): status += " - result differs with:\n{}\n"\ .format(diff) else: + if args.complete: + o = Popen( + [ + 'cp', + stdout_file, + result_file + ], + stdout=PIPE, + universal_newlines=True).communicate()[0] passed_total += 1 failures_chain = 0 status += MSG_OK @@ -694,6 +703,10 @@ if __name__ == '__main__': parser.add_argument('--run-dir', nargs='+', help="Only run these tests in the dir") + parser.add_argument('--complete', + action='store_true', + default=False, + help="complete results") parser.add_argument('--stop', action='store_true', default=None, diff --git a/tests/shell_env.sh b/tests/shell_env.sh index faabb8cc4d83..0a979138231c 100755 --- a/tests/shell_env.sh +++ b/tests/shell_env.sh @@ -14,6 +14,8 @@ export QUERY_CLICKHOUSE_HTTP_HANDLER_PORT=${QUERY_CLICKHOUSE_HTTP_HANDLER_PORT:= export BENDSQL_CLIENT_CONNECT="bendsql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT} --quote-style=never" +export BENDSQL_CLIENT_OUTPUT_NULL="bendsql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT} --quote-style=never --output null" + # share client export QUERY_MYSQL_HANDLER_SHARE_PROVIDER_PORT="18000" diff --git a/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test b/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test index 65ab8a82a226..fee31e3da19b 100644 --- a/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test +++ b/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test @@ -26,7 +26,7 @@ remove @data/unload/parquet/null_if/ query copy into @data/unload/parquet/null_if from string ---- -3 56 365 +3 56 387 statement ok drop file format if exists parquet_null_if diff --git a/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test b/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test index 6d5759dea221..f7079fb7435f 100644 --- a/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test +++ b/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test @@ -10,7 +10,7 @@ remove @data/parquet/unload/uuid query copy into @data/parquet/unload/uuid/ from (select 1 as a) file_format = (type = parquet) ---- -1 1 356 +1 1 374 query 
diff --git a/tests/shell_env.sh b/tests/shell_env.sh
index faabb8cc4d83..0a979138231c 100755
--- a/tests/shell_env.sh
+++ b/tests/shell_env.sh
@@ -14,6 +14,8 @@ export QUERY_CLICKHOUSE_HTTP_HANDLER_PORT=${QUERY_CLICKHOUSE_HTTP_HANDLER_PORT:=
 
 export BENDSQL_CLIENT_CONNECT="bendsql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT} --quote-style=never"
 
+export BENDSQL_CLIENT_OUTPUT_NULL="bendsql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT} --quote-style=never --output null"
+
 # share client
 export QUERY_MYSQL_HANDLER_SHARE_PROVIDER_PORT="18000"
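BENDSQL_CLIENT_OUTPUT_NULL differs from BENDSQL_CLIENT_CONNECT only in --output null, which discards result rows, including the affected-row count bendsql prints for DML. The test scripts below pipe their INSERT/UPDATE/DELETE statements through it to keep those counts out of .result files; where a script keeps plain BENDSQL_CLIENT_CONNECT for DML, the counts show up as the +1/+2 lines added to expectations. A short sketch, assuming a table t already exists:

. tests/shell_env.sh

echo "insert into t values(1),(2)" | $BENDSQL_CLIENT_OUTPUT_NULL   # prints nothing
echo "insert into t values(3)" | $BENDSQL_CLIENT_CONNECT           # prints the count: 1
echo "select count(*) from t" | $BENDSQL_CLIENT_CONNECT            # prints 3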
diff --git a/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test b/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test
index 65ab8a82a226..fee31e3da19b 100644
--- a/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test
+++ b/tests/sqllogictests/suites/stage/formats/parquet/options/null_if.test
@@ -26,7 +26,7 @@ remove @data/unload/parquet/null_if/
 query 
 copy into @data/unload/parquet/null_if from string
 ----
-3 56 365
+3 56 387
 
 statement ok
 drop file format if exists parquet_null_if
diff --git a/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test b/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test
index 6d5759dea221..f7079fb7435f 100644
--- a/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test
+++ b/tests/sqllogictests/suites/stage/formats/parquet/options/parquet_missing_uuid.test
@@ -10,7 +10,7 @@ remove @data/parquet/unload/uuid
 query 
 copy into @data/parquet/unload/uuid/ from (select 1 as a) file_format = (type = parquet)
 ----
-1 1 356
+1 1 374
 
 query error column id doesn't exist
 copy into t_uuid from @data/parquet/unload/uuid file_format = (type = parquet) RETURN_FAILED_ONLY=TRUE
@@ -22,7 +22,7 @@ select * from t_uuid
 query 
 copy into @data/parquet/unload/uuid/ from (select 1 as a) file_format = (type = parquet)
 ----
-1 1 356
+1 1 374
 
 statement ok
 truncate table t_uuid
diff --git a/tests/suites/0_stateless/03_dml/03_0016_update_with_lock.sh b/tests/suites/0_stateless/03_dml/03_0016_update_with_lock.sh
index eb6cc61e72c3..2158ec97ab19 100755
--- a/tests/suites/0_stateless/03_dml/03_0016_update_with_lock.sh
+++ b/tests/suites/0_stateless/03_dml/03_0016_update_with_lock.sh
@@ -12,7 +12,7 @@ echo "set global enable_table_lock = 1" | $BENDSQL_CLIENT_CONNECT
 for i in $(seq 1 10);do
     (
         j=$(($i+1))
-        echo "insert into test_update.t values($i, $j)" | $BENDSQL_CLIENT_CONNECT
+        echo "insert into test_update.t values($i, $j)" | $BENDSQL_CLIENT_OUTPUT_NULL
     )&
 done
 wait
@@ -23,7 +23,7 @@ echo "select count() from test_update.t where a + 1 = b" | $BENDSQL_CLIENT_CONNECT
 echo "Test table lock for update"
 for i in $(seq 1 10);do
     (
-        echo "update test_update.t set b = $i where a = $i" | $BENDSQL_CLIENT_CONNECT
+        echo "update test_update.t set b = $i where a = $i" | $BENDSQL_CLIENT_OUTPUT_NULL
     )&
 done
 wait
diff --git a/tests/suites/0_stateless/05_hints/05_0001_set_var.result b/tests/suites/0_stateless/05_hints/05_0001_set_var.result
index 3be3d021e378..45cbaeaebadf 100644
--- a/tests/suites/0_stateless/05_hints/05_0001_set_var.result
+++ b/tests/suites/0_stateless/05_hints/05_0001_set_var.result
@@ -20,7 +20,8 @@ storage_read_buffer_size 1048576
 4
 America/Toronto
 America/Toronto
+1
 2022-02-02 03:00:00
 2022-02-02 03:00:00
-1 13 387
+1 13 427
 Asia/Shanghai
diff --git a/tests/suites/0_stateless/05_hints/05_0001_set_var.sh b/tests/suites/0_stateless/05_hints/05_0001_set_var.sh
index 83595c491f1e..3b75b9a248da 100755
--- a/tests/suites/0_stateless/05_hints/05_0001_set_var.sh
+++ b/tests/suites/0_stateless/05_hints/05_0001_set_var.sh
@@ -18,23 +18,21 @@ echo "select /*+ SET_VAR(storage_read_buffer_size=200) SET_VAR(timezone=x) */ na
 echo "drop database if exists set_var;" | $BENDSQL_CLIENT_CONNECT
 echo "create database set_var;" | $BENDSQL_CLIENT_CONNECT
 echo "create table set_var.test(id int);" | $BENDSQL_CLIENT_CONNECT
-echo "insert /*+SET_VAR(timezone='Asia/Shanghai') SET_VAR(storage_read_buffer_size=200)*/ into set_var.test values(1)" | $BENDSQL_CLIENT_CONNECT
-echo "insert /*+SET_VAR(timezone='Asia/Shanghai') (storage_read_buffer_size=200)*/ into set_var.test values(3)" | $BENDSQL_CLIENT_CONNECT
+echo "insert /*+SET_VAR(timezone='Asia/Shanghai') SET_VAR(storage_read_buffer_size=200)*/ into set_var.test values(1)" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "insert /*+SET_VAR(timezone='Asia/Shanghai') (storage_read_buffer_size=200)*/ into set_var.test values(3)" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "select /*+SET_VAR(timezone='Asia/Shanghai') SET_VAR(storage_read_buffer_size=200)*/ * from set_var.test order by id" | $BENDSQL_CLIENT_CONNECT
 echo "select /*+SET_VAR(timezone='Asia/Shanghai') (storage_read_buffer_size=200)*/ id from set_var.test order by id" | $BENDSQL_CLIENT_CONNECT
-echo "update /*+SET_VAR(timezone='Asia/Shanghai') SET_VAR(storage_read_buffer_size=200)*/ set_var.test set id=2 where id=1" | $BENDSQL_CLIENT_CONNECT
-echo "update /*+SET_VAR(timezone='Asia/Shanghai') (storage_read_buffer_size=200)*/ set_var.test set id=4 where id=3" | $BENDSQL_CLIENT_CONNECT
+echo "update /*+SET_VAR(timezone='Asia/Shanghai') SET_VAR(storage_read_buffer_size=200)*/ set_var.test set id=2 where id=1" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "update /*+SET_VAR(timezone='Asia/Shanghai') (storage_read_buffer_size=200)*/ set_var.test set id=4 where id=3" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "select * from set_var.test order by id" | $BENDSQL_CLIENT_CONNECT
-echo "delete /*+SET_VAR(timezone='Asia/Shanghai') SET_VAR(storage_read_buffer_size=200)*/ from set_var.test where id=2" | $BENDSQL_CLIENT_CONNECT
-echo "delete /*+SET_VAR(timezone='Asia/Shanghai') (storage_read_buffer_size=200)*/ from set_var.test where id=4" | $BENDSQL_CLIENT_CONNECT
+echo "delete /*+SET_VAR(timezone='Asia/Shanghai') SET_VAR(storage_read_buffer_size=200)*/ from set_var.test where id=2" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "delete /*+SET_VAR(timezone='Asia/Shanghai') (storage_read_buffer_size=200)*/ from set_var.test where id=4" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "select * from set_var.test" | $BENDSQL_CLIENT_CONNECT
-
 echo "set timezone='America/Toronto'; select /*+SET_VAR(timezone='Asia/Shanghai') */ timezone(); select timezone();" | $BENDSQL_CLIENT_CONNECT
 echo "create table set_var.t(c1 timestamp)" | $BENDSQL_CLIENT_CONNECT
 # Toronto and Shanghai time diff is 13 hours.
 echo "set timezone='America/Toronto'; insert /*+SET_VAR(timezone='Asia/Shanghai') */ into set_var.t values('2022-02-02 03:00:00'); select /*+SET_VAR(timezone='Asia/Shanghai') */ * from set_var.t; select * from set_var.t;" | $BENDSQL_CLIENT_CONNECT
 echo "drop database set_var;" | $BENDSQL_CLIENT_CONNECT
-
 echo "drop stage if exists s2" | $BENDSQL_CLIENT_CONNECT
 echo "create stage s2" | $BENDSQL_CLIENT_CONNECT
 echo "copy /*+SET_VAR(timezone='Asia/Shanghai') */ into @s2 from (select timezone()); " | $BENDSQL_CLIENT_CONNECT
diff --git a/tests/suites/0_stateless/05_hints/05_0002_deduplicate_label.sh b/tests/suites/0_stateless/05_hints/05_0002_deduplicate_label.sh
index 49294b6de2e5..495540495883 100755
--- a/tests/suites/0_stateless/05_hints/05_0002_deduplicate_label.sh
+++ b/tests/suites/0_stateless/05_hints/05_0002_deduplicate_label.sh
@@ -12,8 +12,8 @@ echo "drop stage if exists s5_1;" | $BENDSQL_CLIENT_CONNECT
 
 echo "CREATE TABLE t5(a Int, b bool) Engine = Fuse;" | $BENDSQL_CLIENT_CONNECT
 
-echo "INSERT /*+ SET_VAR(deduplicate_label='insert-test') */ INTO t5 (a, b) VALUES(1, false)" | $BENDSQL_CLIENT_CONNECT
-echo "INSERT /*+ SET_VAR(deduplicate_label='insert-test') */ INTO t5 (a, b) VALUES(1, false)" | $BENDSQL_CLIENT_CONNECT
+echo "INSERT /*+ SET_VAR(deduplicate_label='insert-test') */ INTO t5 (a, b) VALUES(1, false)" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "INSERT /*+ SET_VAR(deduplicate_label='insert-test') */ INTO t5 (a, b) VALUES(1, false)" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "select * from t5" | $BENDSQL_CLIENT_CONNECT
 
 echo "CREATE STAGE s5_1;" | $BENDSQL_CLIENT_CONNECT
@@ -23,12 +23,12 @@ echo "CREATE STAGE s5;" | $MYSQL_CLINEENRT_CONNECT
 echo "copy /*+SET_VAR(deduplicate_label='copy-test')*/ into @s5 from (select * from t5);" | $MYSQL_CLINEENRT_CONNECT
 echo "select * from @s5;" | $MYSQL_CLINEENRT_CONNECT
 
-echo "UPDATE /*+ SET_VAR(deduplicate_label='update-test') */ t5 SET a = 20 WHERE b = false;" | $BENDSQL_CLIENT_CONNECT
-echo "UPDATE /*+ SET_VAR(deduplicate_label='update-test') */ t5 SET a = 30 WHERE b = false;" | $BENDSQL_CLIENT_CONNECT
+echo "UPDATE /*+ SET_VAR(deduplicate_label='update-test') */ t5 SET a = 20 WHERE b = false;" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "UPDATE /*+ SET_VAR(deduplicate_label='update-test') */ t5 SET a = 30 WHERE b = false;" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "select * from t5" | $BENDSQL_CLIENT_CONNECT
 
-echo "replace /*+ SET_VAR(deduplicate_label='replace-test') */ into t5 on(a,b) values(40,false);" | $BENDSQL_CLIENT_CONNECT
-echo "replace /*+ SET_VAR(deduplicate_label='replace-test') */ into t5 on(a,b) values(50,false);" | $BENDSQL_CLIENT_CONNECT
+echo "replace /*+ SET_VAR(deduplicate_label='replace-test') */ into t5 on(a,b) values(40,false);" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "replace /*+ SET_VAR(deduplicate_label='replace-test') */ into t5 on(a,b) values(50,false);" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "select * from t5 order by a" | $BENDSQL_CLIENT_CONNECT
 
 echo "drop table if exists t5;" | $BENDSQL_CLIENT_CONNECT
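For context on the suite above: deduplicate_label makes otherwise identical DML idempotent, so the second statement carrying the same label is dropped server-side. Condensed sketch of the asserted behaviour, reusing the suite's t5 table; the label value is illustrative:

echo "INSERT /*+ SET_VAR(deduplicate_label='demo-label') */ INTO t5 (a, b) VALUES(1, false)" | $BENDSQL_CLIENT_OUTPUT_NULL
echo "INSERT /*+ SET_VAR(deduplicate_label='demo-label') */ INTO t5 (a, b) VALUES(1, false)" | $BENDSQL_CLIENT_OUTPUT_NULL
echo "select count(*) from t5" | $BENDSQL_CLIENT_CONNECT   # 1: the repeat was deduplicated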
"$CURDIR"/../../../shell_env.sh -## Create table t12_0005 -echo "create table t12_0005(a int, b int) change_tracking=true" | $BENDSQL_CLIENT_CONNECT -echo "insert into t12_0005 values(1, 1),(2, 1)" | $BENDSQL_CLIENT_CONNECT +## Create or replace table t12_0005 +echo "create or replace table t12_0005(a int, b int) change_tracking=true" | $BENDSQL_CLIENT_CONNECT +echo "insert into t12_0005 values(1, 1),(2, 1)" | $BENDSQL_CLIENT_OUTPUT_NULL -echo "update t12_0005 set b = 2 where a = 2" | $BENDSQL_CLIENT_CONNECT -echo "delete from t12_0005 where a = 1" | $BENDSQL_CLIENT_CONNECT -echo "insert into t12_0005 values(3, 3)" | $BENDSQL_CLIENT_CONNECT +echo "update t12_0005 set b = 2 where a = 2" | $BENDSQL_CLIENT_OUTPUT_NULL +echo "delete from t12_0005 where a = 1" | $BENDSQL_CLIENT_OUTPUT_NULL +echo "insert into t12_0005 values(3, 3)" | $BENDSQL_CLIENT_OUTPUT_NULL echo "latest snapshot should contain 2 rows" echo "select count(*) from t12_0005" | $BENDSQL_CLIENT_CONNECT diff --git a/tests/suites/0_stateless/16_flashback/16_0001_flashback.sh b/tests/suites/0_stateless/16_flashback/16_0001_flashback.sh index 12365a655bf4..1df02d0c7801 100755 --- a/tests/suites/0_stateless/16_flashback/16_0001_flashback.sh +++ b/tests/suites/0_stateless/16_flashback/16_0001_flashback.sh @@ -6,10 +6,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "create table t16(c int not null)" | $BENDSQL_CLIENT_CONNECT # the first snapshot contains 2 rows -echo "insert into t16 values(1),(2)" | $BENDSQL_CLIENT_CONNECT +echo "insert into t16 values(1),(2)" | $BENDSQL_CLIENT_OUTPUT_NULL # the second(last) snapshot should contain 3 rows -echo "insert into t16 values(3)" | $BENDSQL_CLIENT_CONNECT +echo "insert into t16 values(3)" | $BENDSQL_CLIENT_OUTPUT_NULL # flash back to the second(last) snapshot should be ok, and have no effects SNAPSHOT_ID=$(echo "select snapshot_id from fuse_snapshot('default','t16') where row_count=3" | $BENDSQL_CLIENT_CONNECT) diff --git a/tests/suites/0_stateless/17_altertable/17_0001_time_travel_alter_add_drop_column_select_at.result b/tests/suites/0_stateless/17_altertable/17_0001_time_travel_alter_add_drop_column_select_at.result index 43b5eb68fc7b..f29ce171c749 100644 --- a/tests/suites/0_stateless/17_altertable/17_0001_time_travel_alter_add_drop_column_select_at.result +++ b/tests/suites/0_stateless/17_altertable/17_0001_time_travel_alter_add_drop_column_select_at.result @@ -1,4 +1,6 @@ two insertions +2 +1 latest snapshot should contain 3 rows 3 alter table add a column diff --git a/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.result b/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.result index a93b26018936..ebd3f748ca94 100644 --- a/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.result +++ b/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.result @@ -1,3 +1,6 @@ +2 +1 +1 checking that there should are 3 snapshots before purge true alter table add a column @@ -10,6 +13,10 @@ checking that after purge (by snapshot id) there should be 4 snapshots left true checking that after purge (by snapshot id) there should be 4 rows left true +2 +1 +1 +1 checking that there should are 4 snapshots before purge true alter table add a column diff --git a/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.result b/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.result index 5d1033867122..6359aa85295b 100644 --- 
diff --git a/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.result b/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.result
index 5d1033867122..6359aa85295b 100644
--- a/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.result
+++ b/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.result
@@ -1,8 +1,16 @@
+1
 alter table add a column
+1
 update table column
+1
 alter table drop a column
 update table column
+0
+1
 alter table add a column
+1
 update table column
+1
 alter table drop a column
 update table column
+0
diff --git a/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.result b/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.result
index 5285368b2ce8..25230790a0eb 100644
--- a/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.result
+++ b/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.result
@@ -1,3 +1,4 @@
+1
 Error: APIError: QueryFailed: [1301]table option snapshot_location is invalid for alter table statement
 Error: APIError: QueryFailed: [1301]table option snapshot_loc is invalid for alter table statement
 Error: APIError: QueryFailed: [1301]table option extrenal_location is invalid for alter table statement
diff --git a/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.sh b/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.sh
index 8661cd32eee7..898defacbf06 100755
--- a/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.sh
+++ b/tests/suites/0_stateless/17_altertable/17_0004_alter_table_set_options.sh
@@ -3,13 +3,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 . "$CURDIR"/../../../shell_env.sh
 
-echo "create table t(a int not null)" | $BENDSQL_CLIENT_CONNECT
+echo "create or replace table t(a int not null)" | $BENDSQL_CLIENT_CONNECT
 echo "insert into t values(1)" | $BENDSQL_CLIENT_CONNECT
 
 # get snapshot location
 SNAPSHOT_LOCATION=$(echo "select _snapshot_name from t;" | $BENDSQL_CLIENT_CONNECT)
 
-echo "create table t2(a int not null)" | $BENDSQL_CLIENT_CONNECT
+echo "create or replace table t2(a int not null)" | $BENDSQL_CLIENT_CONNECT
 echo "alter table t2 set options(snapshot_location = '$SNAPSHOT_LOCATION',block_per_segment = 500)" | $BENDSQL_CLIENT_CONNECT
 echo "alter table t2 set options(snapshot_loc = '$SNAPSHOT_LOCATION',block_per_segment = 500)" | $BENDSQL_CLIENT_CONNECT
 echo "alter table t2 set options(extrenal_location = '$SNAPSHOT_LOCATION',block_per_segment = 500)" | $BENDSQL_CLIENT_CONNECT
@@ -22,7 +22,7 @@ echo "alter table t2 set options(storage_format = 'memory')" | $BENDSQL_CLIENT_CONNECT
 echo "alter table t2 set options(bloom_index_columns = 'b')" | $BENDSQL_CLIENT_CONNECT
 
 # valid bloom index column data type.
-echo "create table t3(a decimal(4,2) not null)" | $BENDSQL_CLIENT_CONNECT +echo "create or replace table t3(a decimal(4,2) not null)" | $BENDSQL_CLIENT_CONNECT echo "alter table t3 set options(bloom_index_columns = 'a')" | $BENDSQL_CLIENT_CONNECT #drop table diff --git a/tests/suites/0_stateless/17_altertable/17_0005_alter_table_modify_column_type.result b/tests/suites/0_stateless/17_altertable/17_0005_alter_table_modify_column_type.result index 139dc0df5d10..dcb833a3e94c 100644 --- a/tests/suites/0_stateless/17_altertable/17_0005_alter_table_modify_column_type.result +++ b/tests/suites/0_stateless/17_altertable/17_0005_alter_table_modify_column_type.result @@ -1,3 +1,4 @@ +1 1 2 3 a VARCHAR NO '' b INT NO 0 @@ -6,27 +7,36 @@ c INT NO 0 a FLOAT NO 0 b VARCHAR NO '' c INT NO 0 +1 Error: APIError: QueryFailed: [1006]fail to auto cast column a (String) to column a (Float32) invalid float literal while evaluating function `to_float32('a')` in expr `to_float32(a)` Error: APIError: QueryFailed: [1058]Cannot find column b Error: APIError: QueryFailed: [1006]null value in column `a` of table `c` violates not-null constraint +1 0 1 Error: APIError: QueryFailed: [1006]invalid float literal while evaluating function `to_float32('a')` in expr `to_float32('a')` 0 1 +1 0 1 1.2 2 +1 1 10 1 10 1 10 1.01 +1 1 10 1.01 10 2 2.2 begin test default column +1 1 1 +1 1 1 not 2 begin test not NULL column +1 1 1 Error: APIError: QueryFailed: [1006]null value in column `a` of table `f` violates not-null constraint +1 1 1 2 a VARCHAR NO '' diff --git a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result index 2f5bc74f89c4..34b3b7f21d68 100644 --- a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result +++ b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result @@ -7,6 +7,7 @@ select 1 from (select f2(f1(10))); Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage. select * from system.one where f2(f1(1)); Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage. +1 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage. 1 NULL Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage. @@ -16,15 +17,19 @@ Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is requ Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage. Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. 
diff --git a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result
index 2f5bc74f89c4..34b3b7f21d68 100644
--- a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result
+++ b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result
@@ -7,6 +7,7 @@ select 1 from (select f2(f1(10)));
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 select * from system.one where f2(f1(1));
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
+1
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 1 NULL
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
@@ -16,15 +17,19 @@ Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
+1
+2
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f1 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
+2
 === Only Has Privilege on f1 ===
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 2
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
+1
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 1 NULL
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
@@ -34,25 +39,34 @@ Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
+1
+2
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF f2 for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
+2
 === Has Privilege on f1, f2 ===
 1
 2
 1 1 1
+1
+1
 2 NULL
 1
+1
 2
 0
 0
+0
+2
 100 200
 100 200
 100 200
+2
 4
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Usage] is required on UDF b for user 'test-user'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
diff --git a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.sh b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.sh
index 0722bfcbfcd3..531684fc8e7f 100755
--- a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.sh
+++ b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.sh
@@ -15,8 +15,8 @@ echo "drop table if exists default.t2;" | $BENDSQL_CLIENT_CONNECT
 echo "CREATE FUNCTION f1 AS (p) -> (p)" | $BENDSQL_CLIENT_CONNECT
 echo "CREATE FUNCTION f2 AS (p) -> (p)" | $BENDSQL_CLIENT_CONNECT
 
-echo "create table default.t(i UInt8 not null);" | $BENDSQL_CLIENT_CONNECT
-echo "create table default.t2(i UInt8 not null);" | $BENDSQL_CLIENT_CONNECT
+echo "create or replace table default.t(i UInt8 not null);" | $BENDSQL_CLIENT_CONNECT
+echo "create or replace table default.t2(i UInt8 not null);" | $BENDSQL_CLIENT_CONNECT
 
 ## create user
 echo "create user 'test-user' IDENTIFIED BY '$TEST_USER_PASSWORD'" | $BENDSQL_CLIENT_CONNECT
diff --git a/tests/suites/0_stateless/18_rbac/18_0002_ownership_cover.result b/tests/suites/0_stateless/18_rbac/18_0002_ownership_cover.result
index ed06eb7e20a2..bf32053ef5ed 100644
--- a/tests/suites/0_stateless/18_rbac/18_0002_ownership_cover.result
+++ b/tests/suites/0_stateless/18_rbac/18_0002_ownership_cover.result
@@ -1,7 +1,8 @@
 === test db/table ===
+1
 200
 === test stage ===
-1 8 374
+1 8 400
 0
 === test udf ===
 2
@@ -54,6 +55,8 @@ Error: APIError: QueryFailed: [1063]Permission denied: User 'u1'@'%' does not ha
 t1
 t2
 1
+1
+1
 2
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'db1'.'t1' for user 'u2'@'%' with roles [public,role2]
 2
diff --git a/tests/suites/0_stateless/18_rbac/18_0003_db_visibility.result b/tests/suites/0_stateless/18_rbac/18_0003_db_visibility.result
index bf74c79bf228..942844017e8f 100644
--- a/tests/suites/0_stateless/18_rbac/18_0003_db_visibility.result
+++ b/tests/suites/0_stateless/18_rbac/18_0003_db_visibility.result
@@ -1,3 +1,4 @@
+1
 === test u1 with role1 ===
 information_schema
 system
@@ -6,6 +7,7 @@ db1
 true
 system
 information_schema
 1
+1
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'db_root'.'t1' for user 'u1'@'%' with roles [public,role1]
 db1
 information_schema
@@ -14,6 +16,7 @@ system
 db1
 information_schema
 system
+1
 db1
 db2
 information_schema
@@ -24,6 +27,7 @@ system
 === test u3 with role2 ===
 information_schema
 system
+1
 db_u3
 information_schema
 system
diff --git a/tests/suites/0_stateless/18_rbac/18_0004_view_privilege.result b/tests/suites/0_stateless/18_rbac/18_0004_view_privilege.result
index 3cd3028747ba..e40ab99fb02c 100644
--- a/tests/suites/0_stateless/18_rbac/18_0004_view_privilege.result
+++ b/tests/suites/0_stateless/18_rbac/18_0004_view_privilege.result
@@ -12,6 +12,8 @@
 >>>> drop view if exists v_t1
 >>>> create table t(id int)
 >>>> insert into t values(1)
+1
+1
 >>>> revoke create on default.* from role role1
 need failed: with 1063
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'default'.'t' for user 'owner'@'%' with roles [public,role1]
diff --git a/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result b/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result
index d5e57fd5e0c9..fd9a369f184f 100644
--- a/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result
+++ b/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result
@@ -13,6 +13,7 @@ information_schema
 system
 test -- insert
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Insert] is required on 'default'.'default'.'t20_0012' for user 'test-user'@'%' with roles [public]
+2
 1
 2
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Delete] is required on 'default'.'default'.'t20_0012' for user 'test-user'@'%' with roles [public,test-role1,test-role2]
@@ -20,13 +21,17 @@ Error: APIError: QueryFailed: [1063]Permission denied: privilege [Delete] is required on 'default'.'default'.'t20_0012' for user 'test-user'@'%' with roles [public,test-role1,test-role2]
 2
 test -- update
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Update] is required on 'default'.'default'.'t20_0012' for user 'test-user'@'%' with roles [public,test-role1,test-role2]
+1
 2 3
 test -- delete
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Delete] is required on 'default'.'default'.'t20_0012' for user 'test-user'@'%' with roles [public,test-role1,test-role2]
+1
 true
 test -- insert overwrite
+1
 2
+1
 3
 test -- optimize table
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Super] is required on 'default'.'default'.'t20_0012' for user 'test-user'@'%' with roles [public,test-role1,test-role2]
@@ -36,6 +41,8 @@ test -- select
 1
 1
 1
+1
+1
 test -- select view
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'default2'.'v_t20_0012' for user 'test-user'@'%' with roles [public,test-role1,test-role2]
 1
@@ -94,13 +101,15 @@ Error: APIError: QueryFailed: [1063]Permission denied: privilege READ is require
 Error: APIError: QueryFailed: [1063]Permission denied: No privilege on database root_db for user b.
 Error: APIError: QueryFailed: [1063]Permission denied: No privilege on table root_table for user b.
 Error: APIError: QueryFailed: [1063]Permission denied: No privilege on table root_table for user b.
-1 1 356
+1 1 374
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'default'.'t1' for user 'b'@'%' with roles [public]
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Read] is required on STAGE s3 for user 'b'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'default'.'t' for user 'b'@'%' with roles [public]
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Read] is required on STAGE s3 for user 'b'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Read] is required on STAGE s3 for user 'b'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'default'.'t1' for user 'b'@'%' with roles [public]
+0
+1
 a b/data_UUID_0000_00000000.parquet 1 0 NULL NULL
 === check db/table_id ===
 Read s3 USER b GRANT Read ON STAGE s3 TO 'b'@'%'
@@ -111,6 +120,7 @@ SELECT default.default.t1 USER b GRANT SELECT ON 'default'.'default'.'t1' TO 'b
 SELECT,INSERT default.c.t USER b GRANT SELECT,INSERT ON 'default'.'c'.'t' TO 'b'@'%'
 OWNERSHIP default.default.t2 USER b GRANT OWNERSHIP ON 'default'.'default'.'t2' TO 'b'@'%'
 1
+1
 Read s3 USER b GRANT Read ON STAGE s3 TO 'b'@'%'
 CREATE default USER b GRANT CREATE ON 'default'.'default'.* TO 'b'@'%'
 SELECT system USER b GRANT SELECT ON 'default'.'system'.* TO 'b'@'%'
@@ -119,6 +129,7 @@ SELECT default.default.t1 USER b GRANT SELECT ON 'default'.'default'.'t1' TO 'b
 SELECT,INSERT default.c.t1 USER b GRANT SELECT,INSERT ON 'default'.'c'.'t1' TO 'b'@'%'
 OWNERSHIP default.default.t2 USER b GRANT OWNERSHIP ON 'default'.'default'.'t2' TO 'b'@'%'
 1
+1
 2
 Read s3 USER b GRANT Read ON STAGE s3 TO 'b'@'%'
 CREATE default USER b GRANT CREATE ON 'default'.'default'.* TO 'b'@'%'
@@ -128,5 +139,6 @@ SELECT default.default.t1 USER b GRANT SELECT ON 'default'.'default'.'t1' TO 'b
 SELECT,INSERT default.d.t1 USER b GRANT SELECT,INSERT ON 'default'.'d'.'t1' TO 'b'@'%'
 OWNERSHIP default.default.t2 USER b GRANT OWNERSHIP ON 'default'.'default'.'t2' TO 'b'@'%'
 1
+1
 2
 3
diff --git a/tests/suites/0_stateless/18_rbac/18_0008_privilege_ownership.result b/tests/suites/0_stateless/18_rbac/18_0008_privilege_ownership.result
index 9dd04e588970..4f251bf80796 100644
--- a/tests/suites/0_stateless/18_rbac/18_0008_privilege_ownership.result
+++ b/tests/suites/0_stateless/18_rbac/18_0008_privilege_ownership.result
@@ -1,3 +1,4 @@
+3
 1
 2
 3
diff --git a/tests/suites/0_stateless/18_rbac/18_0009_set_role.result b/tests/suites/0_stateless/18_rbac/18_0009_set_role.result
index 648b29ce7a35..70900519b359 100644
--- a/tests/suites/0_stateless/18_rbac/18_0009_set_role.result
+++ b/tests/suites/0_stateless/18_rbac/18_0009_set_role.result
@@ -7,14 +7,25 @@ testrole1
 Error: APIError: QueryFailed: [2206]Invalid role nonexisting_role for current session, available: public,testrole1,testrole2,testrole3
 Error: APIError: QueryFailed: [2206]Invalid role testrole4 for current session, available: public,testrole1,testrole2,testrole3
 -- test 3: set role as testrole1, secondary roles as NONE, can access table1, can not access table2
+0
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Insert] is required on 'default'.'default'.'t20_0015_table2' for user 'testuser1'@'%' with roles [testrole1,public]
 -- test 4: set role as testrole2, secondary roles as NONE, can access table2, can not access table1
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Insert] is required on 'default'.'default'.'t20_0015_table1' for user 'testuser1'@'%' with roles [testrole2,public]
+0
 -- test 5: set role as testrole3, secondary roles as NONE, can access table2, can not access table1, because role3 inherited from role2
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Insert] is required on 'default'.'default'.'t20_0015_table1' for user 'testuser1'@'%' with roles [testrole3,public,testrole2]
+0
 -- test 6: set role as testrole1, secondary roles as ALL, can access both table1 and table2
+0
+0
 -- test 7: set role as testrole1, testrole2, secondary roles defaults as ALL, can both table1 and table2
+0
+0
+0
+0
 -- test 8: not change role, secondary roles defaults as ALL, can both table1 and table2
+0
+0
 -- test 9: set default role as testrole1, secondary roles as NONE, current role will still be testrole1 in another session
 testrole1
 -- test 10: set default role as nonexisting_role, will fail
diff --git a/tests/suites/0_stateless/20+_others/20_0010_distinct_aggr.result b/tests/suites/0_stateless/20+_others/20_0010_distinct_aggr.result
index 257e563266b4..3aae936930bb 100644
--- a/tests/suites/0_stateless/20+_others/20_0010_distinct_aggr.result
+++ b/tests/suites/0_stateless/20+_others/20_0010_distinct_aggr.result
@@ -1 +1,101 @@
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
 102
diff --git a/tests/suites/0_stateless/20+_others/20_0011_purge_before.result b/tests/suites/0_stateless/20+_others/20_0011_purge_before.result
index 5cb94f5c24ca..c8bc057b7f59 100644
--- a/tests/suites/0_stateless/20+_others/20_0011_purge_before.result
+++ b/tests/suites/0_stateless/20+_others/20_0011_purge_before.result
@@ -1,9 +1,15 @@
+2
+1
+1
 checking that there should are 3 snapshots before purge
 true
 checking that after purge (by snapshot id) there should be 2 snapshots left
 true
 checking that after purge (by snapshot id) there should be 4 rows left
 true
+2
+1
+1
 checking that there should are 3 snapshots before purge
 true
 checking that after purge (by timestamp) there should be 1 snapshot left
diff --git a/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.result b/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.result
index ef6df0c1a475..6887d3cff3d7 100644
--- a/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.result
+++ b/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.result
@@ -1,4 +1,6 @@
-2 10 363
+1
+1
+2 10 399
 expects .stats.write_progress.rows be 2
 expects .error be null
 2
diff --git a/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.sh b/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.sh
index 0bd510417b0c..a4f1a39acbdf 100755
--- a/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.sh
+++ b/tests/suites/0_stateless/20+_others/20_0015_compact_hook_stas_issue_13947.sh
@@ -5,10 +5,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # set up
 cat <
 >>>> create or replace database test_fuse_time_travel_size
 >>>> create table test_fuse_time_travel_size.t(c int) 'fs:///tmp/test_fuse_time_travel_size/'
 >>>> insert into test_fuse_time_travel_size.t values (1),(2)
+2
 Size difference is less than 10 bytes
 >>>> alter table test_fuse_time_travel_size.t SET OPTIONS (data_retention_period_in_hours = 240);
 >>>> drop table test_fuse_time_travel_size.t
diff --git a/tests/suites/1_stateful/00_stage/00_0001_copy_into_stage.result b/tests/suites/1_stateful/00_stage/00_0001_copy_into_stage.result
index 453a0eb871bf..b4beef56607e 100644
--- a/tests/suites/1_stateful/00_stage/00_0001_copy_into_stage.result
+++ b/tests/suites/1_stateful/00_stage/00_0001_copy_into_stage.result
@@ -1,4 +1,14 @@
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
 20 160 160
-20 530 726
+20 530 818
 2
 20 160 160
diff --git a/tests/suites/1_stateful/00_stage/00_0007_copy_into_stage2.result b/tests/suites/1_stateful/00_stage/00_0007_copy_into_stage2.result
index 6c164cc0e466..42cf78bc47e7 100644
--- a/tests/suites/1_stateful/00_stage/00_0007_copy_into_stage2.result
+++ b/tests/suites/1_stateful/00_stage/00_0007_copy_into_stage2.result
@@ -1,3 +1,13 @@
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
 20 160 160
 ---csv
 1
diff --git a/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result b/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result
index 1d1d6fa1d178..44ed5f3f4f70 100644
--- a/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result
+++ b/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result
@@ -1,3 +1,13 @@
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
 ==== check internal stage write priv ===
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Write] is required on STAGE s2 for user 'u1'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Select] is required on 'default'.'default'.'test_table' for user 'u1'@'%' with roles [public]
@@ -17,7 +27,7 @@ Error: APIError: QueryFailed: [1063]Permission denied: privilege [Write] is requ
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Read] is required on STAGE presign_stage for user 'u1'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 000
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Write] is required on STAGE s3 for user 'u1'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
-1 1 356
+1 1 374
 Error: APIError: QueryFailed: [1063]Permission denied: privilege [Read] is required on STAGE s3 for user 'u1'@'%' with roles [public]. Note: Please ensure that your current role have the appropriate permissions to create a new Warehouse|Database|Table|UDF|Stage.
 Error: APIError: QueryFailed: [1063]Permission denied: privilege READ is required on stage s3 for user 'u1'@'%'
 Error: APIError: QueryFailed: [1063]Permission denied: privilege READ is required on stage s3 for user 'u1'@'%'
@@ -33,3 +43,5 @@ i0.csv 2 0 NULL NULL
 1 1
 2 2
 === check access user's local stage ===
+0
+0
diff --git a/tests/suites/1_stateful/00_stage/00_0012_stage_with_connection.result b/tests/suites/1_stateful/00_stage/00_0012_stage_with_connection.result
index f563602f5183..5fb48244de4a 100644
--- a/tests/suites/1_stateful/00_stage/00_0012_stage_with_connection.result
+++ b/tests/suites/1_stateful/00_stage/00_0012_stage_with_connection.result
@@ -1,13 +1,14 @@
 >>>> drop table if exists my_table;
 >>>> create table my_table (a int);
 >>>> insert into my_table values (1), (2), (4);
+3
 >>>> drop stage if exists my_stage;
 >>>> drop connection if exists my_conn;
 >>>> create connection my_conn storage_type = 's3' access_key_id ='minioadmin' secret_access_key ='minioadmin' endpoint_url='http://127.0.0.1:9900'
 >>>> create stage my_stage url= 's3://testbucket/admin/tempdata/' connection = (connection_name='my_conn');
 >>>> remove @my_stage;
 >>>> copy into @my_stage/a.csv from my_table
-3 13 365
+3 13 401
 >>>> select * from @my_stage order by a;
 1
 2
diff --git a/tests/suites/1_stateful/00_stage/00_0015_unload_output.result b/tests/suites/1_stateful/00_stage/00_0015_unload_output.result
index 87a009e8dc56..56294498558f 100644
--- a/tests/suites/1_stateful/00_stage/00_0015_unload_output.result
+++ b/tests/suites/1_stateful/00_stage/00_0015_unload_output.result
@@ -3,17 +3,27 @@
 >>>> drop stage if exists s1
 >>>> create stage s1
 >>>> insert into t1 values(0)
+1
 >>>> insert into t1 values(1)
+1
 >>>> insert into t1 values(2)
+1
 >>>> insert into t1 values(3)
+1
 >>>> insert into t1 values(4)
+1
 >>>> insert into t1 values(5)
+1
 >>>> insert into t1 values(6)
+1
 >>>> insert into t1 values(7)
+1
 >>>> insert into t1 values(8)
+1
 >>>> insert into t1 values(9)
+1
+copy1
 <<<<
->>>> copy /*+ set_var(max_threads=1) */ into @s1/a/bc from (select * from t1) file_format = (type=csv) max_file_size=1 detailed_output=true
 a/bc/data_UUID_0000_00000000.csv 2 1
 a/bc/data_UUID_0000_00000001.csv 2 1
 a/bc/data_UUID_0000_00000002.csv 2 1
@@ -24,39 +34,40 @@ a/bc/data_UUID_0000_00000006.csv 2 1
 a/bc/data_UUID_0000_00000007.csv 2 1
 a/bc/data_UUID_0000_00000008.csv 2 1
 a/bc/data_UUID_0000_00000009.csv 2 1
+copy2
 >>>> copy into @s1/a/bc from (select * from t1) file_format = (type=csv) max_file_size=1 detailed_output=false
 10 20 20
 <<<<
+copy3
 <<<<
->>>> copy /*+ set_var(max_threads=1) */ into @s1/a/bc from (select * from t1) max_file_size=1 detailed_output=true
-a/bc/data_UUID_0000_00000000.parquet 357 1
-a/bc/data_UUID_0000_00000001.parquet 357 1
-a/bc/data_UUID_0000_00000002.parquet 357 1
-a/bc/data_UUID_0000_00000003.parquet 357 1
-a/bc/data_UUID_0000_00000004.parquet 357 1
-a/bc/data_UUID_0000_00000005.parquet 357 1
-a/bc/data_UUID_0000_00000006.parquet 357 1
-a/bc/data_UUID_0000_00000007.parquet 357 1
-a/bc/data_UUID_0000_00000008.parquet 357 1
-a/bc/data_UUID_0000_00000009.parquet 357 1
+a/bc/data_UUID_0000_00000000.parquet 393 1
+a/bc/data_UUID_0000_00000001.parquet 393 1
+a/bc/data_UUID_0000_00000002.parquet 393 1
+a/bc/data_UUID_0000_00000003.parquet 393 1
+a/bc/data_UUID_0000_00000004.parquet 393 1
+a/bc/data_UUID_0000_00000005.parquet 393 1
+a/bc/data_UUID_0000_00000006.parquet 393 1
+a/bc/data_UUID_0000_00000007.parquet 393 1
+a/bc/data_UUID_0000_00000008.parquet 393 1
+a/bc/data_UUID_0000_00000009.parquet 393 1
 >>>> unload path
 >>>> copy /*+ set_var(max_threads=1) */ into @s1 from (select 1) detailed_output=true
-data_UUID_0000_00000000.parquet 356 1
+data_UUID_0000_00000000.parquet 374 1
 <<<<
 >>>> copy /*+ set_var(max_threads=1) */ into @s1/ from (select 1) detailed_output=true
-data_UUID_0000_00000000.parquet 356 1
+data_UUID_0000_00000000.parquet 374 1
 <<<<
 >>>> copy /*+ set_var(max_threads=1) */ into @s1/a from (select 1) detailed_output=true
-a/data_UUID_0000_00000000.parquet 356 1
+a/data_UUID_0000_00000000.parquet 374 1
 <<<<
 >>>> copy /*+ set_var(max_threads=1) */ into @s1/a/ from (select 1) detailed_output=true
-a/data_UUID_0000_00000000.parquet 356 1
+a/data_UUID_0000_00000000.parquet 374 1
 <<<<
 >>>> copy /*+ set_var(max_threads=1) */ into @s1/a/bc from (select 1) detailed_output=true
-a/bc/data_UUID_0000_00000000.parquet 356 1
+a/bc/data_UUID_0000_00000000.parquet 374 1
 <<<<
 >>>> copy /*+ set_var(max_threads=1) */ into @s1/a/data_ from (select 1) detailed_output=true
-a/data_UUID_0000_00000000.parquet 356 1
+a/data_UUID_0000_00000000.parquet 374 1
 <<<<
 >>>> drop stage if exists s1
 >>>> drop table if exists t1
diff --git a/tests/suites/1_stateful/00_stage/00_0015_unload_output.sh b/tests/suites/1_stateful/00_stage/00_0015_unload_output.sh
index d589f61a152e..dbc7d6f0bbf7 100755
--- a/tests/suites/1_stateful/00_stage/00_0015_unload_output.sh
+++ b/tests/suites/1_stateful/00_stage/00_0015_unload_output.sh
@@ -17,11 +17,14 @@ for i in `seq 0 9`;do
     stmt "insert into t1 values($i)"
 done
 
-query "copy /*+ set_var(max_threads=1) */ into @s1/a/bc from (select * from t1) file_format = (type=csv) max_file_size=1 detailed_output=true" | $RM_UUID | sort
+echo "copy1"
+query "copy /*+ set_var(max_threads=1) */ into @s1/a/bc from (select * from t1) file_format = (type=csv) max_file_size=1 detailed_output=true" | $RM_UUID | tail -n +2 | sort
 
+echo "copy2"
 query "copy into @s1/a/bc from (select * from t1) file_format = (type=csv) max_file_size=1 detailed_output=false"
 
-query "copy /*+ set_var(max_threads=1) */ into @s1/a/bc from (select * from t1) max_file_size=1 detailed_output=true" | $RM_UUID | sort
+echo "copy3"
+query "copy /*+ set_var(max_threads=1) */ into @s1/a/bc from (select * from t1) max_file_size=1 detailed_output=true" | $RM_UUID | tail -n +2 | sort
 
 query "copy into @s1/a/bc from (select * from t1) max_file_size=1 detailed_output=false" | $MYSQL
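The tail -n +2 inserted into both detailed_output pipelines above appears to exist because the query helper echoes the statement (the ">>>> copy ..." line) ahead of its result rows; dropping that first line before sort keeps only per-file rows in the sorted listing, while the new echo "copyN" markers label each block in the .result file. The pattern, sketched with the suite's own helpers:

echo "copy1"   # stable block marker, emitted outside the sorted region
query "copy into @s1/a/bc from (select * from t1) max_file_size=1 detailed_output=true" \
    | $RM_UUID | tail -n +2 | sort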
diff --git a/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.result b/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.result
index 0bbad3668ea7..b3b4e1a5e0da 100755
--- a/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.result
+++ b/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.result
@@ -5,4 +5,8 @@
 1
 1
 2
+>>>> remove @s1;
+>>>> select count() from @s1 where a >= 0 and b <= 1000;
+1000
+>>>> remove @s1;
 >>>> drop stage if exists s1;
diff --git a/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.sh b/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.sh
index 3d032ab8b90d..029b983d983f 100755
--- a/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.sh
+++ b/tests/suites/1_stateful/00_stage/00_0017_copy_into_parquet.sh
@@ -22,4 +22,13 @@ echo "copy /*+ set_var(max_threads=4) set_var(max_memory_usage=128000000) */ int
 # copy /*+ set_var(max_threads=4) set_var(max_memory_usage=256000000) */ not working in cluster mode
 echo "set max_threads=4; set max_memory_usage=256000000; copy /*+ set_var(max_threads=4) set_var(max_memory_usage=256000000) */ into @s1/ from (select * from numbers(60000000)) max_file_size=64000000 detailed_output=true;" | $BENDSQL_CLIENT_CONNECT | wc -l | sed 's/ //g'
 
+stmt "remove @s1;"
+
+for i in `seq 1 50`;do
+  echo "copy into @s1/ from (select number a, number + 1 b from numbers(20))" | $BENDSQL_CLIENT_CONNECT > /dev/null 2>&1
+done
+
+stmt "select count() from @s1 where a >= 0 and b <= 1000;"
+
+stmt "remove @s1;"
 stmt "drop stage if exists s1;"
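The loop added above is a small many-files regression check: 50 COPYs of 20 rows each leave 50 parquet files in @s1, and since every generated row satisfies a >= 0 and b <= 1000 (b peaks at 20), the filtered count must return exactly 50 x 20 = 1000 rows. With chunk statistics now written on unload, the predicate can be checked against per-file min/max before any data pages are read. Standalone sketch of the same check, reusing the suite's stmt helper:

for i in `seq 1 50`; do
  echo "copy into @s1/ from (select number a, number + 1 b from numbers(20))" | $BENDSQL_CLIENT_CONNECT > /dev/null 2>&1
done
# all 50 x 20 = 1000 rows pass the filter
stmt "select count() from @s1 where a >= 0 and b <= 1000;"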
diff --git a/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result b/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result
index 8965b7099a46..0d72fca1852e 100644
--- a/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result
+++ b/tests/suites/1_stateful/02_query/02_0001_create_table_with_external_location.result
@@ -1,2 +1,4 @@
+1
+1
 888
 1024
diff --git a/tests/suites/1_stateful/05_formats/05_00_01_load_unload_all.result b/tests/suites/1_stateful/05_formats/05_00_01_load_unload_all.result
index de6d253846c0..5075675eccff 100755
--- a/tests/suites/1_stateful/05_formats/05_00_01_load_unload_all.result
+++ b/tests/suites/1_stateful/05_formats/05_00_01_load_unload_all.result
@@ -1,16 +1,19 @@
 ---CSV
+2
 2 179 179
 "a""b",1.0,"['a""b']","{""k"":""v""}","2044-05-06 10:25:02.868894",10.01,"{'k1':10,'k2':20}","('a',5)"
 \N,2.0,"['a''b']","[1]","2044-05-06 10:25:02.868894",-10.01,"{}","('b',10)"
 unload1.txt 2 0 NULL NULL
 2 179 179
 ---TSV
+2
 2 151 151
 a"b 1.0 ['a"b'] {"k":"v"} 2044-05-06 10:25:02.868894 10.01 {'k1':10,'k2':20} ('a',5)
 \N 2.0 ['a''b'] [1] 2044-05-06 10:25:02.868894 -10.01 {} ('b',10)
 unload1.txt 2 0 NULL NULL
 2 151 151
 ---NDJSON
+2
 2 244 244
 {"a":"a\"b","b":1.0,"c":["a\"b"],"d":{"k":"v"},"e":"2044-05-06 10:25:02.868894","f":10.01,"g":{"k1":10,"k2":20},"h":{"1":"a","2":5}}
 {"a":null,"b":2.0,"c":["a'b"],"d":[1],"e":"2044-05-06 10:25:02.868894","f":-10.01,"g":{},"h":{"1":"b","2":10}}
diff --git a/tests/suites/1_stateful/05_formats/05_05_parquet/05_05_01_parquet_load_unload.result b/tests/suites/1_stateful/05_formats/05_05_parquet/05_05_01_parquet_load_unload.result
index 1e08a537ba24..0066945e036c 100644
--- a/tests/suites/1_stateful/05_formats/05_05_parquet/05_05_01_parquet_load_unload.result
+++ b/tests/suites/1_stateful/05_formats/05_05_parquet/05_05_01_parquet_load_unload.result
@@ -23,19 +23,20 @@
 ['[1]'],
 [('b',10)]
 );
+2
 >>>> select * from test_load_unload
 a"b 1 ['a"b'] {"k":"v"} 2044-05-06 10:25:02.868894 10.01 ('a',5) ['{"k":"v"}'] [('a',5)]
 NULL 2 ['a'b'] [1] 2044-05-06 10:25:02.868894 -10.01 ('b',10) ['[1]'] [('b',10)]
 <<<<
 >>>> copy into @s1/unload1/ from test_load_unload
-2 390 2703
+2 390 3096
 >>>> truncate table test_load_unload
 >>>> copy into test_load_unload from @s1/unload1.parquet force=true;
 unload1.parquet 2 0 NULL NULL
 begin diff select
 end diff
 >>>> copy into @s1/unload2/ from test_load_unload
-2 390 2703
+2 390 3096
 begin diff parquet
 end diff
 >>>> truncate table test_load_unload
diff --git a/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_00_basic.result b/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_00_basic.result
index 069a4e19ea94..6d16d279d5e2 100644
--- a/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_00_basic.result
+++ b/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_00_basic.result
@@ -1,15 +1,17 @@
+2
 --- named internal stage
-2 53 699
+2 53 791
 1 2 3
 4 5 6
-2 53 699
+2 53 791
 --- external stage
 1 2 3
 4 5 6
 --- file_format
 1 2 3
 4 5 6
+2
 --- variant named internal stage
-2 70 677
+2 70 767
 1 [1,2,3]
 2 {"k":"v"}
diff --git a/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_06_transform.result b/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_06_transform.result
index d1c385cd68ed..a560d769a5c9 100755
--- a/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_06_transform.result
+++ b/tests/suites/1_stateful/08_select_stage/08_00_parquet/08_00_06_transform.result
@@ -1,4 +1,5 @@
-2 18 524
+2
+2 18 596
 --- copy from uri with transform
 2
 5
diff --git a/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh b/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh
index 8bc427a65ce9..a8233b7b5111 100755
--- a/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh
+++ b/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 echo "drop database if exists test_vacuum_drop_dry_run" | $BENDSQL_CLIENT_CONNECT
 echo "CREATE DATABASE test_vacuum_drop_dry_run" | $BENDSQL_CLIENT_CONNECT
 echo "create table test_vacuum_drop_dry_run.a(c int)" | $BENDSQL_CLIENT_CONNECT
-echo "INSERT INTO test_vacuum_drop_dry_run.a VALUES (1)" | $BENDSQL_CLIENT_CONNECT
+echo "INSERT INTO test_vacuum_drop_dry_run.a VALUES (1)" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "drop table test_vacuum_drop_dry_run.a" | $BENDSQL_CLIENT_CONNECT
 count=$(echo "set data_retention_time_in_days=0; vacuum drop table dry run" | $BENDSQL_CLIENT_CONNECT | wc -l)
 if [[ ! "$count" ]]; then
@@ -30,7 +30,7 @@ echo "drop database if exists test_vacuum_drop_4" | $BENDSQL_CLIENT_CONNECT
 echo "CREATE DATABASE test_vacuum_drop" | $BENDSQL_CLIENT_CONNECT
 echo "create table test_vacuum_drop.a(c int)" | $BENDSQL_CLIENT_CONNECT
 
-echo "INSERT INTO test_vacuum_drop.a VALUES (1)" | $BENDSQL_CLIENT_CONNECT
+echo "INSERT INTO test_vacuum_drop.a VALUES (1)" | $BENDSQL_CLIENT_OUTPUT_NULL
 
 echo "select * from test_vacuum_drop.a" | $BENDSQL_CLIENT_CONNECT
 
@@ -40,7 +40,7 @@ echo "set data_retention_time_in_days=0;vacuum drop table from test_vacuum_drop"
 
 echo "create table test_vacuum_drop.b(c int)" | $BENDSQL_CLIENT_CONNECT
 
-echo "INSERT INTO test_vacuum_drop.b VALUES (2)" | $BENDSQL_CLIENT_CONNECT
+echo "INSERT INTO test_vacuum_drop.b VALUES (2)" | $BENDSQL_CLIENT_OUTPUT_NULL
 
 echo "drop table test_vacuum_drop.b" | $BENDSQL_CLIENT_CONNECT
 
@@ -54,12 +54,12 @@ echo "select * from test_vacuum_drop.b" | $BENDSQL_CLIENT_CONNECT
 echo "CREATE DATABASE test_vacuum_drop_2" | $BENDSQL_CLIENT_CONNECT
 echo "create table test_vacuum_drop_2.a(c int)" | $BENDSQL_CLIENT_CONNECT
 
-echo "INSERT INTO test_vacuum_drop_2.a VALUES (3)" | $BENDSQL_CLIENT_CONNECT
+echo "INSERT INTO test_vacuum_drop_2.a VALUES (3)" | $BENDSQL_CLIENT_OUTPUT_NULL
 
 echo "CREATE DATABASE test_vacuum_drop_3" | $BENDSQL_CLIENT_CONNECT
 echo "create table test_vacuum_drop_3.a(c int)" | $BENDSQL_CLIENT_CONNECT
 
-echo "INSERT INTO test_vacuum_drop_3.a VALUES (4)" | $BENDSQL_CLIENT_CONNECT
+echo "INSERT INTO test_vacuum_drop_3.a VALUES (4)" | $BENDSQL_CLIENT_OUTPUT_NULL
 
 echo "select * from test_vacuum_drop_2.a" | $BENDSQL_CLIENT_CONNECT
 echo "select * from test_vacuum_drop_3.a" | $BENDSQL_CLIENT_CONNECT
@@ -87,7 +87,7 @@ table_inserts=(
 )
 
 for i in "${table_inserts[@]}"; do
-    echo "$i" | $BENDSQL_CLIENT_CONNECT
+    echo "$i" | $BENDSQL_CLIENT_OUTPUT_NULL
 done
 
 ## Select table
@@ -100,7 +100,7 @@ echo "set data_retention_time_in_days=0;vacuum drop table" | $BENDSQL_CLIENT_CONNECT
 ## dry run
 echo "CREATE DATABASE test_vacuum_drop_4" | $BENDSQL_CLIENT_CONNECT
 echo "create table test_vacuum_drop_4.a(c int)" | $BENDSQL_CLIENT_CONNECT
"INSERT INTO test_vacuum_drop_4.a VALUES (1)" | $BENDSQL_CLIENT_CONNECT +echo "INSERT INTO test_vacuum_drop_4.a VALUES (1)" | $BENDSQL_CLIENT_OUTPUT_NULL echo "select * from test_vacuum_drop_4.a" | $BENDSQL_CLIENT_CONNECT echo "drop table test_vacuum_drop_4.a" | $BENDSQL_CLIENT_CONNECT echo "set data_retention_time_in_days=0;vacuum drop table dry run" | $BENDSQL_CLIENT_CONNECT > /dev/null @@ -109,17 +109,17 @@ echo "select * from test_vacuum_drop_4.a" | $BENDSQL_CLIENT_CONNECT # check vacuum drop table with the same name echo "create table test_vacuum_drop_4.b(c int)" | $BENDSQL_CLIENT_CONNECT -echo "INSERT INTO test_vacuum_drop_4.b VALUES (1)" | $BENDSQL_CLIENT_CONNECT +echo "INSERT INTO test_vacuum_drop_4.b VALUES (1)" | $BENDSQL_CLIENT_OUTPUT_NULL echo "drop table test_vacuum_drop_4.b" | $BENDSQL_CLIENT_CONNECT echo "create table test_vacuum_drop_4.b(c int)" | $BENDSQL_CLIENT_CONNECT -echo "INSERT INTO test_vacuum_drop_4.b VALUES (2)" | $BENDSQL_CLIENT_CONNECT +echo "INSERT INTO test_vacuum_drop_4.b VALUES (2)" | $BENDSQL_CLIENT_OUTPUT_NULL echo "select * from test_vacuum_drop_4.b" | $BENDSQL_CLIENT_CONNECT echo "set data_retention_time_in_days=0; vacuum drop table" | $BENDSQL_CLIENT_CONNECT > /dev/null echo "select * from test_vacuum_drop_4.b" | $BENDSQL_CLIENT_CONNECT ## test vacuum table output echo "create table test_vacuum_drop_4.c(c int)" | $BENDSQL_CLIENT_CONNECT -echo "INSERT INTO test_vacuum_drop_4.c VALUES (1)" | $BENDSQL_CLIENT_CONNECT +echo "INSERT INTO test_vacuum_drop_4.c VALUES (1)" | $BENDSQL_CLIENT_OUTPUT_NULL count=$(echo "set data_retention_time_in_days=0; vacuum table test_vacuum_drop_4.c" | $BENDSQL_CLIENT_CONNECT | awk '{print $9}') if [[ "$count" != "4" ]]; then echo "vacuum table, count:$count" diff --git a/tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.result b/tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.result index ab6c604174f9..cbc235260b8e 100644 --- a/tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.result +++ b/tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.result @@ -4,9 +4,13 @@ >>>> create table test_vacuum_drop_table_continue.c(c int) >>>> create table test_vacuum_drop_table_continue.d(c int) >>>> insert into test_vacuum_drop_table_continue.a values (1) +1 >>>> insert into test_vacuum_drop_table_continue.b values (1) +1 >>>> insert into test_vacuum_drop_table_continue.c values (1) +1 >>>> insert into test_vacuum_drop_table_continue.d values (1) +1 >>>> drop database test_vacuum_drop_table_continue >>>> set data_retention_time_in_days=0; vacuum drop table >>>> undrop database test_vacuum_drop_table_continue diff --git a/tests/suites/5_ee/01_vacuum/01_003_vacuum_table_only_orphans.result b/tests/suites/5_ee/01_vacuum/01_003_vacuum_table_only_orphans.result index b6ff4ca271c5..ce3eacb02322 100644 --- a/tests/suites/5_ee/01_vacuum/01_003_vacuum_table_only_orphans.result +++ b/tests/suites/5_ee/01_vacuum/01_003_vacuum_table_only_orphans.result @@ -1,8 +1,11 @@ >>>> create or replace database test_vacuum_table_only_orphans >>>> create or replace table test_vacuum_table_only_orphans.a(c int) 'fs:///tmp/test_vacuum_table_only_orphans/' >>>> insert into test_vacuum_table_only_orphans.a values (1) +1 >>>> insert into test_vacuum_table_only_orphans.a values (2) +1 >>>> insert into test_vacuum_table_only_orphans.a values (3) +1 before purge 4 4 diff --git a/tests/suites/5_ee/04_attach_read_only/02_0004_attach_table.result b/tests/suites/5_ee/04_attach_read_only/02_0004_attach_table.result index 
index 874cc27767ea..41723e33d003 100644
--- a/tests/suites/5_ee/04_attach_read_only/02_0004_attach_table.result
+++ b/tests/suites/5_ee/04_attach_read_only/02_0004_attach_table.result
@@ -6,8 +6,11 @@
 >>>> drop connection if exists my_conn;
 >>>> create connection my_conn storage_type = 's3' access_key_id ='minioadmin' secret_access_key ='minioadmin' endpoint_url='http://127.0.0.1:9900'
 >>>> insert into table_from(a) values(0)
+1
 >>>> insert into table_from(a) values(1)
+1
 >>>> insert into table_from(a) values(2)
+1
 #### attaching table
 #### select attach table
 >>>> select * from table_to order by a;
diff --git a/tests/suites/5_ee/04_attach_read_only/02_0005_attach_table_read_only.sh b/tests/suites/5_ee/04_attach_read_only/02_0005_attach_table_read_only.sh
index b635d7cb37a8..a15fa88f19c8 100755
--- a/tests/suites/5_ee/04_attach_read_only/02_0005_attach_table_read_only.sh
+++ b/tests/suites/5_ee/04_attach_read_only/02_0005_attach_table_read_only.sh
@@ -22,7 +22,7 @@ echo "select sum(number) from attach_read_only;" | $BENDSQL_CLIENT_CONNECT
 
 # 2. data should be in-sync
 echo "attach table should reflects the mutation of table being attached"
-echo "delete from base where number > 0;" | $BENDSQL_CLIENT_CONNECT
+echo "delete from base where number > 0;" | $BENDSQL_CLIENT_OUTPUT_NULL
 echo "content of base table after deletion"
 echo "select * from attach_read_only order by number;" | $BENDSQL_CLIENT_CONNECT
 echo "content of test attach only table after deletion"
diff --git a/tests/suites/5_ee/04_attach_read_only/04_0001_check_mutations.sh b/tests/suites/5_ee/04_attach_read_only/04_0001_check_mutations.sh
index 55edabf1875f..3337cd18eccb 100755
--- a/tests/suites/5_ee/04_attach_read_only/04_0001_check_mutations.sh
+++ b/tests/suites/5_ee/04_attach_read_only/04_0001_check_mutations.sh
@@ -8,7 +8,7 @@ echo "create or replace database test_attach_only;" | $BENDSQL_CLIENT_CONNECT
 
 # mutation related enterprise features
 echo "create or replace table test_attach_only.test_json(id int, val json) 's3://testbucket/admin/data/' connection=(access_key_id ='minioadmin' secret_access_key ='minioadmin' endpoint_url='${STORAGE_S3_ENDPOINT_URL}');" | $BENDSQL_CLIENT_CONNECT
-echo "insert into test_attach_only.test_json values(1, '{\"a\":33,\"b\":44}'),(2, '{\"a\":55,\"b\":66}')" | $BENDSQL_CLIENT_CONNECT
+echo "insert into test_attach_only.test_json values(1, '{\"a\":33,\"b\":44}'),(2, '{\"a\":55,\"b\":66}')" | $BENDSQL_CLIENT_OUTPUT_NULL
 
 storage_prefix=$(mysql -uroot -h127.0.0.1 -P3307 -e "set global hide_options_in_show_create_table=0;show create table test_attach_only.test_json" | grep -i snapshot_location | awk -F'SNAPSHOT_LOCATION='"'"'|_ss' '{print $2}')
 echo "attach table test_attach_only.test_json_read_only 's3://testbucket/admin/data/$storage_prefix' connection=(access_key_id ='minioadmin' secret_access_key ='minioadmin' endpoint_url='${STORAGE_S3_ENDPOINT_URL}');" | $BENDSQL_CLIENT_CONNECT
diff --git a/tests/suites/5_ee/05_stream/05_0000_ee_stream.sh b/tests/suites/5_ee/05_stream/05_0000_ee_stream.sh
index 4158ac518ac3..b7b3bd97fa6d 100755
--- a/tests/suites/5_ee/05_stream/05_0000_ee_stream.sh
+++ b/tests/suites/5_ee/05_stream/05_0000_ee_stream.sh
@@ -7,9 +7,9 @@ echo "drop database if exists db_stream" | $BENDSQL_CLIENT_CONNECT
 
 echo "CREATE DATABASE db_stream" | $BENDSQL_CLIENT_CONNECT
 echo "create table db_stream.t(a int) change_tracking = true" | $BENDSQL_CLIENT_CONNECT
-echo "insert into db_stream.t values(1)" | $BENDSQL_CLIENT_CONNECT
+echo "insert into db_stream.t values(1)" | $BENDSQL_CLIENT_OUTPUT_NULL
echo "create stream default.test_s on table db_stream.t comment = 'test'" | $BENDSQL_CLIENT_CONNECT -echo "insert into db_stream.t values(2)" | $BENDSQL_CLIENT_CONNECT +echo "insert into db_stream.t values(2)" | $BENDSQL_CLIENT_OUTPUT_NULL BASE_ROW_ID=$(echo "select _base_row_id from default.test_s" | $BENDSQL_CLIENT_CONNECT) echo "select change\$row_id='$BASE_ROW_ID' from default.test_s" | $BENDSQL_CLIENT_CONNECT diff --git a/tests/suites/5_ee/05_stream/05_0001_ee_stream_consume.sh b/tests/suites/5_ee/05_stream/05_0001_ee_stream_consume.sh index fe98d0262a50..0f302f6c59c6 100755 --- a/tests/suites/5_ee/05_stream/05_0001_ee_stream_consume.sh +++ b/tests/suites/5_ee/05_stream/05_0001_ee_stream_consume.sh @@ -13,7 +13,7 @@ echo "create table db_stream.sink(a int)" | $BENDSQL_CLIENT_CONNECT # Define function to write data into the base table. write_to_base() { for i in {1..20}; do - echo "insert into db_stream.base select * from db_stream.rand limit 10" | $BENDSQL_CLIENT_CONNECT + echo "insert into db_stream.base select * from db_stream.rand limit 10" | $BENDSQL_CLIENT_OUTPUT_NULL if (( i % 5 == 0 )); then echo "optimize table db_stream.base compact" | $BENDSQL_CLIENT_CONNECT @@ -24,7 +24,7 @@ write_to_base() { # Define function to consume data from the stream into the sink table. consume_from_stream() { for i in {1..10}; do - echo "insert into db_stream.sink select a from db_stream.s" | $BENDSQL_CLIENT_CONNECT + echo "insert into db_stream.sink select a from db_stream.s" | $BENDSQL_CLIENT_OUTPUT_NULL done } @@ -40,7 +40,7 @@ wait $write_pid wait $consume_pid # Perform a final consume operation from the stream to ensure all data is consumed -echo "insert into db_stream.sink select a from db_stream.s" | $BENDSQL_CLIENT_CONNECT +echo "insert into db_stream.sink select a from db_stream.s" | $BENDSQL_CLIENT_OUTPUT_NULL # Fetch the counts and sums from both base and sink tables base_count=$(echo "SELECT COUNT(*) FROM db_stream.base;" | $BENDSQL_CLIENT_CONNECT) diff --git a/tests/suites/5_ee/05_stream/05_0002_ee_stream_create.sh b/tests/suites/5_ee/05_stream/05_0002_ee_stream_create.sh index e7bff2deb31b..492868825593 100755 --- a/tests/suites/5_ee/05_stream/05_0002_ee_stream_create.sh +++ b/tests/suites/5_ee/05_stream/05_0002_ee_stream_create.sh @@ -6,10 +6,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "drop database if exists db_stream" | $BENDSQL_CLIENT_CONNECT echo "create database db_stream" | $BENDSQL_CLIENT_CONNECT echo "create table db_stream.base(a int)" | $BENDSQL_CLIENT_CONNECT -echo "insert into db_stream.base values(1)" | $BENDSQL_CLIENT_CONNECT +echo "insert into db_stream.base values(1)" | $BENDSQL_CLIENT_OUTPUT_NULL echo "alter table db_stream.base set options(change_tracking = true)" | $BENDSQL_CLIENT_CONNECT -echo "insert into db_stream.base values(2)" | $BENDSQL_CLIENT_CONNECT -echo "insert into db_stream.base values(3)" | $BENDSQL_CLIENT_CONNECT +echo "insert into db_stream.base values(2)" | $BENDSQL_CLIENT_OUTPUT_NULL +echo "insert into db_stream.base values(3)" | $BENDSQL_CLIENT_OUTPUT_NULL BASE_ROW_ID=$(echo "select _base_row_id from db_stream.base where a = 3" | $BENDSQL_CLIENT_CONNECT) diff --git a/tests/suites/5_ee/05_stream/05_0003_ee_stream_copy_into_location.sh b/tests/suites/5_ee/05_stream/05_0003_ee_stream_copy_into_location.sh index 619a2b4e2eae..ddbecf5b0681 100755 --- a/tests/suites/5_ee/05_stream/05_0003_ee_stream_copy_into_location.sh +++ b/tests/suites/5_ee/05_stream/05_0003_ee_stream_copy_into_location.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) echo "create or replace database db_stream" | $BENDSQL_CLIENT_CONNECT echo "create table db_stream.t(c int)" | $BENDSQL_CLIENT_CONNECT echo "create stream db_stream.s on table db_stream.t" | $BENDSQL_CLIENT_CONNECT -echo "insert into db_stream.t values(1)" | $BENDSQL_CLIENT_CONNECT +echo "insert into db_stream.t values(1)" | $BENDSQL_CLIENT_OUTPUT_NULL echo "create or replace stage stage_05_0003" | $BENDSQL_CLIENT_CONNECT diff --git a/tests/suites/5_ee/06_inverted_index/06_0000_purge_inverted_index.sh b/tests/suites/5_ee/06_inverted_index/06_0000_purge_inverted_index.sh index aa68f1d5f716..9e1011f50396 100755 --- a/tests/suites/5_ee/06_inverted_index/06_0000_purge_inverted_index.sh +++ b/tests/suites/5_ee/06_inverted_index/06_0000_purge_inverted_index.sh @@ -28,7 +28,7 @@ echo "###################" echo "###1st insertion###" echo "###################" -echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_CONNECT +echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_OUTPUT_NULL echo "== number of snapshots (expects 1)==" # 1 snapshot for the init insertion echo "select snapshot_id, previous_snapshot_id from fuse_snapshot('db_purge_inverted_index', 'customer_feedback') limit 100" | $BENDSQL_CLIENT_CONNECT | wc -l @@ -46,7 +46,7 @@ echo "###################" echo "###2nd insertion###" echo "###################" -echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_CONNECT +echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_OUTPUT_NULL echo "== number of snapshots (expects 1)==" # NOTE: # - since previous snapshots should be purged, @@ -67,7 +67,7 @@ echo "###################" echo "###3nd insertion###" echo "###################" -echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_CONNECT +echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_OUTPUT_NULL echo "== number of snapshots (expects 1)==" echo "select snapshot_id, previous_snapshot_id from fuse_snapshot('db_purge_inverted_index', 'customer_feedback') limit 100" | $BENDSQL_CLIENT_CONNECT | wc -l @@ -101,7 +101,7 @@ echo "###################" echo "####new insertion##" echo "###################" -echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_CONNECT +echo "insert into ${TEST_DB}.customer_feedback values('a', 'b')" | $BENDSQL_CLIENT_OUTPUT_NULL echo "== number of snapshots (expects 1) ==" echo "select snapshot_id, previous_snapshot_id from fuse_snapshot('db_purge_inverted_index', 'customer_feedback') limit 100" | $BENDSQL_CLIENT_CONNECT | wc -l echo "== number of invert index files (expects 4) ==" diff --git a/tests/suites/5_ee/06_inverted_index/06_0001_index_visibility.sh b/tests/suites/5_ee/06_inverted_index/06_0001_index_visibility.sh index f0010bbe4c8c..abb24cf66795 100755 --- a/tests/suites/5_ee/06_inverted_index/06_0001_index_visibility.sh +++ b/tests/suites/5_ee/06_inverted_index/06_0001_index_visibility.sh @@ -10,7 +10,7 @@ echo "drop user if exists u2" | $BENDSQL_CLIENT_CONNECT echo "drop database if exists db1" | $BENDSQL_CLIENT_CONNECT echo "create database db1" | $BENDSQL_CLIENT_CONNECT echo "create table db1.t1(id int, title string, inverted index idx1(title))" | $BENDSQL_CLIENT_CONNECT -echo "insert into db1.t1 values(1, 'hello world')" | $BENDSQL_CLIENT_CONNECT +echo "insert into db1.t1 values(1, 'hello world')" | $BENDSQL_CLIENT_OUTPUT_NULL echo "create role role1;" | 
 echo "create role role2;" | $BENDSQL_CLIENT_CONNECT
 echo "grant select on db1.t1 to role role1;" | $BENDSQL_CLIENT_CONNECT
diff --git a/tests/suites/5_ee/07_failsafe/08_0000_amend_table.sh b/tests/suites/5_ee/07_failsafe/08_0000_amend_table.sh
index c7376450dc75..1361fda26821 100755
--- a/tests/suites/5_ee/07_failsafe/08_0000_amend_table.sh
+++ b/tests/suites/5_ee/07_failsafe/08_0000_amend_table.sh
@@ -12,9 +12,9 @@ echo "CREATE or replace stage test_amend_stage URL='s3://testbucket/test_amend/'
 
 # generate multiple blocks & segments
-echo "insert into test_failsafe.t select * from numbers(11)" | $BENDSQL_CLIENT_CONNECT
-echo "insert into test_failsafe.t select * from numbers(11)" | $BENDSQL_CLIENT_CONNECT
-echo "insert into test_failsafe.t select * from numbers(11)" | $BENDSQL_CLIENT_CONNECT
+echo "insert into test_failsafe.t select * from numbers(11)" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "insert into test_failsafe.t select * from numbers(11)" | $BENDSQL_CLIENT_OUTPUT_NULL
+echo "insert into test_failsafe.t select * from numbers(11)" | $BENDSQL_CLIENT_OUTPUT_NULL
 
 echo "remove @test_amend_stage" | $BENDSQL_CLIENT_CONNECT