diff --git a/Cargo.toml b/Cargo.toml
index 233e6644c39a1..cbbc8b78e3ef8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -288,6 +288,8 @@ new_without_default = "allow"
 # TODO: remove later https://github.com/rust-lang/rust-clippy/issues/12436
 mixed_attributes_style = "allow"
 too_long_first_doc_paragraph = "allow"
+str_to_string = "warn"
+string_to_string = "warn"
 
 [workspace.lints.rustdoc]
 private_intra_doc_links = "allow"
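The two new workspace lints drive every change below: clippy's `str_to_string` flags `.to_string()` called on a `&str`, and `string_to_string` flags `.to_string()` called on a `String`; both prefer the intent-revealing `.to_owned()` (or `.clone()` for an already-owned `String`). A minimal sketch of what the lints report — the `demo` function is illustrative, not code from this PR:

```rust
// Hypothetical snippet showing what the two new lints catch.
#![warn(clippy::str_to_string, clippy::string_to_string)]

fn demo() {
    let s: &str = "hello";
    let owned: String = s.to_string(); // warned by `str_to_string`: use `s.to_owned()`
    let copy: String = owned.to_string(); // warned by `string_to_string`: use `owned.clone()`

    // The preferred forms make the conversion (borrowed -> owned, owned -> owned) explicit:
    let owned2: String = s.to_owned();
    let copy2: String = owned2.clone();

    let _ = (owned, copy, owned2, copy2);
}
```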
diff --git a/src/batch/benches/filter.rs b/src/batch/benches/filter.rs
index f09603caf3698..496052f14f93a 100644
--- a/src/batch/benches/filter.rs
+++ b/src/batch/benches/filter.rs
@@ -30,7 +30,7 @@ fn create_filter_executor(chunk_size: usize, chunk_num: usize) -> BoxedExecutor
     Box::new(FilterExecutor::new(
         build_from_pretty("(equal:boolean (modulus:int8 $0:int8 2:int8) 0:int8)"),
         input,
-        "FilterBenchmark".to_string(),
+        "FilterBenchmark".to_owned(),
         CHUNK_SIZE,
     ))
 }
diff --git a/src/batch/benches/hash_agg.rs b/src/batch/benches/hash_agg.rs
index b4d773ae425f2..fc962e4819e90 100644
--- a/src/batch/benches/hash_agg.rs
+++ b/src/batch/benches/hash_agg.rs
@@ -105,7 +105,7 @@ fn create_hash_agg_executor(
         group_key_types,
         schema,
         input,
-        "HashAggExecutor".to_string(),
+        "HashAggExecutor".to_owned(),
         CHUNK_SIZE,
         MemoryContext::none(),
         None,
diff --git a/src/batch/src/executor/delete.rs b/src/batch/src/executor/delete.rs
index 6ffa6cb13dc13..3e3ce7aef0db4 100644
--- a/src/batch/src/executor/delete.rs
+++ b/src/batch/src/executor/delete.rs
@@ -246,7 +246,7 @@ mod tests {
            dml_manager,
            Box::new(mock_executor),
            1024,
-            "DeleteExecutor".to_string(),
+            "DeleteExecutor".to_owned(),
            false,
            0,
        ));
diff --git a/src/batch/src/executor/expand.rs b/src/batch/src/executor/expand.rs
index 166dbe90e507a..3069e02ac4ff7 100644
--- a/src/batch/src/executor/expand.rs
+++ b/src/batch/src/executor/expand.rs
@@ -164,7 +164,7 @@ mod tests {
            column_subsets,
            child: Box::new(mock_executor),
            schema: expand_schema,
-            identity: "ExpandExecutor".to_string(),
+            identity: "ExpandExecutor".to_owned(),
            chunk_size: CHUNK_SIZE,
        });
        let mut stream = expand_executor.execute();
diff --git a/src/batch/src/executor/filter.rs b/src/batch/src/executor/filter.rs
index 9463e85993549..b69cb319a1ac3 100644
--- a/src/batch/src/executor/filter.rs
+++ b/src/batch/src/executor/filter.rs
@@ -154,7 +154,7 @@ mod tests {
        let filter_executor = Box::new(FilterExecutor {
            expr: build_from_pretty("(greater_than:boolean $0:int4[] {2}:int4[])"),
            child: Box::new(mock_executor),
-            identity: "FilterExecutor".to_string(),
+            identity: "FilterExecutor".to_owned(),
            chunk_size: CHUNK_SIZE,
        });
 
@@ -207,7 +207,7 @@ mod tests {
        let filter_executor = Box::new(FilterExecutor {
            expr: build_from_pretty("(equal:boolean $0:int4 $1:int4)"),
            child: Box::new(mock_executor),
-            identity: "FilterExecutor".to_string(),
+            identity: "FilterExecutor".to_owned(),
            chunk_size: CHUNK_SIZE,
        });
        let fields = &filter_executor.schema().fields;
diff --git a/src/batch/src/executor/generic_exchange.rs b/src/batch/src/executor/generic_exchange.rs
index 2c51c24cb653b..0e73441a9f52a 100644
--- a/src/batch/src/executor/generic_exchange.rs
+++ b/src/batch/src/executor/generic_exchange.rs
@@ -296,7 +296,7 @@ mod tests {
                fields: vec![Field::unnamed(DataType::Int32)],
            },
            task_id: TaskId::default(),
-            identity: "GenericExchangeExecutor2".to_string(),
+            identity: "GenericExchangeExecutor2".to_owned(),
        });
 
        let mut stream = executor.execute();
diff --git a/src/batch/src/executor/group_top_n.rs b/src/batch/src/executor/group_top_n.rs
index fb21152eb18e7..5b9b31c5d4a79 100644
--- a/src/batch/src/executor/group_top_n.rs
+++ b/src/batch/src/executor/group_top_n.rs
@@ -292,7 +292,7 @@ mod tests {
            with_ties: false,
            group_key: vec![2],
            group_key_types: vec![DataType::Int32],
-            identity: "GroupTopNExecutor".to_string(),
+            identity: "GroupTopNExecutor".to_owned(),
            chunk_size: CHUNK_SIZE,
            mem_ctx,
        })
diff --git a/src/batch/src/executor/hash_agg.rs b/src/batch/src/executor/hash_agg.rs
index db2fb8515c44f..f3a27ae4d587f 100644
--- a/src/batch/src/executor/hash_agg.rs
+++ b/src/batch/src/executor/hash_agg.rs
@@ -817,7 +817,7 @@ mod tests {
            &agg_prost,
            src_exec,
            TaskId::default(),
-            "HashAggExecutor".to_string(),
+            "HashAggExecutor".to_owned(),
            CHUNK_SIZE,
            mem_context.clone(),
            None,
@@ -892,7 +892,7 @@ mod tests {
            &agg_prost,
            Box::new(src_exec),
            TaskId::default(),
-            "HashAggExecutor".to_string(),
+            "HashAggExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -959,7 +959,7 @@ mod tests {
            },
        );
        for i in 0..10 {
-            map.entry(i).or_insert_with(|| "i".to_string());
+            map.entry(i).or_insert_with(|| "i".to_owned());
        }
 
        for (k, v) in map {
@@ -1011,7 +1011,7 @@ mod tests {
            &agg_prost,
            Box::new(src_exec),
            TaskId::default(),
-            "HashAggExecutor".to_string(),
+            "HashAggExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -1105,7 +1105,7 @@ mod tests {
            &agg_prost,
            src_exec,
            TaskId::default(),
-            "HashAggExecutor".to_string(),
+            "HashAggExecutor".to_owned(),
            CHUNK_SIZE,
            mem_context.clone(),
            Some(SpillBackend::Memory),
diff --git a/src/batch/src/executor/hop_window.rs b/src/batch/src/executor/hop_window.rs
index 3cd99dd0e5b8e..972b65bf108e7 100644
--- a/src/batch/src/executor/hop_window.rs
+++ b/src/batch/src/executor/hop_window.rs
@@ -251,7 +251,7 @@ mod tests {
            schema,
            window_slide,
            window_size,
-            "test".to_string(),
+            "test".to_owned(),
            window_start_exprs,
            window_end_exprs,
            output_indices,
diff --git a/src/batch/src/executor/iceberg_scan.rs b/src/batch/src/executor/iceberg_scan.rs
index d17cb0083e39c..5efdbc25e0401 100644
--- a/src/batch/src/executor/iceberg_scan.rs
+++ b/src/batch/src/executor/iceberg_scan.rs
@@ -140,7 +140,7 @@ impl IcebergScanExecutor {
            .load_table_v2_with_metadata(self.table_meta)
            .await?;
        let data_types = self.schema.data_types();
-        let table_name = table.identifier().name().to_string();
+        let table_name = table.identifier().name().to_owned();
 
        let (mut position_delete_filter, data_file_scan_tasks) =
            match Option::take(&mut self.file_scan_tasks) {
@@ -336,7 +336,7 @@ impl PositionDeleteFilter {
                continue;
            }
            let entry = position_delete_file_path_pos_map
-                .entry(file_path.to_string())
+                .entry(file_path.to_owned())
                .or_default();
            // Split `pos` by `batch_size`, because the data file will also be split by `batch_size`
            let delete_vec_index = pos as usize / batch_size;
diff --git a/src/batch/src/executor/insert.rs b/src/batch/src/executor/insert.rs
index 6b7fe19228760..7028c441a1344 100644
--- a/src/batch/src/executor/insert.rs
+++ b/src/batch/src/executor/insert.rs
@@ -343,7 +343,7 @@ mod tests {
            dml_manager,
            Box::new(mock_executor),
            1024,
-            "InsertExecutor".to_string(),
+            "InsertExecutor".to_owned(),
            vec![0, 1, 2], // Ignoring insertion order
            vec![],
            row_id_index,
diff --git a/src/batch/src/executor/join/hash_join.rs b/src/batch/src/executor/join/hash_join.rs
index f4e4b2f270a42..b1e14f3b9d769 100644
--- a/src/batch/src/executor/join/hash_join.rs
+++ b/src/batch/src/executor/join/hash_join.rs
@@ -2625,7 +2625,7 @@ mod tests {
            vec![0],
            vec![null_safe],
            cond,
-            "HashJoinExecutor".to_string(),
+            "HashJoinExecutor".to_owned(),
            chunk_size,
            if test_spill {
                Some(SpillBackend::Memory)
diff --git a/src/batch/src/executor/join/local_lookup_join.rs b/src/batch/src/executor/join/local_lookup_join.rs
index 32c139c6929fe..496202321f887 100644
--- a/src/batch/src/executor/join/local_lookup_join.rs
+++ b/src/batch/src/executor/join/local_lookup_join.rs
@@ -128,7 +128,7 @@ impl InnerSideExecutorBuilder {
            plan: Some(PlanFragment {
                root: Some(PlanNode {
                    children: vec![],
-                    identity: "SeqScan".to_string(),
+                    identity: "SeqScan".to_owned(),
                    node_body: Some(self.create_row_seq_scan_node(id)?),
                }),
                exchange_info: Some(ExchangeInfo {
@@ -229,7 +229,7 @@ impl LookupExecutorBuilder for InnerSideExecutorBuilder {
        let plan_node = PlanNode {
            children: vec![],
-            identity: "LocalLookupJoinExchangeExecutor".to_string(),
+            identity: "LocalLookupJoinExchangeExecutor".to_owned(),
            node_body: Some(exchange_node),
        };
 
@@ -597,7 +597,7 @@ mod tests {
            schema: original_schema.clone(),
            output_indices: (0..original_schema.len()).collect(),
            chunk_size: CHUNK_SIZE,
-            identity: "TestLookupJoinExecutor".to_string(),
+            identity: "TestLookupJoinExecutor".to_owned(),
            shutdown_rx: ShutdownToken::empty(),
            mem_ctx: MemoryContext::none(),
        }
diff --git a/src/batch/src/executor/limit.rs b/src/batch/src/executor/limit.rs
index 08719b9e4efea..68055a1333753 100644
--- a/src/batch/src/executor/limit.rs
+++ b/src/batch/src/executor/limit.rs
@@ -187,7 +187,7 @@ mod tests {
            child: Box::new(mock_executor),
            limit,
            offset,
-            identity: "LimitExecutor2".to_string(),
+            identity: "LimitExecutor2".to_owned(),
        });
        let fields = &limit_executor.schema().fields;
        assert_eq!(fields[0].data_type, DataType::Int32);
@@ -309,7 +309,7 @@ mod tests {
            child: Box::new(mock_executor),
            limit,
            offset,
-            identity: "LimitExecutor2".to_string(),
+            identity: "LimitExecutor2".to_owned(),
        });
 
        let mut results = vec![];
diff --git a/src/batch/src/executor/managed.rs b/src/batch/src/executor/managed.rs
index 3c22b32c1b50b..ff77439c44d48 100644
--- a/src/batch/src/executor/managed.rs
+++ b/src/batch/src/executor/managed.rs
@@ -50,7 +50,7 @@ impl Executor for ManagedExecutor {
    #[try_stream(boxed, ok = DataChunk, error = BatchError)]
    async fn execute(mut self: Box<Self>) {
-        let input_desc = self.child.identity().to_string();
+        let input_desc = self.child.identity().to_owned();
        let span = tracing::info_span!("batch_executor", "otel.name" = input_desc);
 
        let mut child_stream = self.child.execute();
diff --git a/src/batch/src/executor/merge_sort_exchange.rs b/src/batch/src/executor/merge_sort_exchange.rs
index 1d6f5fc801e57..1e54e07016100 100644
--- a/src/batch/src/executor/merge_sort_exchange.rs
+++ b/src/batch/src/executor/merge_sort_exchange.rs
@@ -215,7 +215,7 @@ mod tests {
                fields: vec![Field::unnamed(DataType::Int32)],
            },
            TaskId::default(),
-            "MergeSortExchangeExecutor2".to_string(),
+            "MergeSortExchangeExecutor2".to_owned(),
            CHUNK_SIZE,
        ));
diff --git a/src/batch/src/executor/mod.rs b/src/batch/src/executor/mod.rs
index f144576fb5808..8fe346d380974 100644
--- a/src/batch/src/executor/mod.rs
+++ b/src/batch/src/executor/mod.rs
@@ -279,7 +279,7 @@ mod tests {
        let task_id = &TaskId {
            task_id: 1,
            stage_id: 1,
-            query_id: "test_query_id".to_string(),
+            query_id: "test_query_id".to_owned(),
        };
        let builder = ExecutorBuilder::new(
            &plan_node,
diff --git a/src/batch/src/executor/order_by.rs b/src/batch/src/executor/order_by.rs
index 042073fc51ff6..9016858e7bb44 100644
--- a/src/batch/src/executor/order_by.rs
+++ b/src/batch/src/executor/order_by.rs
@@ -462,7 +462,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            Arc::new(column_orders),
-            "SortExecutor2".to_string(),
+            "SortExecutor2".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -514,7 +514,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            Arc::new(column_orders),
-            "SortExecutor2".to_string(),
+            "SortExecutor2".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -566,7 +566,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            Arc::new(column_orders),
-            "SortExecutor2".to_string(),
+            "SortExecutor2".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -643,7 +643,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            Arc::new(column_orders),
-            "SortExecutor".to_string(),
+            "SortExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -725,7 +725,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            Arc::new(column_orders),
-            "SortExecutor".to_string(),
+            "SortExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -833,7 +833,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            column_orders.into(),
-            "SortExecutor".to_string(),
+            "SortExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -982,7 +982,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            Arc::new(column_orders),
-            "SortExecutor".to_string(),
+            "SortExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
            None,
@@ -1024,7 +1024,7 @@ mod tests {
        let order_by_executor = Box::new(SortExecutor::new(
            Box::new(mock_executor),
            Arc::new(column_orders),
-            "SortExecutor2".to_string(),
+            "SortExecutor2".to_owned(),
            CHUNK_SIZE,
            MemoryContext::for_spill_test(),
            Some(SpillBackend::Memory),
diff --git a/src/batch/src/executor/project.rs b/src/batch/src/executor/project.rs
index db29319cccc72..a44209c739473 100644
--- a/src/batch/src/executor/project.rs
+++ b/src/batch/src/executor/project.rs
@@ -147,7 +147,7 @@ mod tests {
            expr: expr_vec,
            child: Box::new(mock_executor),
            schema: Schema { fields },
-            identity: "ProjectExecutor".to_string(),
+            identity: "ProjectExecutor".to_owned(),
        });
 
        let fields = &proj_executor.schema().fields;
@@ -174,7 +174,7 @@ mod tests {
        let values_executor2: Box<dyn Executor> = Box::new(ValuesExecutor::new(
            vec![vec![]], // One single row with no column.
            Schema::default(),
-            "ValuesExecutor".to_string(),
+            "ValuesExecutor".to_owned(),
            CHUNK_SIZE,
        ));
 
@@ -182,7 +182,7 @@ mod tests {
            expr: vec![Box::new(literal)],
            child: values_executor2,
            schema: schema_unnamed!(DataType::Int32),
-            identity: "ProjectExecutor2".to_string(),
+            identity: "ProjectExecutor2".to_owned(),
        });
        let mut stream = proj_executor.execute();
        let chunk = stream.next().await.unwrap().unwrap();
diff --git a/src/batch/src/executor/project_set.rs b/src/batch/src/executor/project_set.rs
index c1ecd2489aef5..1296a4fa0ba55 100644
--- a/src/batch/src/executor/project_set.rs
+++ b/src/batch/src/executor/project_set.rs
@@ -273,7 +273,7 @@ mod tests {
            select_list,
            child: Box::new(mock_executor),
            schema: Schema { fields },
-            identity: "ProjectSetExecutor".to_string(),
+            identity: "ProjectSetExecutor".to_owned(),
            chunk_size: CHUNK_SIZE,
        });
 
@@ -318,7 +318,7 @@ mod tests {
        let values_executor2: Box<dyn Executor> = Box::new(ValuesExecutor::new(
            vec![vec![]], // One single row with no column.
            Schema::default(),
-            "ValuesExecutor".to_string(),
+            "ValuesExecutor".to_owned(),
            CHUNK_SIZE,
        ));
 
@@ -326,7 +326,7 @@ mod tests {
            select_list: vec![literal.boxed().into(), tf.into()],
            child: values_executor2,
            schema: schema_unnamed!(DataType::Int32, DataType::Int32),
-            identity: "ProjectSetExecutor2".to_string(),
+            identity: "ProjectSetExecutor2".to_owned(),
            chunk_size: CHUNK_SIZE,
        });
        let mut stream = proj_executor.execute();
diff --git a/src/batch/src/executor/sort_agg.rs b/src/batch/src/executor/sort_agg.rs
index ad0000fc60980..87e12e30d0bfd 100644
--- a/src/batch/src/executor/sort_agg.rs
+++ b/src/batch/src/executor/sort_agg.rs
@@ -420,7 +420,7 @@ mod tests {
            group_key: group_exprs,
            child: Box::new(child),
            schema: Schema { fields },
-            identity: "SortAggExecutor".to_string(),
+            identity: "SortAggExecutor".to_owned(),
            output_size_limit: 3,
            shutdown_rx: ShutdownToken::empty(),
        });
@@ -504,7 +504,7 @@ mod tests {
            group_key: group_exprs,
            child: Box::new(child),
            schema: Schema { fields },
-            identity: "SortAggExecutor".to_string(),
+            identity: "SortAggExecutor".to_owned(),
            output_size_limit: 3,
            shutdown_rx: ShutdownToken::empty(),
        });
@@ -596,7 +596,7 @@ mod tests {
            group_key: vec![],
            child: Box::new(child),
            schema: Schema { fields },
-            identity: "SortAggExecutor".to_string(),
+            identity: "SortAggExecutor".to_owned(),
            output_size_limit: 4,
            shutdown_rx: ShutdownToken::empty(),
        });
@@ -668,7 +668,7 @@ mod tests {
            group_key: group_exprs,
            child: Box::new(child),
            schema: Schema { fields },
-            identity: "SortAggExecutor".to_string(),
+            identity: "SortAggExecutor".to_owned(),
            output_size_limit,
            shutdown_rx: ShutdownToken::empty(),
        });
@@ -759,7 +759,7 @@ mod tests {
            group_key: group_exprs,
            child: Box::new(child),
            schema: Schema { fields },
-            identity: "SortAggExecutor".to_string(),
+            identity: "SortAggExecutor".to_owned(),
            output_size_limit: 3,
            shutdown_rx: ShutdownToken::empty(),
        });
@@ -854,7 +854,7 @@ mod tests {
            group_key: group_exprs,
            child: Box::new(child),
            schema: Schema { fields },
-            identity: "SortAggExecutor".to_string(),
+            identity: "SortAggExecutor".to_owned(),
            output_size_limit,
            shutdown_rx,
        });
diff --git a/src/batch/src/executor/test_utils.rs b/src/batch/src/executor/test_utils.rs
index ac0c829d60aa5..a7aeb902de902 100644
--- a/src/batch/src/executor/test_utils.rs
+++ b/src/batch/src/executor/test_utils.rs
@@ -129,7 +129,7 @@ impl MockExecutor {
        Self {
            chunks: VecDeque::new(),
            schema,
-            identity: "MockExecutor".to_string(),
+            identity: "MockExecutor".to_owned(),
        }
    }
diff --git a/src/batch/src/executor/top_n.rs b/src/batch/src/executor/top_n.rs
index ca60cec0e9351..cc4fc9291b88f 100644
--- a/src/batch/src/executor/top_n.rs
+++ b/src/batch/src/executor/top_n.rs
@@ -333,7 +333,7 @@ mod tests {
            1,
            3,
            false,
-            "TopNExecutor".to_string(),
+            "TopNExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
        ));
@@ -391,7 +391,7 @@ mod tests {
            1,
            0,
            false,
-            "TopNExecutor".to_string(),
+            "TopNExecutor".to_owned(),
            CHUNK_SIZE,
            MemoryContext::none(),
        ));
diff --git a/src/batch/src/executor/union.rs b/src/batch/src/executor/union.rs
index 343368e80f026..6da58320d9ad2 100644
--- a/src/batch/src/executor/union.rs
+++ b/src/batch/src/executor/union.rs
@@ -123,7 +123,7 @@ mod tests {
        let union_executor = Box::new(UnionExecutor {
            inputs: vec![Box::new(mock_executor1), Box::new(mock_executor2)],
-            identity: "UnionExecutor".to_string(),
+            identity: "UnionExecutor".to_owned(),
        });
        let fields = &union_executor.schema().fields;
        assert_eq!(fields[0].data_type, DataType::Int32);
diff --git a/src/batch/src/executor/values.rs b/src/batch/src/executor/values.rs
index c3239b0e93768..c9690450822e8 100644
--- a/src/batch/src/executor/values.rs
+++ b/src/batch/src/executor/values.rs
@@ -178,7 +178,7 @@ mod tests {
        let values_executor = Box::new(ValuesExecutor {
            rows: vec![exprs].into_iter(),
            schema: Schema { fields },
-            identity: "ValuesExecutor2".to_string(),
+            identity: "ValuesExecutor2".to_owned(),
            chunk_size: CHUNK_SIZE,
        });
@@ -241,7 +241,7 @@ mod tests {
        let values_executor = Box::new(ValuesExecutor::new(
            rows,
            Schema { fields },
-            "ValuesExecutor2".to_string(),
+            "ValuesExecutor2".to_owned(),
            3,
        ));
        let mut stream = values_executor.execute();
@@ -256,7 +256,7 @@ mod tests {
        let values_executor = Box::new(ValuesExecutor::new(
            vec![vec![]],
            Schema::default(),
-            "ValuesExecutor2".to_string(),
+            "ValuesExecutor2".to_owned(),
            CHUNK_SIZE,
        ));
        let mut stream = values_executor.execute();
diff --git a/src/batch/src/spill/spill_op.rs b/src/batch/src/spill/spill_op.rs
index 6b3f8a7394040..982b537ea25e6 100644
--- a/src/batch/src/spill/spill_op.rs
+++ b/src/batch/src/spill/spill_op.rs
@@ -56,7 +56,7 @@ impl SpillOp {
        assert!(path.ends_with('/'));
 
        let spill_dir =
-            std::env::var(RW_BATCH_SPILL_DIR_ENV).unwrap_or_else(|_| DEFAULT_SPILL_DIR.to_string());
+            std::env::var(RW_BATCH_SPILL_DIR_ENV).unwrap_or_else(|_| DEFAULT_SPILL_DIR.to_owned());
        let root = format!("/{}/{}/{}/", spill_dir, RW_MANAGED_SPILL_DIR, path);
 
        let op = match spill_backend {
@@ -81,7 +81,7 @@ impl SpillOp {
        let _guard = LOCK.lock().await;
 
        let spill_dir =
-            std::env::var(RW_BATCH_SPILL_DIR_ENV).unwrap_or_else(|_| DEFAULT_SPILL_DIR.to_string());
+            std::env::var(RW_BATCH_SPILL_DIR_ENV).unwrap_or_else(|_| DEFAULT_SPILL_DIR.to_owned());
        let root = format!("/{}/{}/", spill_dir, RW_MANAGED_SPILL_DIR);
 
        let builder = Fs::default().root(&root);
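Worth noting in the `spill_op.rs` hunks above: the default stays inside `unwrap_or_else`, so the owned `String` for `DEFAULT_SPILL_DIR` is only allocated on the `Err` path, when the env var is actually missing. A hedged sketch of the difference — the constants here are illustrative stand-ins, not the real ones:

```rust
// Illustrative only; `MY_DIR_ENV` / `DEFAULT_DIR` stand in for the real constants.
const MY_DIR_ENV: &str = "MY_DIR";
const DEFAULT_DIR: &str = "/tmp/spill";

fn eager() -> String {
    // `unwrap_or` evaluates its argument up front: `DEFAULT_DIR.to_owned()`
    // allocates even when the env var is set and the default is discarded.
    std::env::var(MY_DIR_ENV).unwrap_or(DEFAULT_DIR.to_owned())
}

fn lazy() -> String {
    // `unwrap_or_else` defers the allocation to the error path, which is why
    // the diff keeps the closure form while swapping in `to_owned`.
    std::env::var(MY_DIR_ENV).unwrap_or_else(|_| DEFAULT_DIR.to_owned())
}
```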
diff --git a/src/batch/src/task/task_execution.rs b/src/batch/src/task/task_execution.rs
index c50eb7f593692..0d8a346bd73b2 100644
--- a/src/batch/src/task/task_execution.rs
+++ b/src/batch/src/task/task_execution.rs
@@ -475,7 +475,7 @@ impl BatchTaskExecution {
                .send(TaskInfoResponse {
                    task_id: Some(self.task_id.to_prost()),
                    task_status: task_status.into(),
-                    error_message: err_str.unwrap_or("".to_string()),
+                    error_message: err_str.unwrap_or("".to_owned()),
                })
                .await
        } else {
@@ -685,7 +685,7 @@ mod tests {
        let task_id = TaskId {
            task_id: 1,
            stage_id: 2,
-            query_id: "abc".to_string(),
+            query_id: "abc".to_owned(),
        };
        let task_output_id = TaskOutputId {
            task_id,
diff --git a/src/batch/src/task/task_manager.rs b/src/batch/src/task/task_manager.rs
index 81c53a9844981..c17ac275e8d84 100644
--- a/src/batch/src/task/task_manager.rs
+++ b/src/batch/src/task/task_manager.rs
@@ -148,7 +148,7 @@ impl BatchManager {
            StateReporter::new_with_test(),
            TracingContext::none(),
            ExprContext {
-                time_zone: "UTC".to_string(),
+                time_zone: "UTC".to_owned(),
                strict_mode: false,
            },
        )
@@ -176,7 +176,7 @@ impl BatchManager {
            .send(TaskInfoResponse {
                task_id: Some(task_id.to_prost()),
                task_status: TaskStatus::Ping.into(),
-                error_message: "".to_string(),
+                error_message: "".to_owned(),
            })
            .await
            .is_err()
@@ -309,7 +309,7 @@ mod tests {
        let task_id = TaskId {
            task_id: 0,
            stage_id: 0,
-            query_id: "abc".to_string(),
+            query_id: "abc".to_owned(),
        };
 
        let error = manager.check_if_task_running(&task_id).unwrap_err();
@@ -337,7 +337,7 @@ mod tests {
        let plan = PlanFragment {
            root: Some(PlanNode {
                children: vec![],
-                identity: "".to_string(),
+                identity: "".to_owned(),
                node_body: Some(NodeBody::Values(ValuesNode {
                    tuples: vec![],
                    fields: vec![],
@@ -349,7 +349,7 @@ mod tests {
            }),
        };
        let task_id = PbTaskId {
-            query_id: "".to_string(),
+            query_id: "".to_owned(),
            stage_id: 0,
            task_id: 0,
        };
@@ -378,7 +378,7 @@ mod tests {
        let plan = PlanFragment {
            root: Some(PlanNode {
                children: vec![],
-                identity: "".to_string(),
+                identity: "".to_owned(),
                node_body: Some(NodeBody::BusyLoopExecutor(true)),
            }),
            exchange_info: Some(ExchangeInfo {
@@ -387,7 +387,7 @@ mod tests {
            }),
        };
        let task_id = PbTaskId {
-            query_id: "".to_string(),
+            query_id: "".to_owned(),
            stage_id: 0,
            task_id: 0,
        };
@@ -409,7 +409,7 @@ mod tests {
        let plan = PlanFragment {
            root: Some(PlanNode {
                children: vec![],
-                identity: "".to_string(),
+                identity: "".to_owned(),
                node_body: Some(NodeBody::BusyLoopExecutor(true)),
            }),
            exchange_info: Some(ExchangeInfo {
@@ -418,7 +418,7 @@ mod tests {
            }),
        };
        let task_id = PbTaskId {
-            query_id: "".to_string(),
+            query_id: "".to_owned(),
            stage_id: 0,
            task_id: 0,
        };
diff --git a/src/bench/sink_bench/main.rs b/src/bench/sink_bench/main.rs
index 423d317b37a79..b15317533c370 100644
--- a/src/bench/sink_bench/main.rs
+++ b/src/bench/sink_bench/main.rs
@@ -121,7 +121,7 @@ impl LogReader for MockRangeLogReader {
                },
            ))
            }
-            _ => Err(anyhow!("Can't assert message type".to_string())),
+            _ => Err(anyhow!("Can't assert message type".to_owned())),
        }
    }
 }
@@ -348,7 +348,7 @@ impl MockDatagenSource {
                Either::Right(Message::Chunk(chunk)) => yield Message::Chunk(chunk),
                _ => {
                    return Err(StreamExecutorError::from(
-                        "Can't assert message type".to_string(),
+                        "Can't assert message type".to_owned(),
                    ))
                }
            }
@@ -385,7 +385,7 @@ where
    }
    let log_sinker = sink.new_log_sinker(sink_writer_param).await.unwrap();
    match log_sinker.consume_log_and_sink(&mut log_reader).await {
-        Ok(_) => Err("Stream closed".to_string()),
+        Ok(_) => Err("Stream closed".to_owned()),
        Err(e) => Err(e.to_report_string()),
    }
 }
@@ -515,7 +515,7 @@ async fn main() {
        stop_rx,
        data_size_tx,
    );
-    if cfg.sink.eq(&BENCH_TEST.to_string()) {
+    if cfg.sink.eq(&BENCH_TEST.to_owned()) {
        println!("Start Sink Bench!, Wait {:?}s", BENCH_TIME);
        tokio::spawn(async move {
            mock_range_log_reader.init().await.unwrap();
@@ -532,7 +532,7 @@ async fn main() {
    let connector = properties.get("connector").unwrap().clone();
    let format_desc = mock_from_legacy_type(
        &connector.clone(),
-        properties.get("type").unwrap_or(&"append-only".to_string()),
+        properties.get("type").unwrap_or(&"append-only".to_owned()),
    )
    .unwrap();
    let sink_param = SinkParam {
@@ -543,8 +543,8 @@ async fn main() {
        downstream_pk: table_schema.pk_indices,
        sink_type: SinkType::AppendOnly,
        format_desc,
-        db_name: "not_need_set".to_string(),
-        sink_from_name: "not_need_set".to_string(),
+        db_name: "not_need_set".to_owned(),
+        sink_from_name: "not_need_set".to_owned(),
    };
    let sink = build_sink(sink_param).unwrap();
    let sink_writer_param = SinkWriterParam::for_test();
diff --git a/src/cmd_all/src/single_node.rs b/src/cmd_all/src/single_node.rs
index 08d6e7f97cb4f..9f5e26ab59ee4 100644
--- a/src/cmd_all/src/single_node.rs
+++ b/src/cmd_all/src/single_node.rs
@@ -152,13 +152,13 @@ pub fn map_single_node_opts_to_standalone_opts(opts: SingleNodeOpts) -> ParsedSt
    let store_directory = opts.store_directory.unwrap_or_else(|| {
        let mut home_path = home_dir().unwrap();
        home_path.push(".risingwave");
-        home_path.to_str().unwrap().to_string()
+        home_path.to_str().unwrap().to_owned()
    });
 
    // Set state store for meta (if not set). It could be set by environment variables before this.
    if meta_opts.state_store.is_none() {
        if opts.in_memory {
-            meta_opts.state_store = Some("hummock+memory".to_string());
+            meta_opts.state_store = Some("hummock+memory".to_owned());
        } else {
            let state_store_dir = format!("{}/state_store", &store_directory);
            std::fs::create_dir_all(&state_store_dir).unwrap();
@@ -167,7 +167,7 @@ pub fn map_single_node_opts_to_standalone_opts(opts: SingleNodeOpts) -> ParsedSt
        }
 
        // FIXME: otherwise it reports: missing system param "data_directory", but I think it should be set by this way...
-        meta_opts.data_directory = Some("hummock_001".to_string());
+        meta_opts.data_directory = Some("hummock_001".to_owned());
    }
 
    // Set meta store for meta (if not set). It could be set by environment variables before this.
@@ -192,17 +192,17 @@ pub fn map_single_node_opts_to_standalone_opts(opts: SingleNodeOpts) -> ParsedSt
    }
 
    // Set listen addresses (force to override)
-    meta_opts.listen_addr = "0.0.0.0:5690".to_string();
-    meta_opts.advertise_addr = "127.0.0.1:5690".to_string();
-    meta_opts.dashboard_host = Some("0.0.0.0:5691".to_string());
-    compute_opts.listen_addr = "0.0.0.0:5688".to_string();
-    compactor_opts.listen_addr = "0.0.0.0:6660".to_string();
+    meta_opts.listen_addr = "0.0.0.0:5690".to_owned();
+    meta_opts.advertise_addr = "127.0.0.1:5690".to_owned();
+    meta_opts.dashboard_host = Some("0.0.0.0:5691".to_owned());
+    compute_opts.listen_addr = "0.0.0.0:5688".to_owned();
+    compactor_opts.listen_addr = "0.0.0.0:6660".to_owned();
    if let Some(frontend_addr) = &opts.node_opts.listen_addr {
        frontend_opts.listen_addr.clone_from(frontend_addr);
    }
 
    // Set Meta addresses for all nodes (force to override)
-    let meta_addr = "http://127.0.0.1:5690".to_string();
+    let meta_addr = "http://127.0.0.1:5690".to_owned();
    compute_opts.meta_address = meta_addr.parse().unwrap();
    frontend_opts.meta_addr = meta_addr.parse().unwrap();
    compactor_opts.meta_address = meta_addr.parse().unwrap();
diff --git a/src/cmd_all/src/standalone.rs b/src/cmd_all/src/standalone.rs
index b5730d8d7845a..feef016b35bae 100644
--- a/src/cmd_all/src/standalone.rs
+++ b/src/cmd_all/src/standalone.rs
@@ -433,12 +433,12 @@ mod test {
        assert_eq!(meta_opts.advertise_addr, "127.0.0.1:9999");
        assert_eq!(
            meta_opts.data_directory,
-            Some("some path with spaces".to_string())
+            Some("some path with spaces".to_owned())
        );
        assert_eq!(meta_opts.temp_secret_file_dir, "./meta/secrets/");
        assert_eq!(
            meta_opts.prometheus_listener_addr,
-            Some("127.0.0.1:1234".to_string())
+            Some("127.0.0.1:1234".to_owned())
        );
        assert_eq!(meta_opts.config_path, "src/config/test.toml");
    } else {
diff --git a/src/common/benches/bench_data_chunk_compact.rs b/src/common/benches/bench_data_chunk_compact.rs
index 82704304a5951..fe42f66f78cd8 100644
--- a/src/common/benches/bench_data_chunk_compact.rs
+++ b/src/common/benches/bench_data_chunk_compact.rs
@@ -28,7 +28,7 @@ struct DataChunkBenchCase {
 impl DataChunkBenchCase {
    pub fn new(name: &str, data_types: Vec<DataType>) -> Self {
        Self {
-            name: name.to_string(),
+            name: name.to_owned(),
            data_types,
        }
    }
diff --git a/src/common/benches/bench_data_chunk_encoding.rs b/src/common/benches/bench_data_chunk_encoding.rs
index 4b09aeaeed5c2..86bc88676d81a 100644
--- a/src/common/benches/bench_data_chunk_encoding.rs
+++ b/src/common/benches/bench_data_chunk_encoding.rs
@@ -30,7 +30,7 @@ struct DataChunkBenchCase {
 impl DataChunkBenchCase {
    pub fn new(name: &str, data_types: Vec<DataType>) -> Self {
        Self {
-            name: name.to_string(),
+            name: name.to_owned(),
            data_types,
        }
    }
diff --git a/src/common/benches/bench_encoding.rs b/src/common/benches/bench_encoding.rs
index 65c4bd86265b8..891b6d76f4a62 100644
--- a/src/common/benches/bench_encoding.rs
+++ b/src/common/benches/bench_encoding.rs
@@ -36,7 +36,7 @@ struct Case {
 impl Case {
    pub fn new(name: &str, ty: DataType, scalar: ScalarImpl) -> Self {
        Self {
-            name: name.to_string(),
+            name: name.to_owned(),
            ty,
            datum: Some(scalar),
        }
@@ -128,16 +128,16 @@ fn bench_encoding(c: &mut Criterion) {
        ),
    ];
 
-    let filter = env::var(ENV_CASE).unwrap_or_else(|_| "".to_string());
+    let filter = env::var(ENV_CASE).unwrap_or_else(|_| "".to_owned());
    let cases = cases
        .into_iter()
        .filter(|case| case.name.contains(&filter))
        .collect::<Vec<_>>();
    let bench_ser = !env::var(ENV_BENCH_SER)
-        .unwrap_or_else(|_| "1".to_string())
+        .unwrap_or_else(|_| "1".to_owned())
        .eq("0");
    let bench_de = !env::var(ENV_BENCH_DE)
-        .unwrap_or_else(|_| "1".to_string())
+        .unwrap_or_else(|_| "1".to_owned())
        .eq("0");
 
    if bench_ser {
diff --git a/src/common/benches/bench_hash_key_encoding.rs b/src/common/benches/bench_hash_key_encoding.rs
index c1529a212ac9e..3e6424e870900 100644
--- a/src/common/benches/bench_hash_key_encoding.rs
+++ b/src/common/benches/bench_hash_key_encoding.rs
@@ -92,14 +92,14 @@ impl HashKeyBenchCase {
    }
 
    pub fn bench_vec_ser(&self, c: &mut Criterion) {
-        let vectorize_serialize_id = "vec ser ".to_string() + &self.id;
+        let vectorize_serialize_id = "vec ser ".to_owned() + &self.id;
        c.bench_function(&vectorize_serialize_id, |b| {
            b.iter(|| K::build_many(&self.col_idxes, &self.input_chunk))
        });
    }
 
    pub fn bench_vec_deser(&self, c: &mut Criterion) {
-        let vectorize_deserialize_id = "vec deser ".to_string() + &self.id;
+        let vectorize_deserialize_id = "vec deser ".to_owned() + &self.id;
        c.bench_function(&vectorize_deserialize_id, |b| {
            let mut array_builders = self
                .input_chunk
@@ -117,7 +117,7 @@ impl HashKeyBenchCase {
    }
 
    pub fn bench_deser(&self, c: &mut Criterion) {
-        let vectorize_deserialize_id = "row deser ".to_string() + &self.id;
+        let vectorize_deserialize_id = "row deser ".to_owned() + &self.id;
        c.bench_function(&vectorize_deserialize_id, |b| {
            b.iter(|| {
                for key in &self.keys {
@@ -139,43 +139,43 @@ fn case_builders() -> Vec<HashKeyBenchCaseBuilder> {
    vec![
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Serial],
-            describe: "Serial".to_string(),
+            describe: "Serial".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int32],
-            describe: "int32".to_string(),
+            describe: "int32".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int64],
-            describe: "int64".to_string(),
+            describe: "int64".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Varchar],
-            describe: "varchar".to_string(),
+            describe: "varchar".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Varchar, DataType::Varchar],
-            describe: "composite varchar".to_string(),
+            describe: "composite varchar".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int32, DataType::Int32, DataType::Int32],
-            describe: "composite fixed, case 1".to_string(),
+            describe: "composite fixed, case 1".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int32, DataType::Int64, DataType::Int32],
-            describe: "composite fixed, case 2".to_string(),
+            describe: "composite fixed, case 2".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int32, DataType::Varchar],
-            describe: "mix fixed and not fixed, case 1".to_string(),
+            describe: "mix fixed and not fixed, case 1".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int64, DataType::Varchar],
-            describe: "mix fixed and not fixed, case 2".to_string(),
+            describe: "mix fixed and not fixed, case 2".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int64; 8],
-            describe: "medium fixed".to_string(),
+            describe: "medium fixed".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: {
@@ -183,11 +183,11 @@ fn case_builders() -> Vec<HashKeyBenchCaseBuilder> {
                v[7] = DataType::Varchar;
                v
            },
-            describe: "medium mixed".to_string(),
+            describe: "medium mixed".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int64; 16],
-            describe: "large fixed".to_string(),
+            describe: "large fixed".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: {
@@ -195,7 +195,7 @@ fn case_builders() -> Vec<HashKeyBenchCaseBuilder> {
                v[15] = DataType::Varchar;
                v
            },
-            describe: "large mixed".to_string(),
+            describe: "large mixed".to_owned(),
        },
        // These benchmark cases will test unaligned key sizes.
        // For instance five keys of Int64 cannot fit within Key256 (5 * 64 = 320 > 256),
@@ -203,15 +203,15 @@ fn case_builders() -> Vec<HashKeyBenchCaseBuilder> {
        // This means 24 bytes of wasted memory.
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int64; 5],
-            describe: "unaligned small fixed".to_string(),
+            describe: "unaligned small fixed".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int64; 9],
-            describe: "unaligned medium fixed".to_string(),
+            describe: "unaligned medium fixed".to_owned(),
        },
        HashKeyBenchCaseBuilder {
            data_types: vec![DataType::Int64; 17],
-            describe: "unaligned large fixed".to_string(),
+            describe: "unaligned large fixed".to_owned(),
        },
    ]
 }
diff --git a/src/common/estimate_size/src/collections/btreemap.rs b/src/common/estimate_size/src/collections/btreemap.rs
index 25d0c5f82c677..c38b48ed6cd05 100644
--- a/src/common/estimate_size/src/collections/btreemap.rs
+++ b/src/common/estimate_size/src/collections/btreemap.rs
@@ -279,25 +279,25 @@ mod tests {
        assert!(left.is_empty());
        assert!(right.is_empty());
 
-        map.insert(1, "hello".to_string());
-        map.insert(6, "world".to_string());
+        map.insert(1, "hello".to_owned());
+        map.insert(6, "world".to_owned());
        let (left, right) = map.retain_range(&6..=&6);
        assert_eq!(map.len(), 1);
-        assert_eq!(map.inner[&6], "world".to_string());
+        assert_eq!(map.inner[&6], "world".to_owned());
        assert_eq!(left.len(), 1);
-        assert_eq!(left[&1], "hello".to_string());
+        assert_eq!(left[&1], "hello".to_owned());
        assert!(right.is_empty());
 
-        map.insert(8, "risingwave".to_string());
-        map.insert(3, "great".to_string());
-        map.insert(0, "wooow".to_string());
+        map.insert(8, "risingwave".to_owned());
+        map.insert(3, "great".to_owned());
+        map.insert(0, "wooow".to_owned());
        let (left, right) = map.retain_range(&2..=&7);
        assert_eq!(map.len(), 2);
-        assert_eq!(map.inner[&3], "great".to_string());
-        assert_eq!(map.inner[&6], "world".to_string());
+        assert_eq!(map.inner[&3], "great".to_owned());
+        assert_eq!(map.inner[&6], "world".to_owned());
        assert_eq!(left.len(), 1);
-        assert_eq!(left[&0], "wooow".to_string());
+        assert_eq!(left[&0], "wooow".to_owned());
        assert_eq!(right.len(), 1);
-        assert_eq!(right[&8], "risingwave".to_string());
+        assert_eq!(right[&8], "risingwave".to_owned());
    }
 }
diff --git a/src/common/metrics/src/guarded_metrics.rs b/src/common/metrics/src/guarded_metrics.rs
index 97ea311455270..8e953e28ac948 100644
--- a/src/common/metrics/src/guarded_metrics.rs
+++ b/src/common/metrics/src/guarded_metrics.rs
@@ -178,7 +178,7 @@ struct LabelGuardedMetricsInfo {
    fn register_new_label(mutex: &Arc<Mutex<Self>>, labels: &[&str; N]) -> LabelGuard<N> {
        let mut guard = mutex.lock();
-        let label_string = labels.map(|str| str.to_string());
+        let label_string = labels.map(|str| str.to_owned());
        guard.uncollected_removed_labels.remove(&label_string);
        *guard
            .labeled_metrics_count
diff --git a/src/common/metrics/src/monitor/connection.rs b/src/common/metrics/src/monitor/connection.rs
index 54fa399c3806e..50efac038391d 100644
--- a/src/common/metrics/src/monitor/connection.rs
+++ b/src/common/metrics/src/monitor/connection.rs
@@ -276,7 +276,7 @@ where
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        let ret = self.inner.poll_ready(cx);
        if let Poll::Ready(Err(_)) = &ret {
-            self.monitor.on_err("".to_string());
+            self.monitor.on_err("".to_owned());
        }
        ret
    }
@@ -316,7 +316,7 @@ where
                let remote_addr = conn.connect_info().remote_addr();
                let endpoint = remote_addr
                    .map(|remote_addr| format!("{}", remote_addr.ip()))
-                    .unwrap_or("unknown".to_string());
+                    .unwrap_or("unknown".to_owned());
                MonitoredConnection::new(conn, monitor.new_connection_monitor(endpoint))
            })
        })
@@ -350,7 +350,7 @@ mod compat {
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        let ret = self.inner.poll_ready(cx);
        if let Poll::Ready(Err(_)) = &ret {
-            self.monitor.on_err("".to_string());
+            self.monitor.on_err("".to_owned());
        }
        ret
    }
diff --git a/src/common/proc_macro/src/config_doc.rs b/src/common/proc_macro/src/config_doc.rs
index 7fdc11fc933c2..0f620f5b120e4 100644
--- a/src/common/proc_macro/src/config_doc.rs
+++ b/src/common/proc_macro/src/config_doc.rs
@@ -59,7 +59,7 @@ fn extract_comment(attrs: &Vec<Attribute>) -> String {
            if trimmed.is_empty() {
                None
            } else {
-                Some(trimmed.to_string())
+                Some(trimmed.to_owned())
            }
        })
        .join(" ")
diff --git a/src/common/proc_macro/src/session_config.rs b/src/common/proc_macro/src/session_config.rs
index aff98faf11745..cec92042baa15 100644
--- a/src/common/proc_macro/src/session_config.rs
+++ b/src/common/proc_macro/src/session_config.rs
@@ -54,7 +54,7 @@ pub(crate) fn derive_config(input: DeriveInput) -> TokenStream {
            let meta = attr.parse_meta().expect_or_abort("Failed to parse meta");
            if let syn::Meta::NameValue(val) = meta {
                if let syn::Lit::Str(desc) = val.lit {
-                    doc_list.push(desc.value().trim().to_string());
+                    doc_list.push(desc.value().trim().to_owned());
                }
            }
        }
diff --git a/src/common/secret/src/secret_manager.rs b/src/common/secret/src/secret_manager.rs
index 41cae5070810f..8257cbf68d902 100644
--- a/src/common/secret/src/secret_manager.rs
+++ b/src/common/secret/src/secret_manager.rs
@@ -68,7 +68,7 @@ impl LocalSecretManager {
    pub fn global() -> &'static LocalSecretManager {
        // Initialize the secret manager for unit tests.
        #[cfg(debug_assertions)]
-        LocalSecretManager::init("./tmp".to_string(), "test_cluster".to_string(), 0);
+        LocalSecretManager::init("./tmp".to_owned(), "test_cluster".to_owned(), 0);
 
        INSTANCE.get().unwrap()
    }
diff --git a/src/common/src/array/arrow/arrow_iceberg.rs b/src/common/src/array/arrow/arrow_iceberg.rs
index be6520cff94c5..279cb234a3f91 100644
--- a/src/common/src/array/arrow/arrow_iceberg.rs
+++ b/src/common/src/array/arrow/arrow_iceberg.rs
@@ -195,9 +195,9 @@ impl IcebergCreateTableArrowConvert {
            let mut metadata = HashMap::new();
            // for iceberg-rust
-            metadata.insert("PARQUET:field_id".to_string(), field_id.to_string());
+            metadata.insert("PARQUET:field_id".to_owned(), field_id.to_string());
            // for icelake
-            metadata.insert("column_id".to_string(), field_id.to_string());
+            metadata.insert("column_id".to_owned(), field_id.to_string());
 
            arrow_field.set_metadata(metadata);
        }
    }
diff --git a/src/common/src/array/arrow/arrow_impl.rs b/src/common/src/array/arrow/arrow_impl.rs
index a341e218878c9..7b398a58924c6 100644
--- a/src/common/src/array/arrow/arrow_impl.rs
+++ b/src/common/src/array/arrow/arrow_impl.rs
@@ -654,7 +654,7 @@ pub trait FromArrow {
                let array: &arrow_array::StringArray =
                    array.as_any().downcast_ref().ok_or_else(|| {
                        ArrayError::from_arrow(
-                            "expected string array for `arrowudf.decimal`".to_string(),
+                            "expected string array for `arrowudf.decimal`".to_owned(),
                        )
                    })?;
                Ok(ArrayImpl::Decimal(array.try_into()?))
@@ -663,7 +663,7 @@ pub trait FromArrow {
                let array: &arrow_array::StringArray =
                    array.as_any().downcast_ref().ok_or_else(|| {
                        ArrayError::from_arrow(
-                            "expected string array for `arrowudf.json`".to_string(),
+                            "expected string array for `arrowudf.json`".to_owned(),
                        )
                    })?;
                Ok(ArrayImpl::Jsonb(array.try_into()?))
diff --git a/src/common/src/array/list_array.rs b/src/common/src/array/list_array.rs
index 745b1f6bbab05..a20b98cfa86e4 100644
--- a/src/common/src/array/list_array.rs
+++ b/src/common/src/array/list_array.rs
@@ -749,7 +749,7 @@ impl ListValue {
    fn parse_array(&mut self) -> Result<ListValue, String> {
        self.skip_whitespace();
        if !self.try_consume('{') {
-            return Err("Array value must start with \"{\"".to_string());
+            return Err("Array value must start with \"{\"".to_owned());
        }
        self.skip_whitespace();
        if self.try_consume('}') {
@@ -775,7 +775,7 @@ impl ListValue {
                    break;
                }
                None => return Err(Self::eoi()),
-                _ => return Err("Unexpected array element.".to_string()),
+                _ => return Err("Unexpected array element.".to_owned()),
            }
        }
        Ok(ListValue::new(builder.finish()))
@@ -813,8 +813,8 @@ impl ListValue {
                    Cow::Borrowed(trimmed)
                };
            }
-            (_, '{') => return Err("Unexpected \"{\" character.".to_string()),
-            (_, '"') => return Err("Unexpected array element.".to_string()),
+            (_, '{') => return Err("Unexpected \"{\" character.".to_owned()),
+            (_, '"') => return Err("Unexpected array element.".to_owned()),
            _ => {}
        }
    };
@@ -921,7 +921,7 @@ impl ListValue {
    fn expect_end(&mut self) -> Result<(), String> {
        self.skip_whitespace();
        match self.peek() {
-            Some(_) => Err("Junk after closing right brace.".to_string()),
+            Some(_) => Err("Junk after closing right brace.".to_owned()),
            None => Ok(()),
        }
    }
diff --git a/src/common/src/array/map_array.rs b/src/common/src/array/map_array.rs
index d3f852dda0560..534530dca4b61 100644
--- a/src/common/src/array/map_array.rs
+++ b/src/common/src/array/map_array.rs
@@ -238,10 +238,10 @@ mod scalar {
        let struct_array = entries.into_array();
        for key in struct_array.as_struct().field_at(0).iter() {
            let Some(key) = key else {
-                return Err("map keys must not be NULL".to_string());
+                return Err("map keys must not be NULL".to_owned());
            };
            if !keys.insert(key) {
-                return Err("map keys must be unique".to_string());
+                return Err("map keys must be unique".to_owned());
            }
        }
        Ok(MapValue(ListValue::new(struct_array)))
@@ -250,14 +250,14 @@ mod scalar {
    /// Returns error if [map invariants](`super::MapArray`) are violated.
    pub fn try_from_kv(key: ListValue, value: ListValue) -> Result<Self, String> {
        if key.len() != value.len() {
-            return Err("map keys and values have different length".to_string());
+            return Err("map keys and values have different length".to_owned());
        }
        let unique_keys: HashSet<_> = key.iter().unique().collect();
        if unique_keys.len() != key.len() {
-            return Err("map keys must be unique".to_string());
+            return Err("map keys must be unique".to_owned());
        }
        if unique_keys.contains(&None) {
-            return Err("map keys must not be NULL".to_string());
+            return Err("map keys must not be NULL".to_owned());
        }
 
        let len = key.len();
diff --git a/src/common/src/array/stream_chunk.rs b/src/common/src/array/stream_chunk.rs
index 2827f07493fd2..b004329a084c1 100644
--- a/src/common/src/array/stream_chunk.rs
+++ b/src/common/src/array/stream_chunk.rs
@@ -97,7 +97,7 @@ impl Op {
            Op::UpdateInsert => "UpdateInsert",
            Op::UpdateDelete => "UpdateDelete",
        }
-        .to_string()
+        .to_owned()
    }
 }
diff --git a/src/common/src/cache.rs b/src/common/src/cache.rs
index 678a101ae62a6..44119f1a57003 100644
--- a/src/common/src/cache.rs
+++ b/src/common/src/cache.rs
@@ -1091,7 +1091,7 @@ mod tests {
            for k in keys {
                lru = (*lru).next;
                assert!(
-                    (*lru).is_same_key(&k.to_string()),
+                    (*lru).is_same_key(&k.to_owned()),
                    "compare failed: {} vs {}, get value: {:?}",
                    (*lru).get_key(),
                    k,
@@ -1107,10 +1107,10 @@ mod tests {
    fn lookup(cache: &mut LruCacheShard<String, String>, key: &str) -> bool {
        unsafe {
-            let h = cache.lookup(0, &key.to_string());
+            let h = cache.lookup(0, &key.to_owned());
            let exist = !h.is_null();
            if exist {
-                assert!((*h).is_same_key(&key.to_string()));
+                assert!((*h).is_same_key(&key.to_owned()));
                cache.release(h);
            }
            exist
        }
    }
@@ -1126,10 +1126,10 @@ mod tests {
        let mut free_list = vec![];
        unsafe {
            let handle = cache.insert(
-                key.to_string(),
+                key.to_owned(),
                0,
                value.len(),
-                value.to_string(),
+                value.to_owned(),
                priority,
                &mut free_list,
            );
@@ -1160,7 +1160,7 @@ mod tests {
        assert!(lookup(&mut cache, "z"));
        validate_lru_list(&mut cache, vec!["d", "x", "y", "e", "z"]);
        unsafe {
-            let h = cache.erase(0, &"x".to_string());
+            let h = cache.erase(0, &"x".to_owned());
            assert!(h.is_some());
            validate_lru_list(&mut cache, vec!["d", "y", "e", "z"]);
        }
@@ -1198,22 +1198,22 @@ mod tests {
        let mut free_list = vec![];
        validate_lru_list(&mut cache, vec!["k1", "k2"]);
        unsafe {
-            let h1 = cache.lookup(0, &"k1".to_string());
+            let h1 = cache.lookup(0, &"k1".to_owned());
            assert!(!h1.is_null());
-            let h2 = cache.lookup(0, &"k2".to_string());
+            let h2 = cache.lookup(0, &"k2".to_owned());
            assert!(!h2.is_null());
 
            let h3 = cache.insert(
-                "k3".to_string(),
+                "k3".to_owned(),
                0,
                2,
-                "bb".to_string(),
+                "bb".to_owned(),
                CachePriority::High,
                &mut free_list,
            );
            assert_eq!(cache.usage.load(Ordering::Relaxed), 6);
            assert!(!h3.is_null());
-            let h4 = cache.lookup(0, &"k1".to_string());
+            let h4 = cache.lookup(0, &"k1".to_owned());
            assert!(!h4.is_null());
 
            cache.release(h1);
@@ -1234,37 +1234,37 @@ mod tests {
        let mut to_delete = vec![];
        let mut cache = create_cache(5);
        let insert_handle = cache.insert(
-            "key".to_string(),
+            "key".to_owned(),
            0,
            1,
-            "old_value".to_string(),
+            "old_value".to_owned(),
            CachePriority::High,
            &mut to_delete,
        );
-        let old_entry = cache.lookup(0, &"key".to_string());
+        let old_entry = cache.lookup(0, &"key".to_owned());
        assert!(!old_entry.is_null());
-        assert_eq!((*old_entry).get_value(), &"old_value".to_string());
+        assert_eq!((*old_entry).get_value(), &"old_value".to_owned());
        assert_eq!((*old_entry).refs, 2);
        cache.release(insert_handle);
        assert_eq!((*old_entry).refs, 1);
 
        let insert_handle = cache.insert(
-            "key".to_string(),
+            "key".to_owned(),
            0,
            1,
-            "new_value".to_string(),
+            "new_value".to_owned(),
            CachePriority::Low,
            &mut to_delete,
        );
        assert!(!(*old_entry).is_in_cache());
-        let new_entry = cache.lookup(0, &"key".to_string());
+        let new_entry = cache.lookup(0, &"key".to_owned());
        assert!(!new_entry.is_null());
-        assert_eq!((*new_entry).get_value(), &"new_value".to_string());
+        assert_eq!((*new_entry).get_value(), &"new_value".to_owned());
        assert_eq!((*new_entry).refs, 2);
        cache.release(insert_handle);
        assert_eq!((*new_entry).refs, 1);
 
        assert!(!old_entry.is_null());
-        assert_eq!((*old_entry).get_value(), &"old_value".to_string());
+        assert_eq!((*old_entry).get_value(), &"old_value".to_owned());
        assert_eq!((*old_entry).refs, 1);
 
        cache.release(new_entry);
@@ -1274,7 +1274,7 @@ mod tests {
        // assert old value unchanged.
        assert!(!old_entry.is_null());
-        assert_eq!((*old_entry).get_value(), &"old_value".to_string());
+        assert_eq!((*old_entry).get_value(), &"old_value".to_owned());
        assert_eq!((*old_entry).refs, 1);
 
        cache.release(old_entry);
@@ -1295,31 +1295,31 @@ mod tests {
        // The cache can only hold one handle
        let mut cache = create_cache(1);
        let insert_handle = cache.insert(
-            "key".to_string(),
+            "key".to_owned(),
            0,
            1,
-            "old_value".to_string(),
+            "old_value".to_owned(),
            CachePriority::High,
            &mut to_delete,
        );
        cache.release(insert_handle);
-        let old_entry = cache.lookup(0, &"key".to_string());
+        let old_entry = cache.lookup(0, &"key".to_owned());
        assert!(!old_entry.is_null());
-        assert_eq!((*old_entry).get_value(), &"old_value".to_string());
+        assert_eq!((*old_entry).get_value(), &"old_value".to_owned());
        assert_eq!((*old_entry).refs, 1);
 
        let insert_handle = cache.insert(
-            "key".to_string(),
+            "key".to_owned(),
            0,
            1,
-            "new_value".to_string(),
+            "new_value".to_owned(),
            CachePriority::High,
            &mut to_delete,
        );
        assert!(!(*old_entry).is_in_cache());
-        let new_entry = cache.lookup(0, &"key".to_string());
+        let new_entry = cache.lookup(0, &"key".to_owned());
        assert!(!new_entry.is_null());
-        assert_eq!((*new_entry).get_value(), &"new_value".to_string());
+        assert_eq!((*new_entry).get_value(), &"new_value".to_owned());
        assert_eq!((*new_entry).refs, 2);
        cache.release(insert_handle);
        assert_eq!((*new_entry).refs, 1);
@@ -1333,9 +1333,9 @@ mod tests {
        assert_eq!(1, cache.usage.load(Relaxed));
        assert_eq!(0, cache.lru_usage.load(Relaxed));
 
-        let new_entry_again = cache.lookup(0, &"key".to_string());
+        let new_entry_again = cache.lookup(0, &"key".to_owned());
        assert!(!new_entry_again.is_null());
-        assert_eq!((*new_entry_again).get_value(), &"new_value".to_string());
+        assert_eq!((*new_entry_again).get_value(), &"new_value".to_owned());
        assert_eq!((*new_entry_again).refs, 2);
 
        cache.release(new_entry);
@@ -1355,21 +1355,21 @@ mod tests {
            assert!(lookup(&mut shard, "a"));
        }
        assert!(matches!(
-            cache.lookup_for_request(0, "a".to_string()),
+            cache.lookup_for_request(0, "a".to_owned()),
            LookupResult::Cached(_)
        ));
        assert!(matches!(
-            cache.lookup_for_request(0, "b".to_string()),
+            cache.lookup_for_request(0, "b".to_owned()),
            LookupResult::Miss
        ));
-        let ret2 = cache.lookup_for_request(0, "b".to_string());
+        let ret2 = cache.lookup_for_request(0, "b".to_owned());
        match ret2 {
            LookupResult::WaitPendingRequest(mut recv) => {
                assert!(matches!(recv.try_recv(), Err(TryRecvError::Empty)));
-                cache.insert("b".to_string(), 0, 1, "v2".to_string(), CachePriority::Low);
+                cache.insert("b".to_owned(), 0, 1, "v2".to_owned(), CachePriority::Low);
                recv.try_recv().unwrap();
                assert!(
-                    matches!(cache.lookup_for_request(0, "b".to_string()), LookupResult::Cached(v) if v.eq("v2"))
+                    matches!(cache.lookup_for_request(0, "b".to_owned()), LookupResult::Cached(v) if v.eq("v2"))
                );
            }
            _ => panic!(),
@@ -1396,59 +1396,41 @@ mod tests {
        let cache = Arc::new(LruCache::with_event_listener(1, 2, 0, listener.clone()));
 
        // full-fill cache
-        let h = cache.insert(
-            "k1".to_string(),
-            0,
-            1,
-            "v1".to_string(),
-            CachePriority::High,
-        );
+        let h = cache.insert("k1".to_owned(), 0, 1, "v1".to_owned(), CachePriority::High);
        drop(h);
-        let h = cache.insert(
-            "k2".to_string(),
-            0,
-            1,
-            "v2".to_string(),
-            CachePriority::High,
-        );
+        let h = cache.insert("k2".to_owned(), 0, 1, "v2".to_owned(), CachePriority::High);
        drop(h);
        assert_eq!(cache.get_memory_usage(), 2);
        assert!(listener.released.lock().is_empty());
 
        // test evict
-        let h = cache.insert(
-            "k3".to_string(),
-            0,
-            1,
-            "v3".to_string(),
-            CachePriority::High,
-        );
+        let h = cache.insert("k3".to_owned(), 0, 1, "v3".to_owned(), CachePriority::High);
        drop(h);
        assert_eq!(cache.get_memory_usage(), 2);
        assert!(listener.released.lock().remove("k1").is_some());
 
        // test erase
-        cache.erase(0, &"k2".to_string());
+        cache.erase(0, &"k2".to_owned());
        assert_eq!(cache.get_memory_usage(), 1);
        assert!(listener.released.lock().remove("k2").is_some());
 
        // test refill
-        let h = cache.insert("k4".to_string(), 0, 1, "v4".to_string(), CachePriority::Low);
+        let h = cache.insert("k4".to_owned(), 0, 1, "v4".to_owned(), CachePriority::Low);
        drop(h);
        assert_eq!(cache.get_memory_usage(), 2);
        assert!(listener.released.lock().is_empty());
 
        // test release after full
        // 1. full-fill cache but not release
-        let h1 = cache.insert("k5".to_string(), 0, 1, "v5".to_string(), CachePriority::Low);
+        let h1 = cache.insert("k5".to_owned(), 0, 1, "v5".to_owned(), CachePriority::Low);
        assert_eq!(cache.get_memory_usage(), 2);
        assert!(listener.released.lock().remove("k3").is_some());
-        let h2 = cache.insert("k6".to_string(), 0, 1, "v6".to_string(), CachePriority::Low);
+        let h2 = cache.insert("k6".to_owned(), 0, 1, "v6".to_owned(), CachePriority::Low);
        assert_eq!(cache.get_memory_usage(), 2);
        assert!(listener.released.lock().remove("k4").is_some());
        // 2. insert one more entry after cache is full, cache will be oversized
-        let h3 = cache.insert("k7".to_string(), 0, 1, "v7".to_string(), CachePriority::Low);
+        let h3 = cache.insert("k7".to_owned(), 0, 1, "v7".to_owned(), CachePriority::Low);
        assert_eq!(cache.get_memory_usage(), 3);
        assert!(listener.released.lock().is_empty());
diff --git a/src/common/src/cast/mod.rs b/src/common/src/cast/mod.rs
index 41d3c1c1ceae9..0684e7b394860 100644
--- a/src/common/src/cast/mod.rs
+++ b/src/common/src/cast/mod.rs
@@ -148,7 +148,7 @@ pub fn parse_bytes_hex(s: &str) -> Result<Vec<u8>, String> {
                let v2 = get_hex(c)?;
                res.push((v1 << 4) | v2);
            }
-            None => return Err("invalid hexadecimal data: odd number of digits".to_string()),
+            None => return Err("invalid hexadecimal data: odd number of digits".to_owned()),
        }
    }
 
@@ -174,12 +174,12 @@ pub fn parse_bytes_traditional(s: &str) -> Result<Vec<u8>, String> {
                }
                _ => {
                    // one backslash, not followed by another or ### valid octal
-                    return Err("invalid input syntax for type bytea".to_string());
+                    return Err("invalid input syntax for type bytea".to_owned());
                }
            },
            _ => {
                // one backslash, not followed by another or ### valid octal
-                return Err("invalid input syntax for type bytea".to_string());
+                return Err("invalid input syntax for type bytea".to_owned());
            }
        }
    }
diff --git a/src/common/src/catalog/column.rs b/src/common/src/catalog/column.rs
index 514f877b17c13..81b2943a88a75 100644
--- a/src/common/src/catalog/column.rs
+++ b/src/common/src/catalog/column.rs
@@ -256,9 +256,9 @@ impl ColumnDesc {
        Self {
            data_type,
            column_id: ColumnId::new(column_id),
-            name: name.to_string(),
+            name: name.to_owned(),
            field_descs: vec![],
-            type_name: "".to_string(),
+            type_name: "".to_owned(),
            generated_or_default_column: None,
            description: None,
            additional_column: AdditionalColumn { column_type: None },
@@ -278,9 +278,9 @@ impl ColumnDesc {
        Self {
            data_type,
            column_id: ColumnId::new(column_id),
-            name: name.to_string(),
+            name: name.to_owned(),
            field_descs: fields,
-            type_name: type_name.to_string(),
+            type_name: type_name.to_owned(),
            generated_or_default_column: None,
            description: None,
            additional_column: AdditionalColumn { column_type: None },
diff --git a/src/common/src/catalog/mod.rs b/src/common/src/catalog/mod.rs
index 5556650bd328c..240be79d51a44 100644
--- a/src/common/src/catalog/mod.rs
+++ b/src/common/src/catalog/mod.rs
@@ -544,10 +544,10 @@ impl ConflictBehavior {
    pub fn debug_to_string(self) -> String {
        match self {
-            ConflictBehavior::NoCheck => "NoCheck".to_string(),
-            ConflictBehavior::Overwrite => "Overwrite".to_string(),
-            ConflictBehavior::IgnoreConflict => "IgnoreConflict".to_string(),
-            ConflictBehavior::DoUpdateIfNotNull => "DoUpdateIfNotNull".to_string(),
+            ConflictBehavior::NoCheck => "NoCheck".to_owned(),
+            ConflictBehavior::Overwrite => "Overwrite".to_owned(),
+            ConflictBehavior::IgnoreConflict => "IgnoreConflict".to_owned(),
+            ConflictBehavior::DoUpdateIfNotNull => "DoUpdateIfNotNull".to_owned(),
        }
    }
 }
@@ -576,8 +576,8 @@ impl Engine {
    pub fn debug_to_string(self) -> String {
        match self {
-            Engine::Hummock => "Hummock".to_string(),
-            Engine::Iceberg => "Iceberg".to_string(),
+            Engine::Hummock => "Hummock".to_owned(),
+            Engine::Iceberg => "Iceberg".to_owned(),
        }
    }
 }
diff --git a/src/common/src/catalog/schema.rs b/src/common/src/catalog/schema.rs
index 221d8aed50252..0b49bf4da8bf0 100644
--- a/src/common/src/catalog/schema.rs
+++ b/src/common/src/catalog/schema.rs
@@ -55,7 +55,7 @@ impl Field {
    pub fn to_prost(&self) -> PbField {
        PbField {
            data_type: Some(self.data_type.to_protobuf()),
-            name: self.name.to_string(),
+            name: self.name.clone(),
        }
    }
 }
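The `schema.rs` hunk just above is the one place the PR reaches for `.clone()` instead of `.to_owned()`: `self.name` is already a `String`, so `.to_string()` was copying an owned string through the `ToString` machinery when a plain clone says the same thing directly. A short sketch of the three conversions this PR normalizes — a standalone example, not code from this repository:

```rust
// Standalone illustration of the conversions this PR normalizes.
fn conversions() {
    let borrowed: &str = "schema";
    let owned: String = String::from("schema");

    // &str -> String: `to_owned` is the direct allocation; `to_string` routes
    // through `Display`/`ToString` (specialized to be as fast, but less explicit).
    let a: String = borrowed.to_owned();

    // String -> String: a copy of an owned value is spelled `clone`, which is
    // exactly what the `to_prost` hunk above switches to.
    let b: String = owned.clone();

    // `to_string` still has a job: converting non-string `Display` types,
    // as in the `field_id.to_string()` calls left untouched in `arrow_iceberg.rs`.
    let c: String = 42.to_string();

    let _ = (a, b, c);
}
```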
diff --git a/src/common/src/catalog/test_utils.rs b/src/common/src/catalog/test_utils.rs
index ae87b3a881f84..941c114be3652 100644
--- a/src/common/src/catalog/test_utils.rs
+++ b/src/common/src/catalog/test_utils.rs
@@ -34,7 +34,7 @@ impl ColumnDescTestExt for ColumnDesc {
        Self {
            column_type: Some(data_type),
            column_id,
-            name: name.to_string(),
+            name: name.to_owned(),
            additional_column: Some(AdditionalColumn { column_type: None }),
            version: ColumnDescVersion::Pr13707 as i32,
            ..Default::default()
@@ -55,8 +55,8 @@ impl ColumnDescTestExt for ColumnDesc {
                ..Default::default()
            }),
            column_id,
-            name: name.to_string(),
-            type_name: type_name.to_string(),
+            name: name.to_owned(),
+            type_name: type_name.to_owned(),
            field_descs: fields,
            generated_or_default_column: None,
            description: None,
diff --git a/src/common/src/config.rs b/src/common/src/config.rs
index bdd05bbeebf9d..1da7efdf008a0 100644
--- a/src/common/src/config.rs
+++ b/src/common/src/config.rs
@@ -447,7 +447,7 @@ impl Serialize for DefaultParallelism {
            Int(usize),
        }
        match self {
-            DefaultParallelism::Full => Parallelism::Str("Full".to_string()).serialize(serializer),
+            DefaultParallelism::Full => Parallelism::Str("Full".to_owned()).serialize(serializer),
            DefaultParallelism::Default(val) => {
                Parallelism::Int(val.get() as _).serialize(serializer)
            }
@@ -1395,7 +1395,7 @@ impl SystemConfig {
            if let Some(hummock_state_store) = state_store.strip_prefix("hummock+") {
                system_params.backup_storage_url = Some(hummock_state_store.to_owned());
            } else {
-                system_params.backup_storage_url = Some("memory".to_string());
+                system_params.backup_storage_url = Some("memory".to_owned());
            }
            tracing::info!("initialize backup_storage_url based on state_store");
        }
@@ -1857,7 +1857,7 @@ pub mod default {
        use foyer::{Compression, RecoverMode, RuntimeOptions, TokioRuntimeOptions};
 
        pub fn dir() -> String {
-            "".to_string()
+            "".to_owned()
        }
 
        pub fn capacity_mb() -> usize {
@@ -1945,7 +1945,7 @@ pub mod default {
        }
 
        pub fn dir() -> String {
-            "./".to_string()
+            "./".to_owned()
        }
    }
@@ -2690,7 +2690,7 @@ mod tests {
    fn rw_config_to_markdown() -> String {
        let mut config_rustdocs = BTreeMap::<String, Vec<(String, String)>>::new();
-        RwConfig::config_docs("".to_string(), &mut config_rustdocs);
+        RwConfig::config_docs("".to_owned(), &mut config_rustdocs);
 
        // Section -> Config Name -> ConfigItemDoc
        let mut configs: BTreeMap<String, BTreeMap<String, ConfigItemDoc>> = config_rustdocs
@@ -2703,7 +2703,7 @@ mod tests {
                        name,
                        ConfigItemDoc {
                            desc,
-                            default: "".to_string(), // unset
+                            default: "".to_owned(), // unset
                        },
                    )
                })
@@ -2715,10 +2715,10 @@ mod tests {
        let toml_doc: BTreeMap<String, toml::Value> =
            toml::from_str(&toml::to_string(&default_config_for_docs()).unwrap()).unwrap();
        toml_doc.into_iter().for_each(|(name, value)| {
-            set_default_values("".to_string(), name, value, &mut configs);
+            set_default_values("".to_owned(), name, value, &mut configs);
        });
 
-        let mut markdown = "# RisingWave System Configurations\n\n".to_string()
+        let mut markdown = "# RisingWave System Configurations\n\n".to_owned()
            + "This page is automatically generated by `./risedev generate-example-config`\n";
        for (section, configs) in configs {
            if configs.is_empty() {
@@ -2806,7 +2806,7 @@ mod tests {
                .s3
                .developer
                .retryable_service_error_codes,
-            vec!["dummy".to_string()]
+            vec!["dummy".to_owned()]
        );
    }
@@ -2852,7 +2852,7 @@ mod tests {
                .s3
                .developer
                .retryable_service_error_codes,
-            vec!["dummy".to_string()]
+            vec!["dummy".to_owned()]
        );
    }
 }
diff --git a/src/common/src/field_generator/mod.rs b/src/common/src/field_generator/mod.rs
index 679d60ba1f188..108dd20bf5c22 100644
--- a/src/common/src/field_generator/mod.rs
+++ b/src/common/src/field_generator/mod.rs
@@ -336,8 +336,8 @@ mod tests {
            i32_fields.push(
                FieldGeneratorImpl::with_number_sequence(
                    DataType::Int32,
-                    Some("1".to_string()),
-                    Some("20".to_string()),
+                    Some("1".to_owned()),
+                    Some("20".to_owned()),
                    split_index,
                    split_num,
                    0,
diff --git a/src/common/src/field_generator/numeric.rs b/src/common/src/field_generator/numeric.rs
index 684c3589b6b80..51925c4566714 100644
--- a/src/common/src/field_generator/numeric.rs
+++ b/src/common/src/field_generator/numeric.rs
@@ -203,7 +203,7 @@ mod tests {
    #[test]
    fn test_sequence_field_generator() {
        let mut i16_field =
-            I16SequenceField::new(Some("5".to_string()), Some("10".to_string()), 0, 1, 0).unwrap();
+            I16SequenceField::new(Some("5".to_owned()), Some("10".to_owned()), 0, 1, 0).unwrap();
        for i in 5..=10 {
            assert_eq!(i16_field.generate(), json!(i));
        }
@@ -211,7 +211,7 @@ mod tests {
    #[test]
    fn test_random_field_generator() {
        let mut i64_field =
-            I64RandomField::new(Some("5".to_string()), Some("10".to_string()), 114).unwrap();
+            I64RandomField::new(Some("5".to_owned()), Some("10".to_owned()), 114).unwrap();
        for i in 0..100 {
            let res = i64_field.generate(i as u64);
            assert!(res.is_number());
@@ -231,7 +231,7 @@ mod tests {
    #[test]
    fn test_sequence_datum_generator() {
        let mut f32_field =
-            F32SequenceField::new(Some("5.0".to_string()), Some("10.0".to_string()), 0, 1, 0)
+            F32SequenceField::new(Some("5.0".to_owned()), Some("10.0".to_owned()), 0, 1, 0)
                .unwrap();
 
        for i in 5..=10 {
@@ -244,7 +244,7 @@ mod tests {
    #[test]
    fn test_random_datum_generator() {
        let mut i32_field =
-            I32RandomField::new(Some("-5".to_string()), Some("5".to_string()), 123).unwrap();
+            I32RandomField::new(Some("-5".to_owned()), Some("5".to_owned()), 123).unwrap();
        let (lower, upper) = ((-5).to_scalar_value(), 5.to_scalar_value());
        for i in 0..100 {
            let res = i32_field.generate_datum(i as u64);
@@ -257,13 +257,13 @@ mod tests {
    #[test]
    fn test_sequence_field_generator_float() {
        let mut f64_field =
-            F64SequenceField::new(Some("0".to_string()), Some("10".to_string()), 0, 1, 0).unwrap();
+            F64SequenceField::new(Some("0".to_owned()), Some("10".to_owned()), 0, 1, 0).unwrap();
        for i in 0..=10 {
            assert_eq!(f64_field.generate(), json!(i as f64));
        }
 
        let mut f32_field =
-            F32SequenceField::new(Some("-5".to_string()), Some("5".to_string()), 0, 1, 0).unwrap();
+            F32SequenceField::new(Some("-5".to_owned()), Some("5".to_owned()), 0, 1, 0).unwrap();
        for i in -5..=5 {
            assert_eq!(f32_field.generate(), json!(i as f32));
        }
@@ -272,7 +272,7 @@ mod tests {
    #[test]
    fn test_random_field_generator_float() {
        let mut f64_field =
-            F64RandomField::new(Some("5".to_string()), Some("10".to_string()), 114).unwrap();
+            F64RandomField::new(Some("5".to_owned()), Some("10".to_owned()), 114).unwrap();
        for i in 0..100 {
            let res = f64_field.generate(i as u64);
            assert!(res.is_number());
@@ -290,7 +290,7 @@ mod tests {
        }
 
        let mut f32_field =
-            F32RandomField::new(Some("5".to_string()), Some("10".to_string()), 114).unwrap();
+            F32RandomField::new(Some("5".to_owned()), Some("10".to_owned()), 114).unwrap();
        for i in 0..100 {
            let res = f32_field.generate(i as u64);
            assert!(res.is_number());
diff --git a/src/common/src/field_generator/varchar.rs b/src/common/src/field_generator/varchar.rs
index 9f68bcb287c46..eb56987fc7ecd 100644
--- a/src/common/src/field_generator/varchar.rs
+++ b/src/common/src/field_generator/varchar.rs
@@ -94,7 +94,7 @@ impl VarcharConstant {
    pub fn generate_datum() -> Datum {
        Some(
            Self::CONSTANT_STRING
-                .to_string()
+                .to_owned()
                .into_boxed_str()
                .to_scalar_value(),
        )
    }
diff --git a/src/common/src/row/owned_row.rs b/src/common/src/row/owned_row.rs
index 15a01d192a64c..05d5c0ebadc28 100644
--- a/src/common/src/row/owned_row.rs
+++ b/src/common/src/row/owned_row.rs
@@ -76,7 +76,7 @@ impl OwnedRow {
                DataType::Int64 => x.parse::<i64>().unwrap().into(),
                DataType::Float32 => x.parse::<f32>().unwrap().into(),
                DataType::Float64 => x.parse::<f64>().unwrap().into(),
-                DataType::Varchar => x.to_string().into(),
+                DataType::Varchar => x.to_owned().into(),
                DataType::Boolean => x.parse::<bool>().unwrap().into(),
                DataType::Date => x.parse::<Date>().unwrap().into(),
                DataType::Time => x.parse::