From 561fd105a7144794515912369154052f784c48e4 Mon Sep 17 00:00:00 2001
From: Nazar Mokrynskyi
Date: Mon, 18 Mar 2024 14:05:22 +0200
Subject: [PATCH] Rename `single_disk_farm` to `farm` and `disk_farm_index` to
 `farm_index` in many places, no other code changes

---
 .../src/bin/subspace-farmer/commands/farm.rs  | 275 ++++++++----------
 .../src/bin/subspace-farmer/commands/info.rs  |   6 +-
 .../src/bin/subspace-farmer/commands/scrub.rs |   4 +-
 .../bin/subspace-farmer/commands/shared.rs    |   4 +-
 crates/subspace-farmer/src/farmer_cache.rs    |  30 +-
 .../subspace-farmer/src/single_disk_farm.rs   |  10 +-
 .../src/utils/plotted_pieces.rs               |  14 +-
 7 files changed, 157 insertions(+), 186 deletions(-)

diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs
index 10f1fbb4c7..e5b785dd0d 100644
--- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs
+++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs
@@ -650,7 +650,7 @@ where
         .map(|farming_thread_pool_size| farming_thread_pool_size.get())
         .unwrap_or_else(recommended_number_of_farming_threads);

-    let (single_disk_farms, plotting_delay_senders) = {
+    let (farms, plotting_delay_senders) = {
         let node_rpc_url = &node_rpc_url;
         let global_mutex = Arc::default();
         let info_mutex = &AsyncMutex::new(());
@@ -661,12 +661,12 @@ where
             .map(|_| oneshot::channel())
             .unzip::<_, _, Vec<_>, Vec<_>>();

-        let mut single_disk_farms = Vec::with_capacity(disk_farms.len());
-        let mut single_disk_farms_stream = disk_farms
+        let mut farms = Vec::with_capacity(disk_farms.len());
+        let mut farms_stream = disk_farms
             .into_iter()
             .zip(plotting_delay_receivers)
             .enumerate()
-            .map(|(disk_farm_index, (disk_farm, plotting_delay_receiver))| {
+            .map(|(farm_index, (disk_farm, plotting_delay_receiver))| {
                 let farmer_app_info = farmer_app_info.clone();
                 let kzg = kzg.clone();
                 let erasure_coding = erasure_coding.clone();
@@ -684,11 +684,11 @@ where
                 let node_client = match NodeRpcClient::new(node_rpc_url).await {
                     Ok(node_client) => node_client,
                     Err(error) => {
-                        return (disk_farm_index, Err(error.into()));
+                        return (farm_index, Err(error.into()));
                     }
                 };

-                let single_disk_farm_fut = SingleDiskFarm::new::<_, _, PosTable>(
+                let farm_fut = SingleDiskFarm::new::<_, _, PosTable>(
                     SingleDiskFarmOptions {
                         directory: disk_farm.directory.clone(),
                         farmer_app_info,
@@ -711,17 +711,17 @@ where
                         faster_read_sector_record_chunks_mode_barrier,
                         faster_read_sector_record_chunks_mode_concurrency,
                     },
-                    disk_farm_index,
+                    farm_index,
                 );

-                let single_disk_farm = match single_disk_farm_fut.await {
-                    Ok(single_disk_farm) => single_disk_farm,
+                let farm = match farm_fut.await {
+                    Ok(farm) => farm,
                     Err(SingleDiskFarmError::InsufficientAllocatedSpace {
                         min_space,
                         allocated_space,
                     }) => {
                         return (
-                            disk_farm_index,
+                            farm_index,
                             Err(anyhow::anyhow!(
                                 "Allocated space {} ({}) is not enough, minimum is ~{} (~{}, \
                                 {} bytes to be exact)",
@@ -734,15 +734,15 @@ where
                         );
                     }
                     Err(error) => {
-                        return (disk_farm_index, Err(error.into()));
+                        return (farm_index, Err(error.into()));
                     }
                 };

                 if !no_info {
                     let _info_guard = info_mutex.lock().await;

-                    let info = single_disk_farm.info();
-                    info!("Single disk farm {disk_farm_index}:");
+                    let info = farm.info();
+                    info!("Farm {farm_index}:");
                     info!("  ID: {}", info.id());
                     info!("  Genesis hash: 0x{}", hex::encode(info.genesis_hash()));
                     info!("  Public key: 0x{}", hex::encode(info.public_key()));
                     info!(
                         "  Allocated space: {} ({})",
                         bytesize::to_string(info.allocated_space(), true),
                         bytesize::to_string(info.allocated_space(), false)
                     );
                     info!("  Directory: {}", disk_farm.directory.display());
                 }

-                (disk_farm_index, Ok(single_disk_farm))
+                (farm_index, Ok(Box::new(farm) as Box<dyn Farm>))
             }
-            .instrument(info_span!("", %disk_farm_index))
+            .instrument(info_span!("", %farm_index))
             })
             .collect::<FuturesUnordered<_>>();

-        while let Some((disk_farm_index, single_disk_farm)) = single_disk_farms_stream.next().await
-        {
-            if let Err(error) = &single_disk_farm {
-                let span = info_span!("", %disk_farm_index);
+        while let Some((farm_index, farm)) = farms_stream.next().await {
+            if let Err(error) = &farm {
+                let span = info_span!("", %farm_index);
                 let _span_guard = span.enter();

-                error!(%error, "Single disk creation failed");
+                error!(%error, "Farm creation failed");
             }

-            single_disk_farms.push((disk_farm_index, single_disk_farm?));
+            farms.push((farm_index, farm?));
         }

         // Restore order after unordered initialization
-        single_disk_farms
-            .sort_unstable_by_key(|(disk_farm_index, _single_disk_farm)| *disk_farm_index);
+        farms.sort_unstable_by_key(|(farm_index, _farm)| *farm_index);

-        let single_disk_farms = single_disk_farms
+        let farms = farms
             .into_iter()
-            .map(|(_disk_farm_index, single_disk_farm)| Box::new(single_disk_farm) as Box<dyn Farm>)
+            .map(|(_farm_index, farm)| farm)
             .collect::<Vec<_>>();

-        (single_disk_farms, plotting_delay_senders)
+        (farms, plotting_delay_senders)
     };

     {
@@ -806,15 +804,9 @@ where
         }
         farmer_cache
             .replace_backing_caches(
-                single_disk_farms
-                    .iter()
-                    .map(|single_disk_farm| single_disk_farm.piece_cache())
-                    .collect(),
+                farms.iter().map(|farm| farm.piece_cache()).collect(),
                 if plot_cache {
-                    single_disk_farms
-                        .iter()
-                        .map(|single_disk_farm| single_disk_farm.plot_cache())
-                        .collect()
+                    farms.iter().map(|farm| farm.plot_cache()).collect()
                 } else {
                     Vec::new()
                 },
@@ -823,9 +815,9 @@ where
     drop(farmer_cache);

     // Store piece readers so we can reference them later
-    let piece_readers = single_disk_farms
+    let piece_readers = farms
         .iter()
-        .map(|single_disk_farm| single_disk_farm.piece_reader())
+        .map(|farm| farm.piece_reader())
         .collect::<Vec<_>>();

     info!("Collecting already plotted pieces (this will take some time)...");
@@ -834,8 +826,8 @@
     {
         let mut future_plotted_pieces = PlottedPieces::new(piece_readers);

-        for (disk_farm_index, single_disk_farm) in single_disk_farms.iter().enumerate() {
-            let disk_farm_index = disk_farm_index.try_into().map_err(|_error| {
+        for (farm_index, farm) in farms.iter().enumerate() {
+            let farm_index = farm_index.try_into().map_err(|_error| {
                 anyhow!(
                     "More than 256 plots are not supported, consider running multiple farmer \
                     instances"
                 )
             })?;

             for (sector_index, mut plotted_sectors) in
-                (0 as SectorIndex..).zip(single_disk_farm.plotted_sectors().await)
+                (0 as SectorIndex..).zip(farm.plotted_sectors().await)
             {
                 while let Some(plotted_sector_result) = plotted_sectors.next().await {
                     match plotted_sector_result {
                         Ok(plotted_sector) => {
-                            future_plotted_pieces.add_sector(disk_farm_index, &plotted_sector);
+                            future_plotted_pieces.add_sector(farm_index, &plotted_sector);
                         }
                         Err(error) => {
                             error!(
                                 %error,
-                                %disk_farm_index,
+                                %farm_index,
                                 %sector_index,
                                 "Failed reading plotted sector on startup, skipping"
                             );
                         }
                     }
                 }
             }

     info!("Finished collecting already plotted pieces successfully");

-    let total_and_plotted_sectors = single_disk_farms
+    let total_and_plotted_sectors = farms
         .iter()
         .enumerate()
-        .map(|(disk_farm_index, single_disk_farm)| async move {
-            let total_sector_count = single_disk_farm.total_sectors_count();
-            let plotted_sectors_count =
-                single_disk_farm
-                    .plotted_sectors_count()
-                    .await
-                    .map_err(|error| {
-                        anyhow!(
-                            "Failed to get plotted sectors count from from index \
-                            {disk_farm_index}: {error}"
-                        )
-                    })?;
+        .map(|(farm_index, farm)| async move {
+            let total_sector_count = farm.total_sectors_count();
+            let plotted_sectors_count = farm.plotted_sectors_count().await.map_err(|error| {
+                anyhow!(
+                    "Failed to get plotted sectors count from from index {farm_index}: \
+                    {error}"
+                )
+            })?;

             anyhow::Ok((total_sector_count, plotted_sectors_count))
         })
         .collect::<FuturesOrdered<_>>()
         .try_collect::<Vec<_>>()
         .await?;

-    let mut single_disk_farms_stream = single_disk_farms
-        .into_iter()
-        .enumerate()
+    let mut farms_stream = (0u8..)
+        .zip(farms)
         .zip(total_and_plotted_sectors)
-        .map(|((disk_farm_index, single_disk_farm), sector_counts)| {
-            let disk_farm_index = disk_farm_index.try_into().expect(
-                "More than 256 plots are not supported, this is checked above already; qed",
-            );
+        .map(|((farm_index, farm), sector_counts)| {
             let plotted_pieces = Arc::clone(&plotted_pieces);
-            let span = info_span!("", %disk_farm_index);
+            let span = info_span!("", %farm_index);

             // Collect newly plotted pieces
             let on_plotted_sector_callback =
                 move |plotted_sector: &PlottedSector,
                       maybe_old_plotted_sector: &Option<PlottedSector>| {
                     let _span_guard = span.enter();
                     let mut plotted_pieces = plotted_pieces.write();
                     let plotted_pieces = plotted_pieces
                         .as_mut()
                         .expect("Initial value was populated above; qed");

                     if let Some(old_plotted_sector) = &maybe_old_plotted_sector {
-                        plotted_pieces.delete_sector(disk_farm_index, old_plotted_sector);
+                        plotted_pieces.delete_sector(farm_index, old_plotted_sector);
                     }
-                    plotted_pieces.add_sector(disk_farm_index, plotted_sector);
+                    plotted_pieces.add_sector(farm_index, plotted_sector);
                 }
             };

             let (total_sector_count, plotted_sectors_count) = sector_counts;
             farmer_metrics.update_sectors_total(
-                single_disk_farm.id(),
+                farm.id(),
                 total_sector_count - plotted_sectors_count,
                 SectorState::NotPlotted,
             );
             farmer_metrics.update_sectors_total(
-                single_disk_farm.id(),
+                farm.id(),
                 plotted_sectors_count,
                 SectorState::Plotted,
             );

-            single_disk_farm
-                .on_sector_update(Arc::new({
-                    let single_disk_farm_id = *single_disk_farm.id();
-                    let farmer_metrics = farmer_metrics.clone();
-
-                    move |(_sector_index, sector_state)| match sector_state {
-                        SectorUpdate::Plotting(SectorPlottingDetails::Starting {
-                            ..
-                        }) => {
-                            farmer_metrics.sector_plotting.inc();
-                        }
-                        SectorUpdate::Plotting(SectorPlottingDetails::Downloading) => {
-                            farmer_metrics.sector_downloading.inc();
-                        }
-                        SectorUpdate::Plotting(SectorPlottingDetails::Downloaded(time)) => {
-                            farmer_metrics
-                                .observe_sector_downloading_time(&single_disk_farm_id, time);
-                            farmer_metrics.sector_downloaded.inc();
-                        }
-                        SectorUpdate::Plotting(SectorPlottingDetails::Encoding) => {
-                            farmer_metrics.sector_encoding.inc();
-                        }
-                        SectorUpdate::Plotting(SectorPlottingDetails::Encoded(time)) => {
-                            farmer_metrics.observe_sector_encoding_time(&single_disk_farm_id, time);
-                            farmer_metrics.sector_encoded.inc();
-                        }
-                        SectorUpdate::Plotting(SectorPlottingDetails::Writing) => {
-                            farmer_metrics.sector_writing.inc();
-                        }
-                        SectorUpdate::Plotting(SectorPlottingDetails::Written(time)) => {
-                            farmer_metrics.observe_sector_writing_time(&single_disk_farm_id, time);
-                            farmer_metrics.sector_written.inc();
-                        }
-                        SectorUpdate::Plotting(SectorPlottingDetails::Finished {
-                            plotted_sector,
-                            old_plotted_sector,
-                            time,
-                        }) => {
-                            on_plotted_sector_callback(plotted_sector, old_plotted_sector);
-                            farmer_metrics.observe_sector_plotting_time(&single_disk_farm_id, time);
-                            farmer_metrics.sector_plotted.inc();
-                            farmer_metrics
-                                .update_sector_state(&single_disk_farm_id, SectorState::Plotted);
-                        }
-                        SectorUpdate::Expiration(SectorExpirationDetails::AboutToExpire) => {
-                            farmer_metrics.update_sector_state(
-                                &single_disk_farm_id,
-                                SectorState::AboutToExpire,
-                            );
-                        }
-                        SectorUpdate::Expiration(SectorExpirationDetails::Expired) => {
-                            farmer_metrics
-                                .update_sector_state(&single_disk_farm_id, SectorState::Expired);
-                        }
-                        SectorUpdate::Expiration(SectorExpirationDetails::Determined {
-                            ..
-                        }) => {
-                            // Not interested in here
-                        }
-                    }
-                }))
-                .detach();
-
-            single_disk_farm
-                .on_farming_notification(Arc::new({
-                    let single_disk_farm_id = *single_disk_farm.id();
-                    let farmer_metrics = farmer_metrics.clone();
-
-                    move |farming_notification| match farming_notification {
-                        FarmingNotification::Auditing(auditing_details) => {
-                            farmer_metrics.observe_auditing_time(
-                                &single_disk_farm_id,
-                                &auditing_details.time,
-                            );
-                        }
-                        FarmingNotification::Proving(proving_details) => {
-                            farmer_metrics.observe_proving_time(
-                                &single_disk_farm_id,
-                                &proving_details.time,
-                                proving_details.result,
-                            );
-                        }
-                        FarmingNotification::NonFatalError(error) => {
-                            farmer_metrics.note_farming_error(&single_disk_farm_id, error);
-                        }
-                    }
-                }))
-                .detach();
+            farm.on_sector_update(Arc::new({
+                let farm_id = *farm.id();
+                let farmer_metrics = farmer_metrics.clone();
+
+                move |(_sector_index, sector_state)| match sector_state {
+                    SectorUpdate::Plotting(SectorPlottingDetails::Starting { .. }) => {
+                        farmer_metrics.sector_plotting.inc();
+                    }
+                    SectorUpdate::Plotting(SectorPlottingDetails::Downloading) => {
+                        farmer_metrics.sector_downloading.inc();
+                    }
+                    SectorUpdate::Plotting(SectorPlottingDetails::Downloaded(time)) => {
+                        farmer_metrics.observe_sector_downloading_time(&farm_id, time);
+                        farmer_metrics.sector_downloaded.inc();
+                    }
+                    SectorUpdate::Plotting(SectorPlottingDetails::Encoding) => {
+                        farmer_metrics.sector_encoding.inc();
+                    }
+                    SectorUpdate::Plotting(SectorPlottingDetails::Encoded(time)) => {
+                        farmer_metrics.observe_sector_encoding_time(&farm_id, time);
+                        farmer_metrics.sector_encoded.inc();
+                    }
+                    SectorUpdate::Plotting(SectorPlottingDetails::Writing) => {
+                        farmer_metrics.sector_writing.inc();
+                    }
+                    SectorUpdate::Plotting(SectorPlottingDetails::Written(time)) => {
+                        farmer_metrics.observe_sector_writing_time(&farm_id, time);
+                        farmer_metrics.sector_written.inc();
+                    }
+                    SectorUpdate::Plotting(SectorPlottingDetails::Finished {
+                        plotted_sector,
+                        old_plotted_sector,
+                        time,
+                    }) => {
+                        on_plotted_sector_callback(plotted_sector, old_plotted_sector);
+                        farmer_metrics.observe_sector_plotting_time(&farm_id, time);
+                        farmer_metrics.sector_plotted.inc();
+                        farmer_metrics.update_sector_state(&farm_id, SectorState::Plotted);
+                    }
+                    SectorUpdate::Expiration(SectorExpirationDetails::AboutToExpire) => {
+                        farmer_metrics.update_sector_state(&farm_id, SectorState::AboutToExpire);
+                    }
+                    SectorUpdate::Expiration(SectorExpirationDetails::Expired) => {
+                        farmer_metrics.update_sector_state(&farm_id, SectorState::Expired);
+                    }
+                    SectorUpdate::Expiration(SectorExpirationDetails::Determined { .. }) => {
+                        // Not interested in here
+                    }
+                }
+            }))
+            .detach();
+
+            farm.on_farming_notification(Arc::new({
+                let farm_id = *farm.id();
+                let farmer_metrics = farmer_metrics.clone();
+
+                move |farming_notification| match farming_notification {
+                    FarmingNotification::Auditing(auditing_details) => {
+                        farmer_metrics.observe_auditing_time(&farm_id, &auditing_details.time);
+                    }
+                    FarmingNotification::Proving(proving_details) => {
+                        farmer_metrics.observe_proving_time(
+                            &farm_id,
+                            &proving_details.time,
+                            proving_details.result,
+                        );
+                    }
+                    FarmingNotification::NonFatalError(error) => {
+                        farmer_metrics.note_farming_error(&farm_id, error);
+                    }
+                }
+            }))
+            .detach();

-            single_disk_farm.run()
+            farm.run()
         })
         .collect::<FuturesUnordered<_>>();

@@ -1028,7 +999,7 @@ where

     let farm_fut = run_future_in_dedicated_thread(
         move || async move {
-            while let Some(result) = single_disk_farms_stream.next().await {
+            while let Some(result) = farms_stream.next().await {
                 let id = result?;

                 info!(%id, "Farm exited successfully");
diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/info.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/info.rs
index 149bd453ed..2eb3d4b390 100644
--- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/info.rs
+++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/info.rs
@@ -2,11 +2,11 @@ use crate::commands::shared::print_disk_farm_info;
 use std::path::PathBuf;

 pub(crate) fn info(disk_farms: Vec<PathBuf>) {
-    for (disk_farm_index, disk_farm) in disk_farms.into_iter().enumerate() {
-        if disk_farm_index > 0 {
+    for (farm_index, disk_farm) in disk_farms.into_iter().enumerate() {
+        if farm_index > 0 {
             println!();
         }
-        print_disk_farm_info(disk_farm, disk_farm_index);
+        print_disk_farm_info(disk_farm, farm_index);
     }
 }
diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs
index 8af94c906c..186e8851de 100644
--- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs
+++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs
@@ -7,8 +7,8 @@ pub(crate) fn scrub(disk_farms: &[PathBuf], disable_farm_locking: bool) {
     disk_farms
         .into_par_iter()
         .enumerate()
-        .for_each(|(disk_farm_index, directory)| {
-            let span = info_span!("", %disk_farm_index);
+        .for_each(|(farm_index, directory)| {
+            let span = info_span!("", %farm_index);
             let _span_guard = span.enter();
             info!(
                 path = %directory.display(),
diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/shared.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/shared.rs
index c135d394d4..422c13099d 100644
--- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/shared.rs
+++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/shared.rs
@@ -1,8 +1,8 @@
 use std::path::PathBuf;
 use subspace_farmer::single_disk_farm::{SingleDiskFarm, SingleDiskFarmSummary};

-pub(crate) fn print_disk_farm_info(directory: PathBuf, disk_farm_index: usize) {
-    println!("Single disk farm {disk_farm_index}:");
+pub(crate) fn print_disk_farm_info(directory: PathBuf, farm_index: usize) {
+    println!("Single disk farm {farm_index}:");
     match SingleDiskFarm::collect_summary(directory) {
         SingleDiskFarmSummary::Found { info, directory } => {
             println!("  ID: {}", info.id());
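
An aside for reviewers, since the renamed stream machinery above is easy to
misread: commands/farm.rs tags each initialization future with its farm index,
polls the futures to completion in whatever order they finish, and only then
sorts by index to restore the configured order. Below is a minimal,
self-contained sketch of that pattern, not code from this repository: `Farm`,
the string configs, and the tokio/anyhow/futures dependencies are stand-ins
chosen for illustration.

    use futures::stream::{FuturesUnordered, StreamExt};

    #[derive(Debug)]
    struct Farm(u8);

    #[tokio::main(flavor = "current_thread")]
    async fn main() -> anyhow::Result<()> {
        let configs = vec!["farm-a", "farm-b", "farm-c"];

        let mut farms = Vec::with_capacity(configs.len());
        let mut farms_stream = configs
            .into_iter()
            .enumerate()
            .map(|(farm_index, _config)| async move {
                // The real code awaits SingleDiskFarm::new(..) here; the index
                // travels with the result so errors can be reported per farm
                (farm_index, Ok::<_, anyhow::Error>(Farm(farm_index as u8)))
            })
            .collect::<FuturesUnordered<_>>();

        // Futures complete in arbitrary order...
        while let Some((farm_index, farm)) = farms_stream.next().await {
            farms.push((farm_index, farm?));
        }
        // ...so the carried indices restore the original ordering afterwards
        farms.sort_unstable_by_key(|(farm_index, _farm)| *farm_index);

        assert_eq!(farms.len(), 3);
        Ok(())
    }

The sort is what makes the later `(0u8..).zip(farms)` pairing in farm.rs line
up with per-farm data such as the sector counts.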
diff --git a/crates/subspace-farmer/src/farmer_cache.rs b/crates/subspace-farmer/src/farmer_cache.rs
index 60e2799bc6..2fa2ce0d0f 100644
--- a/crates/subspace-farmer/src/farmer_cache.rs
+++ b/crates/subspace-farmer/src/farmer_cache.rs
@@ -168,7 +168,7 @@ where
             WorkerCommand::ForgetKey { key } => {
                 let mut caches = self.caches.write().await;
-                for (disk_farm_index, cache) in caches.iter_mut().enumerate() {
+                for (farm_index, cache) in caches.iter_mut().enumerate() {
                     let Some(offset) = cache.stored_pieces.remove(&key) else {
                         // Not this disk farm
                         continue;
                     };
@@ -182,7 +182,7 @@ where
                         }
                         Ok(None) => {
                             warn!(
-                                %disk_farm_index,
+                                %farm_index,
                                 %offset,
                                 "Piece index out of range, this is likely an implementation bug, \
                                 not freeing heap element"
@@ -191,7 +191,7 @@ where
                         Err(error) => {
                             error!(
                                 %error,
-                                %disk_farm_index,
+                                %farm_index,
                                 ?key,
                                 %offset,
                                 "Error while reading piece from cache, might be a disk corruption"
@@ -435,7 +435,7 @@ where
             // populated first
             sorted_caches.sort_by_key(|(_, cache)| cache.stored_pieces.len());
             if !stream::iter(sorted_caches)
-                .any(|(disk_farm_index, cache)| async move {
+                .any(|(farm_index, cache)| async move {
                     let Some(offset) = cache.free_offsets.pop_front() else {
                         return false;
                     };
@@ -444,7 +444,7 @@ where
                    {
                         error!(
                             %error,
-                            %disk_farm_index,
+                            %farm_index,
                             %piece_index,
                             %offset,
                             "Failed to write piece into cache"
@@ -674,7 +674,7 @@ where
            match worker_state.heap.insert(heap_key) {
                // Entry is already occupied, we need to find and replace old piece with new one
                Some(KeyWrapper(old_piece_index)) => {
-                    for (disk_farm_index, cache) in caches.iter_mut().enumerate() {
+                    for (farm_index, cache) in caches.iter_mut().enumerate() {
                        let old_record_key = RecordKey::from(old_piece_index.to_multihash());
                        let Some(offset) = cache.stored_pieces.remove(&old_record_key) else {
                            // Not this disk farm
@@ -685,14 +685,14 @@ where
                        {
                            error!(
                                %error,
-                                %disk_farm_index,
+                                %farm_index,
                                %piece_index,
                                %offset,
                                "Failed to write piece into cache"
                            );
                        } else {
                            trace!(
-                                %disk_farm_index,
+                                %farm_index,
                                %old_piece_index,
                                %piece_index,
                                %offset,
@@ -716,7 +716,7 @@ where
                // Sort piece caches by number of stored pieces to fill those that are less
                // populated first
                sorted_caches.sort_by_key(|(_, cache)| cache.stored_pieces.len());
-                for (disk_farm_index, cache) in sorted_caches {
+                for (farm_index, cache) in sorted_caches {
                    let Some(offset) = cache.free_offsets.pop_front() else {
                        // Not this disk farm
                        continue;
@@ -726,14 +726,14 @@ where
                    {
                        error!(
                            %error,
-                            %disk_farm_index,
+                            %farm_index,
                            %piece_index,
                            %offset,
                            "Failed to write piece into cache"
                        );
                    } else {
                        trace!(
-                            %disk_farm_index,
+                            %farm_index,
                            %piece_index,
                            %offset,
                            "Successfully stored piece in cache"
@@ -802,7 +802,7 @@ impl FarmerCache {
     /// Get piece from cache
     pub async fn get_piece(&self, key: RecordKey) -> Option<Piece> {
-        for (disk_farm_index, cache) in self.piece_caches.read().await.iter().enumerate() {
+        for (farm_index, cache) in self.piece_caches.read().await.iter().enumerate() {
             let Some(&offset) = cache.stored_pieces.get(&key) else {
                 continue;
             };
@@ -813,7 +813,7 @@ impl FarmerCache {
             Err(error) => {
                 error!(
                     %error,
-                    %disk_farm_index,
+                    %farm_index,
                     ?key,
                     %offset,
                     "Error while reading piece from cache, might be a disk corruption"
@@ -853,7 +853,7 @@ impl FarmerCache {
         }

         let mut should_store = false;
-        for (disk_farm_index, cache) in self.plot_caches.read().await.iter().enumerate() {
+        for (farm_index, cache) in self.plot_caches.read().await.iter().enumerate() {
             match cache.is_piece_maybe_stored(&key).await {
                 Ok(MaybePieceStoredResult::No) => {
                     // Try another one if there is any
@@ -868,7 +868,7 @@ impl FarmerCache {
                 }
                 Err(error) => {
                     warn!(
-                        %disk_farm_index,
+                        %farm_index,
                         %piece_index,
                         %error,
                         "Failed to check piece stored in cache"
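
Another aside: two hunks above (@@ -435 and @@ -716) touch the same cache
selection strategy, so it is worth stating plainly: caches are sorted by how
many pieces they already hold, then the first cache with a free offset
receives the new piece, so the least-populated cache fills first. A hedged
sketch of that selection with stand-in types (`PieceCache` and
`pick_destination` are not the crate's real names):

    use std::collections::VecDeque;

    struct PieceCache {
        stored_pieces: usize,
        free_offsets: VecDeque<u32>,
    }

    /// Picks (farm_index, offset) for a new piece, if any cache has room
    fn pick_destination(caches: &mut [(usize, PieceCache)]) -> Option<(usize, u32)> {
        // Fill less-populated caches first, mirroring sort_by_key in the patch
        caches.sort_by_key(|(_, cache)| cache.stored_pieces);
        for (farm_index, cache) in caches.iter_mut() {
            if let Some(offset) = cache.free_offsets.pop_front() {
                cache.stored_pieces += 1;
                return Some((*farm_index, offset));
            }
        }
        None
    }

    fn main() {
        let mut caches = vec![
            (0, PieceCache { stored_pieces: 2, free_offsets: VecDeque::from([7]) }),
            (1, PieceCache { stored_pieces: 0, free_offsets: VecDeque::from([0, 1]) }),
        ];
        // The emptier cache (farm index 1) receives the piece at its first offset
        assert_eq!(pick_destination(&mut caches), Some((1, 0)));
    }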
diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs
index e7cbfe8d91..39a15eb733 100644
--- a/crates/subspace-farmer/src/single_disk_farm.rs
+++ b/crates/subspace-farmer/src/single_disk_farm.rs
@@ -683,7 +683,7 @@ impl SingleDiskFarm {
     /// Create new single disk farm instance
     pub async fn new<NC, PG, PosTable>(
         options: SingleDiskFarmOptions<NC, PG>,
-        disk_farm_index: usize,
+        farm_index: usize,
     ) -> Result<Self, SingleDiskFarmError>
     where
         NC: NodeClient,
@@ -772,7 +772,7 @@ impl SingleDiskFarm {
         };

         let farming_thread_pool = ThreadPoolBuilder::new()
-            .thread_name(move |thread_index| format!("farming-{disk_farm_index}.{thread_index}"))
+            .thread_name(move |thread_index| format!("farming-{farm_index}.{thread_index}"))
             .num_threads(farming_thread_pool_size)
             .spawn_handler(tokio_rayon_spawn_handler())
             .build()
@@ -909,7 +909,7 @@ impl SingleDiskFarm {
                 // Panic will already be printed by now
                 plotting_join_handle.await.map_err(|_error| {
                     BackgroundTaskError::BackgroundTaskPanicked {
-                        task: format!("plotting-{disk_farm_index}"),
+                        task: format!("plotting-{farm_index}"),
                     }
                 })
             }));
@@ -1014,7 +1014,7 @@ impl SingleDiskFarm {
                 // Panic will already be printed by now
                 farming_join_handle.await.map_err(|_error| {
                     BackgroundTaskError::BackgroundTaskPanicked {
-                        task: format!("farming-{disk_farm_index}"),
+                        task: format!("farming-{farm_index}"),
                     }
                 })
             }));
@@ -1053,7 +1053,7 @@ impl SingleDiskFarm {
                 // Panic will already be printed by now
                 reading_join_handle.await.map_err(|_error| {
                     BackgroundTaskError::BackgroundTaskPanicked {
-                        task: format!("reading-{disk_farm_index}"),
+                        task: format!("reading-{farm_index}"),
                     }
                 })
             }));
diff --git a/crates/subspace-farmer/src/utils/plotted_pieces.rs b/crates/subspace-farmer/src/utils/plotted_pieces.rs
index fdcf107739..c88fab264c 100644
--- a/crates/subspace-farmer/src/utils/plotted_pieces.rs
+++ b/crates/subspace-farmer/src/utils/plotted_pieces.rs
@@ -10,7 +10,7 @@ use tracing::{trace, warn};

 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
 struct PieceDetails {
-    disk_farm_index: u8,
+    farm_index: u8,
     sector_index: SectorIndex,
     piece_offset: PieceOffset,
 }
@@ -57,7 +57,7 @@ impl PlottedPieces {
                 return None;
             }
         };
-        let reader = match self.readers.get(usize::from(piece_details.disk_farm_index)) {
+        let reader = match self.readers.get(usize::from(piece_details.farm_index)) {
             Some(reader) => reader.clone(),
             None => {
                 warn!(?piece_index, ?piece_details, "Plot offset is invalid");
@@ -73,7 +73,7 @@ impl PlottedPieces {
                 warn!(
                     %error,
                     %piece_index,
-                    disk_farm_index = piece_details.disk_farm_index,
+                    farm_index = piece_details.farm_index,
                     sector_index = piece_details.sector_index,
                     "Failed to retrieve piece"
                 );
@@ -83,12 +83,12 @@ impl PlottedPieces {
     }

     /// Add new sector to collect plotted pieces
-    pub fn add_sector(&mut self, disk_farm_index: u8, plotted_sector: &PlottedSector) {
+    pub fn add_sector(&mut self, farm_index: u8, plotted_sector: &PlottedSector) {
         for (piece_offset, &piece_index) in
             (PieceOffset::ZERO..).zip(plotted_sector.piece_indexes.iter())
         {
             let piece_details = PieceDetails {
-                disk_farm_index,
+                farm_index,
                 sector_index: plotted_sector.sector_index,
                 piece_offset,
             };
@@ -105,12 +105,12 @@ impl PlottedPieces {
     }

     /// Add old sector from plotted pieces (happens on replotting)
-    pub fn delete_sector(&mut self, disk_farm_index: u8, plotted_sector: &PlottedSector) {
+    pub fn delete_sector(&mut self, farm_index: u8, plotted_sector: &PlottedSector) {
         for (piece_offset, &piece_index) in
             (PieceOffset::ZERO..).zip(plotted_sector.piece_indexes.iter())
         {
             let searching_piece_details = PieceDetails {
-                disk_farm_index,
+                farm_index,
                 sector_index: plotted_sector.sector_index,
                 piece_offset,
             };
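
A closing aside: the one line in this rename that looks behavioural,
`(farm_index, Ok(Box::new(farm) as Box<dyn Farm>))`, is ordinary trait-object
erasure moved from the post-initialization `.map(..)` up to the point of
creation. A minimal sketch of the idea; this `Farm` trait is illustrative,
not the real subspace-farmer one:

    trait Farm {
        fn id(&self) -> u8;
    }

    struct SingleDiskFarm {
        id: u8,
    }

    impl Farm for SingleDiskFarm {
        fn id(&self) -> u8 {
            self.id
        }
    }

    fn main() {
        // Boxing at creation erases the concrete type, so every later stage
        // (metrics, caches, piece readers) sees only Box<dyn Farm>
        let farms: Vec<Box<dyn Farm>> = (0..3)
            .map(|id| Box::new(SingleDiskFarm { id }) as Box<dyn Farm>)
            .collect();

        for farm in &farms {
            println!("farm {}", farm.id());
        }
    }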