Skip to content

Commit

Permalink
Rename single_disk_farm to farm and disk_farm_index to `farm_index` in many places, no other code changes
Browse files Browse the repository at this point in the history
  • Loading branch information
nazar-pc committed Mar 18, 2024
1 parent 90972e1 commit 561fd10
Show file tree
Hide file tree
Showing 7 changed files with 157 additions and 186 deletions.
275 changes: 123 additions & 152 deletions crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@ use crate::commands::shared::print_disk_farm_info;
use std::path::PathBuf;

pub(crate) fn info(disk_farms: Vec<PathBuf>) {
for (disk_farm_index, disk_farm) in disk_farms.into_iter().enumerate() {
if disk_farm_index > 0 {
for (farm_index, disk_farm) in disk_farms.into_iter().enumerate() {
if farm_index > 0 {
println!();
}

print_disk_farm_info(disk_farm, disk_farm_index);
print_disk_farm_info(disk_farm, farm_index);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@ pub(crate) fn scrub(disk_farms: &[PathBuf], disable_farm_locking: bool) {
disk_farms
.into_par_iter()
.enumerate()
.for_each(|(disk_farm_index, directory)| {
let span = info_span!("", %disk_farm_index);
.for_each(|(farm_index, directory)| {
let span = info_span!("", %farm_index);
let _span_guard = span.enter();
info!(
path = %directory.display(),
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
use std::path::PathBuf;
use subspace_farmer::single_disk_farm::{SingleDiskFarm, SingleDiskFarmSummary};

pub(crate) fn print_disk_farm_info(directory: PathBuf, disk_farm_index: usize) {
println!("Single disk farm {disk_farm_index}:");
pub(crate) fn print_disk_farm_info(directory: PathBuf, farm_index: usize) {
println!("Single disk farm {farm_index}:");
match SingleDiskFarm::collect_summary(directory) {
SingleDiskFarmSummary::Found { info, directory } => {
println!(" ID: {}", info.id());
Expand Down
30 changes: 15 additions & 15 deletions crates/subspace-farmer/src/farmer_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ where
WorkerCommand::ForgetKey { key } => {
let mut caches = self.caches.write().await;

for (disk_farm_index, cache) in caches.iter_mut().enumerate() {
for (farm_index, cache) in caches.iter_mut().enumerate() {
let Some(offset) = cache.stored_pieces.remove(&key) else {
// Not this disk farm
continue;
Expand All @@ -182,7 +182,7 @@ where
}
Ok(None) => {
warn!(
%disk_farm_index,
%farm_index,
%offset,
"Piece index out of range, this is likely an implementation bug, \
not freeing heap element"
Expand All @@ -191,7 +191,7 @@ where
Err(error) => {
error!(
%error,
%disk_farm_index,
%farm_index,
?key,
%offset,
"Error while reading piece from cache, might be a disk corruption"
Expand Down Expand Up @@ -435,7 +435,7 @@ where
// populated first
sorted_caches.sort_by_key(|(_, cache)| cache.stored_pieces.len());
if !stream::iter(sorted_caches)
.any(|(disk_farm_index, cache)| async move {
.any(|(farm_index, cache)| async move {
let Some(offset) = cache.free_offsets.pop_front() else {
return false;
};
Expand All @@ -444,7 +444,7 @@ where
{
error!(
%error,
%disk_farm_index,
%farm_index,
%piece_index,
%offset,
"Failed to write piece into cache"
Expand Down Expand Up @@ -674,7 +674,7 @@ where
match worker_state.heap.insert(heap_key) {
// Entry is already occupied, we need to find and replace old piece with new one
Some(KeyWrapper(old_piece_index)) => {
for (disk_farm_index, cache) in caches.iter_mut().enumerate() {
for (farm_index, cache) in caches.iter_mut().enumerate() {
let old_record_key = RecordKey::from(old_piece_index.to_multihash());
let Some(offset) = cache.stored_pieces.remove(&old_record_key) else {
// Not this disk farm
Expand All @@ -685,14 +685,14 @@ where
{
error!(
%error,
%disk_farm_index,
%farm_index,
%piece_index,
%offset,
"Failed to write piece into cache"
);
} else {
trace!(
%disk_farm_index,
%farm_index,
%old_piece_index,
%piece_index,
%offset,
Expand All @@ -716,7 +716,7 @@ where
// Sort piece caches by number of stored pieces to fill those that are less
// populated first
sorted_caches.sort_by_key(|(_, cache)| cache.stored_pieces.len());
for (disk_farm_index, cache) in sorted_caches {
for (farm_index, cache) in sorted_caches {
let Some(offset) = cache.free_offsets.pop_front() else {
// Not this disk farm
continue;
Expand All @@ -726,14 +726,14 @@ where
{
error!(
%error,
%disk_farm_index,
%farm_index,
%piece_index,
%offset,
"Failed to write piece into cache"
);
} else {
trace!(
%disk_farm_index,
%farm_index,
%piece_index,
%offset,
"Successfully stored piece in cache"
Expand Down Expand Up @@ -802,7 +802,7 @@ impl FarmerCache {

/// Get piece from cache
pub async fn get_piece(&self, key: RecordKey) -> Option<Piece> {
for (disk_farm_index, cache) in self.piece_caches.read().await.iter().enumerate() {
for (farm_index, cache) in self.piece_caches.read().await.iter().enumerate() {
let Some(&offset) = cache.stored_pieces.get(&key) else {
continue;
};
Expand All @@ -813,7 +813,7 @@ impl FarmerCache {
Err(error) => {
error!(
%error,
%disk_farm_index,
%farm_index,
?key,
%offset,
"Error while reading piece from cache, might be a disk corruption"
Expand Down Expand Up @@ -853,7 +853,7 @@ impl FarmerCache {
}

let mut should_store = false;
for (disk_farm_index, cache) in self.plot_caches.read().await.iter().enumerate() {
for (farm_index, cache) in self.plot_caches.read().await.iter().enumerate() {
match cache.is_piece_maybe_stored(&key).await {
Ok(MaybePieceStoredResult::No) => {
// Try another one if there is any
Expand All @@ -868,7 +868,7 @@ impl FarmerCache {
}
Err(error) => {
warn!(
%disk_farm_index,
%farm_index,
%piece_index,
%error,
"Failed to check piece stored in cache"
Expand Down
10 changes: 5 additions & 5 deletions crates/subspace-farmer/src/single_disk_farm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -683,7 +683,7 @@ impl SingleDiskFarm {
/// Create new single disk farm instance
pub async fn new<NC, PG, PosTable>(
options: SingleDiskFarmOptions<NC, PG>,
disk_farm_index: usize,
farm_index: usize,
) -> Result<Self, SingleDiskFarmError>
where
NC: NodeClient,
Expand Down Expand Up @@ -772,7 +772,7 @@ impl SingleDiskFarm {
};

let farming_thread_pool = ThreadPoolBuilder::new()
.thread_name(move |thread_index| format!("farming-{disk_farm_index}.{thread_index}"))
.thread_name(move |thread_index| format!("farming-{farm_index}.{thread_index}"))
.num_threads(farming_thread_pool_size)
.spawn_handler(tokio_rayon_spawn_handler())
.build()
Expand Down Expand Up @@ -909,7 +909,7 @@ impl SingleDiskFarm {
// Panic will already be printed by now
plotting_join_handle.await.map_err(|_error| {
BackgroundTaskError::BackgroundTaskPanicked {
task: format!("plotting-{disk_farm_index}"),
task: format!("plotting-{farm_index}"),
}
})
}));
Expand Down Expand Up @@ -1014,7 +1014,7 @@ impl SingleDiskFarm {
// Panic will already be printed by now
farming_join_handle.await.map_err(|_error| {
BackgroundTaskError::BackgroundTaskPanicked {
task: format!("farming-{disk_farm_index}"),
task: format!("farming-{farm_index}"),
}
})
}));
Expand Down Expand Up @@ -1053,7 +1053,7 @@ impl SingleDiskFarm {
// Panic will already be printed by now
reading_join_handle.await.map_err(|_error| {
BackgroundTaskError::BackgroundTaskPanicked {
task: format!("reading-{disk_farm_index}"),
task: format!("reading-{farm_index}"),
}
})
}));
Expand Down
14 changes: 7 additions & 7 deletions crates/subspace-farmer/src/utils/plotted_pieces.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use tracing::{trace, warn};

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct PieceDetails {
disk_farm_index: u8,
farm_index: u8,
sector_index: SectorIndex,
piece_offset: PieceOffset,
}
Expand Down Expand Up @@ -57,7 +57,7 @@ impl PlottedPieces {
return None;
}
};
let reader = match self.readers.get(usize::from(piece_details.disk_farm_index)) {
let reader = match self.readers.get(usize::from(piece_details.farm_index)) {
Some(reader) => reader.clone(),
None => {
warn!(?piece_index, ?piece_details, "Plot offset is invalid");
Expand All @@ -73,7 +73,7 @@ impl PlottedPieces {
warn!(
%error,
%piece_index,
disk_farm_index = piece_details.disk_farm_index,
farm_index = piece_details.farm_index,
sector_index = piece_details.sector_index,
"Failed to retrieve piece"
);
Expand All @@ -83,12 +83,12 @@ impl PlottedPieces {
}

/// Add new sector to collect plotted pieces
pub fn add_sector(&mut self, disk_farm_index: u8, plotted_sector: &PlottedSector) {
pub fn add_sector(&mut self, farm_index: u8, plotted_sector: &PlottedSector) {
for (piece_offset, &piece_index) in
(PieceOffset::ZERO..).zip(plotted_sector.piece_indexes.iter())
{
let piece_details = PieceDetails {
disk_farm_index,
farm_index,
sector_index: plotted_sector.sector_index,
piece_offset,
};
Expand All @@ -105,12 +105,12 @@ impl PlottedPieces {
}

/// Delete old sector from plotted pieces (happens on replotting)
pub fn delete_sector(&mut self, disk_farm_index: u8, plotted_sector: &PlottedSector) {
pub fn delete_sector(&mut self, farm_index: u8, plotted_sector: &PlottedSector) {
for (piece_offset, &piece_index) in
(PieceOffset::ZERO..).zip(plotted_sector.piece_indexes.iter())
{
let searching_piece_details = PieceDetails {
disk_farm_index,
farm_index,
sector_index: plotted_sector.sector_index,
piece_offset,
};
Expand Down

0 comments on commit 561fd10

Please sign in to comment.