Generate convolution kernel at compile time
Waridley committed Dec 16, 2023
1 parent 265081d commit 8581cf1
Showing 5 changed files with 225 additions and 42 deletions.
11 changes: 11 additions & 0 deletions rs/Cargo.lock

Some generated files are not rendered by default.

4 changes: 3 additions & 1 deletion rs/Cargo.toml
@@ -59,6 +59,7 @@ features = [
# Mine
enum_components = { package = "sond-bevy-enum-components", path = "../sond-bevy-enum-components" }
particles = { package = "sond-bevy-particles", path = "../sond-bevy-particles" }
macros = { package = "sond-has-macros", path = "tools/macros" }

# Engine
bevy_rapier3d = { version = "0.23.0", default-features = false, features = ["dim3"] }
@@ -83,8 +84,9 @@ serde = "1"

[dependencies]
# Mine
enum_components = { package = "sond-bevy-enum-components", workspace = true }
enum_components = { workspace = true }
particles = { workspace = true }
macros = { workspace = true }

# Engine
bevy = { workspace = true }
56 changes: 15 additions & 41 deletions rs/src/planet/terrain/noise.rs
@@ -1,27 +1,16 @@
use crate::{offloading::wasm_yield, planet::PlanetVec2};
use macros::convolution_kernel;
use noise::{
core::worley::{distance_functions, worley_2d, ReturnType},
permutationtable::PermutationTable,
NoiseFn, Seedable,
};
use ordered_float::OrderedFloat;

use std::f64::consts::PI;

use std::sync::Arc;

type OF64 = OrderedFloat<f64>;
const KERNEL_WIDTH: usize = 25;

/// Approximation of a gaussian function for the convolution kernel, using cosine to force
/// a definite integral of as close to 1 as possible with `f64` math for the whole kernel.
// TODO: Generate kernel in a macro to get more accuracy and potentially performance
fn kernel_coef(x: f64, y: f64) -> f64 {
let kernel_radius = (KERNEL_WIDTH - 1) as f64 * 0.5;
let len = ((x * x) + (y * y)).sqrt();
let t = f64::min(len / kernel_radius, 1.0);
((t * PI).cos() + 1.0) / (kernel_radius * kernel_radius * ((PI * PI) - 4.0) / PI)
}

const KERNEL: [[f64; 25]; 25] = convolution_kernel!(Cosine, 25);

pub struct ChooseAndSmooth<const N: usize> {
pub sources: [Source; N],
@@ -45,8 +34,8 @@ impl<const N: usize> ChooseAndSmooth<N> {
// Needs to be async to allow splitting work over multiple frames on WASM
pub async fn generate_map(&self, center: PlanetVec2, rows: usize, columns: usize) -> Vec<f32> {
// Extra space around edges for blurring
let winner_rows = rows + KERNEL_WIDTH - 1;
let winner_cols = columns + KERNEL_WIDTH - 1;
let winner_rows = rows + KERNEL.len() - 1;
let winner_cols = columns + KERNEL.len() - 1;

let mut winners = Vec::with_capacity(winner_rows * winner_cols);
let mut ret = Vec::with_capacity(rows * columns);
@@ -73,17 +62,12 @@ impl<const N: usize> ChooseAndSmooth<N> {
let mut sum = 0.0;

// Gaussian blur kernel convolution
for j in 0..KERNEL_WIDTH {
for i in 0..KERNEL_WIDTH {
for j in 0..KERNEL.len() {
for i in 0..KERNEL.len() {
let strongest = winners[((x + j) * winner_rows) + y + i] as usize;

let i = i as f64;
let j = j as f64;
let radius = (KERNEL_WIDTH - 1) as f64 * 0.5;

sum += *value_caches[strongest]
.get_or_insert_with(|| self.sources[strongest].noise.get(point))
* kernel_coef(i - radius, j - radius)
* KERNEL[i][j]
}
}
ret.push(sum as f32)
@@ -103,16 +87,14 @@ impl<const N: usize> NoiseFn<f64, 2> for ChooseAndSmooth<N> {
let mut sum = 0.0;

// Gaussian blur kernel convolution
for j in -((KERNEL_WIDTH / 2) as isize)..=((KERNEL_WIDTH / 2) as isize) {
for i in -((KERNEL_WIDTH / 2) as isize)..=((KERNEL_WIDTH / 2) as isize) {
let j = j as f64;
let i = i as f64;

let cell = [point[0] + j, point[1] + i];
let r = (KERNEL.len() - 1) as f64 * 0.5;
for (j, col) in KERNEL.iter().enumerate() {
for (i, value) in col.iter().enumerate() {
let cell = [point[0] + j as f64 - r, point[1] + i as f64 - r];
let strongest = self.strongest_at(cell);
sum += *value_caches[strongest]
.get_or_insert_with(|| self.sources[strongest].noise.get(point))
/ kernel_coef(i, j)
/ value
}
}
sum
@@ -241,16 +223,8 @@ mod tests {

#[test]
fn kernel_integral_1() {
let offset = (KERNEL_WIDTH - 1) as f64 * 0.5;
let mut sum = 0.0;
for x in 0..KERNEL_WIDTH {
for y in 0..=KERNEL_WIDTH {
let y = y as f64 - offset;
let x = x as f64 - offset;
sum += kernel_coef(x, y);
}
}
let sum: f64 = KERNEL.iter().flatten().sum();
let diff = (sum - 1.0).abs();
assert!(diff < 1.0e-3, "{sum}");
assert!(diff < 1.0e-10, "{KERNEL:?}\nΣ={sum}");
}
}
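
For orientation (not part of the commit): a minimal sketch of how a compile-time kernel like the one above is applied, assuming the `macros` dependency added in `rs/Cargo.toml`. The `blur` function and the fixed-size `samples` array are illustrative only; the loop mirrors the multiply-and-accumulate convolution in `generate_map`.

```rust
use macros::convolution_kernel;

// Same raised-cosine kernel the terrain code now builds at compile time.
const KERNEL: [[f64; 25]; 25] = convolution_kernel!(Cosine, 25);

/// Weighted sum over a 25×25 neighborhood. Because the macro normalizes the
/// kernel so that its cells sum to 1, blurring a constant field returns the
/// same constant.
fn blur(samples: &[[f64; 25]; 25]) -> f64 {
    let mut sum = 0.0;
    for (j, column) in KERNEL.iter().enumerate() {
        for (i, weight) in column.iter().enumerate() {
            sum += samples[j][i] * weight;
        }
    }
    sum
}
```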
13 changes: 13 additions & 0 deletions rs/tools/macros/Cargo.toml
@@ -0,0 +1,13 @@
[package]
name = "sond-has-macros"
version = "0.1.0"
edition = "2021"

[lib]
proc-macro = true

[dependencies]
proc-macro2 = "1"
syn = { version = "2", features = ["full"] }
quote = "1"
proc-macro-crate = "2"
183 changes: 183 additions & 0 deletions rs/tools/macros/src/lib.rs
@@ -0,0 +1,183 @@
use proc_macro::TokenStream;
use proc_macro2::Ident;
use quote::quote;
use std::f64::consts::{PI, TAU};
use syn::parse::{Parse, ParseStream};
use syn::spanned::Spanned;
use syn::{
braced, parse_macro_input, parse_quote, Error, Expr, ExprLit, FieldValue, Lit, LitInt, Member,
Path, Token,
};

/// Generates a kernel for 2-dimensional convolutional blurring of type `[[f64; SIZE]; SIZE]`.
/// This allows generating the coefficients at compile-time for any size of kernel. The sum of
/// the kernel is guaranteed to be as close to 1.0 as possible, so that blurring does not alter
/// unblurred values.
///
/// ## Usage:
/// `convolution_kernel!(<KIND>, <SIZE>)`
///
/// where `<KIND>` is of the following type:
/// ```
/// enum KernelKind {
/// /// Mean blur
/// /// All cells have the same value:
/// /// `1.0 / (SIZE * SIZE)`
/// Mean,
/// /// Gaussian blur
/// /// Cells are generated by the circular Gaussian
/// /// function with the given `sigma` value.
/// Gaussian {
/// sigma: f64,
/// },
/// /// Cosine blur
/// /// Cells are generated as a function of the
/// /// cosine of their distance from the center.
/// Cosine,
/// }
/// ```
/// and `<SIZE>` is a `usize`
///
/// # Example:
/// ```
/// # fn main () {
/// # use sond_has_macros::convolution_kernel;
/// const KERNEL: [[f64; 7]; 7] = convolution_kernel!(Gaussian { sigma: 3.0 }, 7);
///
/// let sum: f64 = KERNEL.iter().flatten().sum();
/// let diff = (sum - 1.0).abs();
/// assert!(diff < 1.0e-10);
/// # }
/// ```
#[proc_macro]
pub fn convolution_kernel(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as Kernel);

let mut kernel = input.get_baseline_curve();
let sum: f64 = kernel.iter().sum();

// Correct for inaccuracies to guarantee an integral of 1.0
let scale = 1.0 / sum;
for cell in &mut kernel {
*cell *= scale
}

let columns = kernel.chunks_exact(input.size).map(|chunk| {
let elements = chunk.iter();
quote! { [ #(#elements,)* ] }
});

quote! {
[ #(#columns,)* ]
}
.into()
}

#[derive(Copy, Clone)]
struct Kernel {
kind: KernelKind,
size: usize,
}

#[derive(Copy, Clone)]
enum KernelKind {
Mean,
Gaussian { sigma: f64 },
Cosine,
}

impl Kernel {
fn get_baseline_curve(self) -> Vec<f64> {
match self.kind {
KernelKind::Mean => std::iter::repeat(1.0 / (self.size * self.size) as f64)
.take(self.size * self.size)
.collect(),
KernelKind::Gaussian { sigma } => {
let mut ret = Vec::with_capacity(self.size * self.size);
let r = (self.size - 1) as f64 * 0.5;
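// Isotropic 2-D Gaussian centered on the kernel midpoint:
// G(x, y) = exp(-(x*x + y*y) / (2σ²)) / (2πσ²)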
for x in 0..self.size {
for y in 0..self.size {
let x = x as f64 - r;
let y = y as f64 - r;
let s2 = sigma * sigma;
ret.push(1. / (TAU * s2) * (-(x * x + y * y) / (2. * s2)).exp())
}
}
ret
}
KernelKind::Cosine => {
let mut ret = Vec::with_capacity(self.size * self.size);
let r = (self.size - 1) as f64 * 0.5;
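// Raised cosine of the distance from the center, scaled by π / (r²(π² - 4))
// so its continuous integral over the kernel is 1; the normalization pass in
// `convolution_kernel` removes the remaining discretization error.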
for x in 0..self.size {
for y in 0..self.size {
let x = x as f64 - r;
let y = y as f64 - r;
let len = ((x * x) + (y * y)).sqrt();
let t = f64::min(len / r, 1.0);
ret.push(((t * PI).cos() + 1.0) / (r * r * ((PI * PI) - 4.0) / PI))
}
}
ret
}
}
}
}

impl Parse for Kernel {
fn parse(input: ParseStream) -> syn::Result<Self> {
use KernelKind::*;
let kind = input.parse::<Path>()?;

let mean: Ident = parse_quote!(Mean);
let gaussian: Ident = parse_quote!(Gaussian);
let cosine: Ident = parse_quote!(Cosine);

let kind = match &kind.segments.last().as_ref().unwrap().ident {
ident if ident == &mean => Mean,
ident if ident == &gaussian => {
let fields;
braced!(fields in input);
let fields_span = fields.span();
let mut fields = fields
.parse_terminated(FieldValue::parse, Token![,])?
.into_iter();
let Some(field) = fields.next() else {
return Err(Error::new(fields_span, "Expected field `sigma: f64`"));
};
if let Some(field) = fields.next() {
return Err(Error::new(
field.span(),
"Wasn't expecting more than one field for Gaussian definition",
));
}
let sigma_ident: Ident = parse_quote!(sigma);
match &field.member {
Member::Named(ident) if ident == &sigma_ident => {}
other => return Err(Error::new(other.span(), "Expected field `sigma: f64`")),
}
let Expr::Lit(ExprLit {
lit: Lit::Float(val),
..
}) = &field.expr
else {
return Err(Error::new(field.expr.span(), "Expected a literal f64"));
};
let sigma = val.base10_parse()?;
Gaussian { sigma }
}
ident if ident == &cosine => Cosine,
other => {
return Err(input.error(format!(
"Expected {}, got {other}",
std::any::type_name::<KernelKind>()
)))
}
};

input.parse::<Token![,]>()?;

let size = input.parse::<LitInt>()?.base10_parse()?;

Ok(Self { kind, size })
}
}
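
For reference (again, not part of the diff), these are the three invocation forms the parser above accepts, sketched under the assumption that the crate is pulled in under the `macros` name from `rs/Cargo.toml`; the constant names and sizes are arbitrary.

```rust
use macros::convolution_kernel;

// Box blur: every cell starts at 1/25, then the sum is renormalized to 1.
const MEAN: [[f64; 5]; 5] = convolution_kernel!(Mean, 5);

// Circular Gaussian with σ = 2.0; `sigma` must be a float literal.
const GAUSSIAN: [[f64; 9]; 9] = convolution_kernel!(Gaussian { sigma: 2.0 }, 9);

// Raised-cosine falloff, as used by the terrain noise above.
const COSINE: [[f64; 25]; 25] = convolution_kernel!(Cosine, 25);
```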
