diff --git a/dfdx-core/src/shapes/shape.rs b/dfdx-core/src/shapes/shape.rs
index 184337cd..0fc392e4 100644
--- a/dfdx-core/src/shapes/shape.rs
+++ b/dfdx-core/src/shapes/shape.rs
@@ -117,6 +117,81 @@ where
     }
 }
 
+macro_rules! add_impl {
+    ($X1:expr, $X2:expr, $Y:expr) => {
+        #[cfg(not(feature = "nightly"))]
+        impl core::ops::Add<Const<$X2>> for Const<$X1> {
+            type Output = Const<$Y>;
+            fn add(self, _: Const<$X2>) -> Self::Output {
+                Const
+            }
+        }
+    };
+}
+
+macro_rules! add_impl_2 {
+    ($X1:expr, $X2:expr, $Y:expr) => {
+        add_impl!($X1, $X2, $Y);
+        add_impl!($X2, $X1, $Y);
+    };
+}
+
+add_impl!(1, 1, 2);
+add_impl!(2, 2, 4);
+add_impl!(3, 3, 6);
+add_impl_2!(1, 2, 3);
+add_impl_2!(1, 3, 4);
+add_impl_2!(1, 4, 5);
+add_impl_2!(1, 5, 6);
+add_impl_2!(1, 6, 7);
+add_impl_2!(2, 3, 5);
+add_impl_2!(2, 4, 6);
+add_impl_2!(2, 5, 7);
+add_impl_2!(2, 6, 8);
+
+macro_rules! mul_div_impl {
+    ($X1:expr, $X2:expr, $Y:expr) => {
+        #[cfg(not(feature = "nightly"))]
+        impl core::ops::Mul<Const<$X2>> for Const<$X1> {
+            type Output = Const<$Y>;
+            fn mul(self, _: Const<$X2>) -> Self::Output {
+                Const
+            }
+        }
+
+        #[cfg(not(feature = "nightly"))]
+        impl core::ops::Div<Const<$X2>> for Const<$Y> {
+            type Output = Const<$X1>;
+            fn div(self, _: Const<$X2>) -> Self::Output {
+                Const
+            }
+        }
+    };
+}
+
+macro_rules! mul_div_impl_2 {
+    ($X1:expr, $X2:expr, $Y:expr) => {
+        mul_div_impl!($X1, $X2, $Y);
+        mul_div_impl!($X2, $X1, $Y);
+    };
+}
+
+mul_div_impl!(1, 1, 1);
+mul_div_impl!(2, 2, 4);
+mul_div_impl!(3, 3, 9);
+mul_div_impl!(4, 4, 16);
+mul_div_impl!(5, 5, 25);
+mul_div_impl_2!(1, 2, 2);
+mul_div_impl_2!(1, 3, 3);
+mul_div_impl_2!(2, 3, 6);
+mul_div_impl_2!(1, 4, 4);
+mul_div_impl_2!(2, 4, 8);
+mul_div_impl_2!(3, 4, 12);
+mul_div_impl_2!(1, 5, 5);
+mul_div_impl_2!(2, 5, 10);
+mul_div_impl_2!(3, 5, 15);
+mul_div_impl_2!(4, 5, 20);
+
 /// Represents either `[T; N]` or `Vec<T>`
 pub trait Array<T>: IntoIterator<Item = T> {
     type Dim: Dim;
diff --git a/dfdx-core/src/tensor_ops/conv1d/mod.rs b/dfdx-core/src/tensor_ops/conv1d/mod.rs
index 5fd8e23b..e0b7d9d2 100644
--- a/dfdx-core/src/tensor_ops/conv1d/mod.rs
+++ b/dfdx-core/src/tensor_ops/conv1d/mod.rs
@@ -133,6 +133,40 @@ where
     }
 }
 
+macro_rules! const_try_conv {
+    ($Dim:expr, $Kernel:expr, $Stride:expr, $Padding:expr, $Dilation:expr, out=$Out_dim:expr) => {
+        #[cfg(not(feature = "nightly"))]
+        impl<Groups: Dim> TryConv1D<Const<$Stride>, Const<$Padding>, Const<$Dilation>, Groups>
+            for (Const<$Dim>, Const<$Kernel>)
+        {
+            // ($Dim + 2 * $Padding - $Dilation * ($Kernel - 1) - 1) / $Stride + 1
+            // def compute_output_size(dim, kernel_size, stride, padding, dilation):
+            //     output_size = int(int(dim + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)
+            //     return output_size
+            type Convolved = Const<$Out_dim>;
+
+            fn try_conv1d(
+                self,
+                _: Const<$Stride>,
+                _: Const<$Padding>,
+                _: Const<$Dilation>,
+                _: Groups,
+            ) -> Result<Self::Convolved, Error> {
+                Ok(Const)
+            }
+        }
+    };
+}
+
+const_try_conv!(1, 2, 1, 0, 1, out = 0);
+const_try_conv!(1, 2, 1, 2, 1, out = 4);
+const_try_conv!(2, 2, 1, 0, 1, out = 1);
+const_try_conv!(3, 3, 1, 0, 1, out = 1);
+const_try_conv!(3, 3, 1, 1, 1, out = 3);
+const_try_conv!(5, 2, 2, 1, 2, out = 3);
+
+const_try_conv!(28, 6, 3, 2, 1, out = 9); // needed for a test
+
 impl<Kernel: Dim, Stride: Dim, Padding: Dim, Dilation: Dim, Groups: Dim>
     TryConv1D<Stride, Padding, Dilation, Groups> for (usize, Kernel)
 {
diff --git a/dfdx-core/src/tensor_ops/conv2d/mod.rs b/dfdx-core/src/tensor_ops/conv2d/mod.rs
index c5be9694..ebb0de91 100644
--- a/dfdx-core/src/tensor_ops/conv2d/mod.rs
+++ b/dfdx-core/src/tensor_ops/conv2d/mod.rs
@@ -116,6 +116,7 @@ pub trait TryConv2D<Stride, Padding, Dilation, Groups>: Sized {
     ) -> Result<Self::Convolved, Error>;
 }
 
+#[cfg(feature = "nightly")]
 impl<
     const KERNEL: usize,
     const STRIDE: usize,
@@ -140,6 +141,66 @@ where
     }
 }
 
+macro_rules! const_try_conv {
+    ($Dim:expr, $Kernel:expr, $Stride:expr, $Padding:expr, $Dilation:expr, out=$Out_dim:expr) => {
+        #[cfg(not(feature = "nightly"))]
+        impl<Groups: Dim> TryConv2D<Const<$Stride>, Const<$Padding>, Const<$Dilation>, Groups>
+            for (Const<$Dim>, Const<$Kernel>)
+        {
+            // ($Dim + 2 * $Padding - $Dilation * ($Kernel - 1) - 1) / $Stride + 1
+            // def compute_output_size(dim, kernel_size, stride, padding, dilation):
+            //     output_size = int(int(dim + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)
+            //     return output_size
+            type Convolved = Const<$Out_dim>;
+
+            fn try_conv2d(
+                self,
+                _: Const<$Stride>,
+                _: Const<$Padding>,
+                _: Const<$Dilation>,
+                _: Groups,
+            ) -> Result<Self::Convolved, Error> {
+                Ok(Const)
+            }
+        }
+    };
+}
+
+const_try_conv!(1, 2, 1, 0, 1, out = 0);
+const_try_conv!(2, 2, 1, 0, 1, out = 1);
+const_try_conv!(3, 2, 1, 0, 1, out = 2);
+
+const_try_conv!(1, 2, 1, 2, 1, out = 4);
+
+const_try_conv!(1, 1, 1, 1, 1, out = 3);
+const_try_conv!(1, 2, 1, 1, 1, out = 2);
+const_try_conv!(2, 2, 1, 1, 1, out = 3);
+const_try_conv!(1, 3, 1, 1, 1, out = 1);
+const_try_conv!(2, 3, 1, 1, 1, out = 2);
+const_try_conv!(3, 2, 1, 1, 1, out = 4);
+
+const_try_conv!(5, 3, 1, 0, 1, out = 3);
+
+const_try_conv!(2, 2, 2, 0, 1, out = 1);
+const_try_conv!(3, 2, 2, 0, 1, out = 1);
+const_try_conv!(4, 2, 2, 0, 1, out = 2);
+
+const_try_conv!(4, 2, 1, 0, 2, out = 2);
+const_try_conv!(5, 2, 1, 0, 2, out = 3);
+
+const_try_conv!(2, 3, 3, 4, 1, out = 3);
+const_try_conv!(4, 3, 3, 4, 1, out = 4);
+
+const_try_conv!(6, 2, 4, 3, 1, out = 3);
+const_try_conv!(7, 2, 4, 3, 1, out = 3);
+
+const_try_conv!(14, 3, 1, 0, 1, out = 12);
+const_try_conv!(28, 6, 3, 2, 1, out = 9);
+
+const_try_conv!(3, 3, 1, 0, 1, out = 1);
+const_try_conv!(3, 3, 1, 1, 1, out = 3);
+const_try_conv!(5, 2, 2, 1, 2, out = 3);
+
 impl<Kernel: Dim, Stride: Dim, Padding: Dim, Dilation: Dim, Groups: Dim>
     TryConv2D<Stride, Padding, Dilation, Groups> for (usize, Kernel)
 {
diff --git a/dfdx-core/src/tensor_ops/mod.rs b/dfdx-core/src/tensor_ops/mod.rs
index 453457f4..22c6bf59 100644
--- a/dfdx-core/src/tensor_ops/mod.rs
+++ b/dfdx-core/src/tensor_ops/mod.rs
@@ -284,9 +284,7 @@ pub use var_to::VarTo;
 mod conv1d;
 pub use conv1d::TryConv1D;
 
-#[cfg(feature = "nightly")]
 mod conv2d;
-#[cfg(feature = "nightly")]
 pub use conv2d::TryConv2D;
 
 #[cfg(feature = "nightly")]
@@ -294,7 +292,5 @@ mod convtrans2d;
 #[cfg(feature = "nightly")]
 pub use convtrans2d::TryConvTrans2D;
 
-#[cfg(feature = "nightly")]
 mod pool2d;
-#[cfg(feature = "nightly")]
 pub use pool2d::{Pool2DKind, TryPool2D};
diff --git a/dfdx-core/src/tensor_ops/pool2d/mod.rs b/dfdx-core/src/tensor_ops/pool2d/mod.rs
index 150525c7..53997e73 100644
--- a/dfdx-core/src/tensor_ops/pool2d/mod.rs
+++ b/dfdx-core/src/tensor_ops/pool2d/mod.rs
@@ -77,6 +77,7 @@ pub trait TryPool2D<Kernel, Stride, Padding, Dilation>: Sized {
     ) -> Result<Self::Pooled, Error>;
 }
 
+#[cfg(feature = "nightly")]
 impl<
     const KERNEL: usize,
     const STRIDE: usize,
@@ -100,6 +101,50 @@ where
     }
 }
 
+macro_rules! const_try_pool {
+    ($Dim:expr, $Kernel:expr, $Stride:expr, $Padding:expr, $Dilation:expr, out=$Out_dim:expr) => {
+        #[cfg(not(feature = "nightly"))]
+        impl TryPool2D<Const<$Kernel>, Const<$Stride>, Const<$Padding>, Const<$Dilation>>
+            for Const<$Dim>
+        {
+            // ($Dim + 2 * $Padding - $Dilation * ($Kernel - 1) - 1) / $Stride + 1
+            // def compute_output_size(dim, kernel_size, stride, padding, dilation):
+            //     output_size = int(int(dim + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)
+            //     return output_size
+            type Pooled = Const<$Out_dim>;
+
+            fn try_pool2d(
+                self,
+                _: Pool2DKind,
+                _: Const<$Kernel>,
+                _: Const<$Stride>,
+                _: Const<$Padding>,
+                _: Const<$Dilation>,
+            ) -> Result<Self::Pooled, Error> {
+                Ok(Const)
+            }
+        }
+    };
+}
+
+const_try_pool!(1, 2, 1, 0, 1, out = 0);
+const_try_pool!(2, 2, 1, 0, 1, out = 1);
+const_try_pool!(3, 2, 1, 0, 1, out = 2);
+const_try_pool!(4, 2, 1, 0, 1, out = 3);
+
+const_try_pool!(1, 2, 2, 0, 1, out = 0);
+const_try_pool!(2, 2, 2, 0, 1, out = 1);
+const_try_pool!(3, 2, 2, 0, 1, out = 1);
+const_try_pool!(4, 2, 2, 0, 1, out = 2);
+
+const_try_pool!(1, 1, 2, 0, 1, out = 1);
+const_try_pool!(2, 1, 2, 0, 1, out = 1);
+const_try_pool!(3, 1, 2, 0, 1, out = 2);
+const_try_pool!(4, 1, 2, 0, 1, out = 2);
+
+const_try_pool!(4, 2, 1, 0, 2, out = 2);
+const_try_pool!(5, 2, 1, 0, 2, out = 3);
+
 impl<Kernel: Dim, Stride: Dim, Padding: Dim, Dilation: Dim>
     TryPool2D<Kernel, Stride, Padding, Dilation> for usize
 {