fix: add more lint tests and fix flattening of requirements #2166

Merged (3 commits) on Nov 24, 2024
Changes from 1 commit
12 changes: 12 additions & 0 deletions conda_smithy/linter/lints.py
@@ -21,6 +21,7 @@
TEST_FILES,
TEST_KEYS,
_lint_recipe_name,
flatten_v1_if_else,
get_section,
is_selector_line,
jinja_lines,
@@ -434,6 +435,12 @@ def lint_single_space_in_pinned_requirements(
and requirements
):
requirements = requirements[0].get("from_package", [])

# we can have `if` statements in the v1 requirements and we need to
# flatten them
if recipe_version == 1:
requirements = flatten_v1_if_else(requirements)

for requirement in requirements or []:
if recipe_version == 1:
req = requirement
@@ -444,6 +451,7 @@

if symbol_to_check in req:
continue
print(req)
wolfv marked this conversation as resolved.
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
@@ -803,6 +811,10 @@ def flatten_reqs(reqs):
all_run_reqs_flat += flatten_reqs(output_run_reqs)
all_contraints_flat += flatten_reqs(output_contraints)

if recipe_version == 1:
all_build_reqs = [flatten_v1_if_else(reqs) for reqs in all_build_reqs]
all_build_reqs_flat = flatten_v1_if_else(all_build_reqs_flat)

# this check needs to be done per output --> use separate (unflattened) requirements
for build_reqs in all_build_reqs:
has_compiler = any(pat_compiler_stub.match(rq) for rq in build_reqs)
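Note: the pin-spacing hunk above only works on plain strings, which is why the v1 requirements are flattened first. A rough sketch of the check that then applies (the operator list here is abbreviated and partly assumed; the diff only shows it starting with "!="):

# Illustrative only: after flattening, every requirement is a plain string and
# the existing check applies. It flags pins whose operator is separated from
# the version by a space, e.g. "zlib >= 1.2" instead of "zlib >=1.2".
for req in ["python >=3.8", "zlib >= 1.2"]:
    parts = req.split()
    # operator list abbreviated/assumed for this sketch
    if len(parts) > 2 and parts[1] in ["!=", "==", ">=", "<=", ">", "<", "~="]:
        print(f"pin should use a single space: {req!r}")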
11 changes: 11 additions & 0 deletions conda_smithy/linter/utils.py
@@ -228,3 +228,14 @@ def load_linter_toml_metdata_internal(time_salt):
return None
hints_toml_str = hints_toml_req.content.decode("utf-8")
return tomllib.loads(hints_toml_str)


def flatten_v1_if_else(requirements: List[str | Dict]) -> List[str]:
    flattened_requirements = []
    for req in requirements:
        if isinstance(req, dict):
            flattened_requirements.extend(req["then"])
            flattened_requirements.extend(req.get("else") or [])
        else:
            flattened_requirements.append(req)
    return flattened_requirements
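A minimal usage sketch of the new helper (importable from conda_smithy.linter.utils once this PR lands, as the lints.py hunk above does). Entries under "then" come first, followed by any "else" entries:

from conda_smithy.linter.utils import flatten_v1_if_else

# A v1 `if:` selector parses to a dict with "if"/"then"/"else" keys, while
# plain requirements stay strings; the helper reduces the mix to strings.
reqs = [
    {"if": "unix", "then": ["make"], "else": ["m2-make"]},
    "cmake",
    "ninja",
]
assert flatten_v1_if_else(reqs) == ["make", "m2-make", "cmake", "ninja"]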
84 changes: 84 additions & 0 deletions tests/recipes/v1_recipes/ada-url.yaml
@@ -0,0 +1,84 @@
context:
version: "2.9.2"
build: 0

package:
name: ada-url
version: ${{ version }}

source:
url: "https://github.com/ada-url/ada/archive/refs/tags/v${{ version }}.tar.gz"
sha256: f41575ad7eec833afd9f6a0d6101ee7dc2f947fdf19ae8f1b54a71d59f4ba5ec

build:
number: ${{ build|int + (microarch_level|default('0'))|int * 100 }}
script:
- if: win
then: |
cmake -G Ninja ^
%CMAKE_ARGS% ^
-DADA_TESTING=OFF ^
-DADA_TOOLS=OFF ^
-DADA_BENCHMARKS=OFF ^
-B build -S %SRC_DIR%
cmake --build build --target install
else: |
cmake -G Ninja \
$CMAKE_ARGS \
-DADA_TESTING=OFF \
-DADA_TOOLS=OFF \
-DADA_BENCHMARKS=OFF \
-S $SRC_DIR -B build
cmake --build build --target install

requirements:
build:
- if: (microarch_level|default('0'))|int > 0
then: "${{ 'x86_64-microarch-level ' ~ (microarch_level|default('0')) }}"
- "${{ compiler('cxx') }}"
- "${{ stdlib('c') }}"
- cmake
- ninja

tests:
- script:
- if: win
then: |
cmake -GNinja %CMAKE_ARGS% -B build -S .
cmake --build build
.\build\main.exe
else: |
cmake -GNinja $CMAKE_ARGS -B build -S .
cmake --build build
./build/main
requirements:
build:
- "${{ compiler('cxx') }}"
- "${{ stdlib('c') }}"
- cmake
- ninja

files:
recipe:
- CMakeLists.txt

- package_contents:
include:
- ada.h

about:
homepage: "https://ada-url.com/"
summary: 'WHATWG-compliant and fast URL parser written in modern C++'
description: |
WHATWG-compliant and fast URL parser written in modern C++, part of
Node.js, Clickhouse, Redpanda, Kong, Telegram and Cloudflare Workers.
license: "MIT OR Apache-2.0"
license_file:
- LICENSE-APACHE
- LICENSE-MIT
documentation: https://ada-url.com/introduction/
repository: https://github.com/ada-url/ada

extra:
recipe-maintainers:
- rHermes
186 changes: 186 additions & 0 deletions tests/recipes/v1_recipes/torchaudio.yaml
@@ -0,0 +1,186 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/prefix-dev/recipe-format/main/schema.json

context:
name: torchaudio
version: "2.4.1"
build_number: 2
cuda_version: ${{ env.get("CONDA_OVERRIDE_CUDA", default="None") }}
cuda: ${{ "true" if cuda_version != "None" else "false" }}
cuda_build_string: cuda_${{ cuda_version | version_to_buildstring }}
string_prefix: ${{ cuda_build_string if cuda == "true" else "cpu_" }}

package:
name: ${{ name|lower }}
version: ${{ version }}

source:
- url: https://github.com/pytorch/audio/archive/refs/tags/v${{ version }}.tar.gz
sha256: a6b7e1b606ae353b9c4db1c8e754263f53f0457e9ab538e9669c0af0c46a0d5c
patches:
- patches/0001-point-to-correct-prefix.patch
- patches/0002-use-conda-cuda.patch
- patches/0003-Apply-CMAKE_ARGS-if-set.patch
- patches/0004-Add-missing-skipIfNoFFmpeg-for-TestFileObject.patch
- patches/0005-replace-FLT_MAX-for-compatibility-with-newer-cudatoo.patch

build:
number: ${{ build_number }}
skip:
- win
- match(cuda_compiler_version, "11.8") and aarch64
string: ${{ string_prefix }}py${{ python | version_to_buildstring }}h${{ hash }}_${{ build_number }}
variant:
use_keys:
# use cuda from the variant config, e.g. to build multiple CUDA variants
- ${{ "cuda" if cuda == "true" }}
# this will down-prioritize the cuda variant versus other variants of the package
down_prioritize_variant: ${{ 1 if cuda == "true" else 0 }}
script:
file: build
env:
cuda_compiler_version: ${{ cuda_version | default('None') }}

requirements:
build:
- if: build_platform != target_platform
then:
- python
- cross-python_${{ target_platform }}
- pytorch

- if: match(cuda_version, ">=12")
then:
- cuda-driver-dev
- cuda-cudart-dev
- cuda-nvrtc-dev
- cuda-nvtx-dev
- cuda-nvml-dev
- cuda-profiler-api
- libcublas-dev
- libcufft-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev

- ${{ compiler('cxx') }}
- ${{ compiler('c') }}
- ${{ stdlib("c") }}
- cmake
- ninja
- ccache
- git

- if: cuda == "true"
then:
- ${{ compiler('cuda') }}
- cuda-version ==${{ cuda_version }}

host:
- python
- pip
- setuptools
- pytorch
- ${{ "pytorch * cuda*" if cuda == "true" }}
- ${{ "pytorch * cpu*" if cuda == "false" }}
- bzip2
- kaldi
- pybind11
# - sox
# - ffmpeg
- xz
- zlib

- if: cuda == "true"
then:
- cuda-version ==${{ cuda_version }}

- if: match(cuda_version, ">=12")
then:
- cuda-driver-dev
- cuda-cudart-dev
- cuda-nvrtc-dev
- cuda-nvtx-dev
- cuda-nvml-dev
- cuda-profiler-api
- libcublas-dev
- libcufft-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
run:
- python
- numpy
- kaldi
- ${{ "pytorch * cuda*" if cuda == "true" }}
- ${{ "pytorch * cpu*" if cuda == "false" }}

ignore_run_exports:
from_package:
- if: match(cuda_version, ">=12")
then:
- cuda-nvrtc-dev
- cuda-nvtx-dev
- libcublas-dev
- libcufft-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev

tests:
- python:
imports:
- torchaudio
- torchaudio.backend
- torchaudio.compliance
- torchaudio.datasets
- torchaudio.functional
- torchaudio.models
- torchaudio.pipelines
- torchaudio.kaldi_io
- torchaudio.utils
- torchaudio.sox_effects
- torchaudio.transforms
pip_check: true

- requirements:
run:
- pytest
- scipy
- numpy
- librosa
- expecttest
- requests
- hypothesis
- inflect
# gpu version of kaldi tries to load libcuda, which we don't have
- kaldi * cpu*
- kaldi_io
- parameterized
- pysoundfile
- transformers
- unidecode
- inflect
# - sox
- pytorch-lightning
- sentencepiece
files:
source:
- test/
- examples/
recipe:
- run_tests.sh
script:
- bash run_tests.sh

about:
homepage: https://github.com/pytorch/audio
license: BSD-2-Clause
license_file:
- LICENSE
- third_party/LICENSES_BUNDLED.txt
summary: Data manipulation and transformation for audio signal processing, powered by PyTorch

extra:
recipe-maintainers:
- Tobias-Fischer
- h-vetinari
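Note: this fixture is what exercises the new flattening in the compiler/stdlib lint. Roughly (and hypothetically) how its build requirements look once parsed, abbreviated to a few entries:

# Abbreviated, hypothetical parse of the torchaudio build requirements above:
# `if:` blocks arrive as dicts, plain entries as strings.
build_reqs = [
    {"if": "build_platform != target_platform",
     "then": ["python", "cross-python_${{ target_platform }}", "pytorch"]},
    "${{ compiler('cxx') }}",
    "${{ compiler('c') }}",
    "cmake",
]
# After flatten_v1_if_else(build_reqs), every entry is a string, so the
# compiler-stub matching in lints.py no longer trips over the dicts.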
8 changes: 8 additions & 0 deletions tests/test_lint_recipe.py
@@ -2687,6 +2687,14 @@ def test_v1_recipes():
lints, hints = linter.main(str(recipe_dir), return_hints=True)
assert not lints

    with get_recipe_in_dir("v1_recipes/torchaudio.yaml") as recipe_dir:
        lints, hints = linter.main(str(recipe_dir), return_hints=True)
        assert not lints

    with get_recipe_in_dir("v1_recipes/ada-url.yaml") as recipe_dir:
        lints, hints = linter.main(str(recipe_dir), return_hints=True)
        assert not lints


def test_v1_recipes_ignore_run_exports():
with get_recipe_in_dir(