2024-04-02 nightly release (c731673)
2024-04-02 nightly release (c731673)
pytorchbot committed Apr 2, 2024
1 parent 5fb326d commit efe6568
Showing 203 changed files with 13,245 additions and 1,210 deletions.
51 changes: 51 additions & 0 deletions .ci/scripts/build_llama_android.sh
@@ -0,0 +1,51 @@
#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

set -exu

# shellcheck source=/dev/null
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"

install_executorch_and_backend_lib() {
  echo "Installing executorch and xnnpack backend"
  rm -rf cmake-android-out && mkdir cmake-android-out
  ANDROID_NDK=/opt/ndk
  BUCK2=buck2
  ANDROID_ABI=arm64-v8a
  cmake -DBUCK2="${BUCK2}" \
    -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
    -DANDROID_ABI="${ANDROID_ABI}" \
    -DANDROID_PLATFORM=android-23 \
    -DCMAKE_INSTALL_PREFIX=cmake-android-out \
    -DCMAKE_BUILD_TYPE=Release \
    -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
    -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
    -DEXECUTORCH_BUILD_XNNPACK=ON \
    -DEXECUTORCH_BUILD_OPTIMIZED=ON \
    -DXNNPACK_ENABLE_ARM_BF16=OFF \
    -Bcmake-android-out .

  cmake --build cmake-android-out -j4 --target install --config Release
}

build_llama_runner() {
  echo "Building llama runner for Android..."
  ANDROID_ABI=arm64-v8a
  cmake -DBUCK2="${BUCK2}" \
    -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK"/build/cmake/android.toolchain.cmake \
    -DANDROID_ABI="${ANDROID_ABI}" \
    -DANDROID_PLATFORM=android-23 \
    -DCMAKE_INSTALL_PREFIX=cmake-android-out \
    -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=python \
    -DEXECUTORCH_BUILD_OPTIMIZED=ON \
    -Bcmake-android-out/examples/models/llama2 examples/models/llama2

  cmake --build cmake-android-out/examples/models/llama2 -j4 --config Release
}
install_flatc_from_source
install_executorch_and_backend_lib
build_llama_runner
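A minimal sketch of driving the new script from a repo checkout, assuming the NDK really is installed at /opt/ndk (the script hardcodes ANDROID_NDK=/opt/ndk) and a buck2 binary is on the PATH:

# Run from the executorch repo root; the script sources .ci/scripts/utils.sh
# for install_flatc_from_source, so it must be invoked from a full checkout.
bash .ci/scripts/build_llama_android.sh
# Artifacts land in cmake-android-out/, with the llama2 runner built under
# cmake-android-out/examples/models/llama2/.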
17 changes: 15 additions & 2 deletions .ci/scripts/gather_test_models.py
@@ -13,7 +13,6 @@
from examples.models import MODEL_NAME_TO_MODEL
from examples.xnnpack import MODEL_NAME_TO_OPTIONS

-
DEFAULT_RUNNERS = {
    "linux": "linux.2xlarge",
    "macos": "macos-m1-stable",
@@ -24,6 +23,7 @@
    "w2l": "linux.12xlarge",
    "ic4": "linux.12xlarge",
    "resnet50": "linux.12xlarge",
+   "llava_encoder": "linux.4xlarge",
    # This one causes timeout on smaller runner, the root cause is unclear (T161064121)
    "dl3": "linux.12xlarge",
    "emformer_join": "linux.12xlarge",
@@ -83,7 +83,17 @@ def model_should_run_on_event(model: str, event: str) -> bool:
    We put higher priority and fast models to pull request and rest to push.
    """
    if event == "pull_request":
-       return model in ["add", "ic3", "mv2", "mv3", "resnet18", "vit"]
+       return model in ["add", "ic3", "mv2", "mv3", "resnet18", "vit", "llava_encoder"]
    return True
+
+
+def model_should_run_on_target_os(model: str, target_os: str) -> bool:
+    """
+    A helper function to decide whether a model should be tested on a target os (linux/macos).
+    For example, a big model can be disabled in macos due to the limited macos resources.
+    """
+    if target_os == "macos":
+        return model not in ["llava_encoder"]
+    return True
@@ -119,6 +129,9 @@ def export_models_for_ci() -> dict[str, dict]:
        if not model_should_run_on_event(name, event):
            continue

+       if not model_should_run_on_target_os(name, target_os):
+           continue
+
        if backend == "xnnpack":
            if name not in MODEL_NAME_TO_OPTIONS:
                continue
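A sketch of how the new filter composes with the existing event filter; the CLI flags are assumptions inferred from how this script is driven by the workflow matrix, not verified here:

# Hypothetical invocation; --target-os and --event are assumed flag names.
python .ci/scripts/gather_test_models.py --target-os macos --event pull_request
# llava_encoder passes model_should_run_on_event() for pull_request, but
# model_should_run_on_target_os() then drops it from the macOS matrix.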
4 changes: 4 additions & 0 deletions .ci/scripts/test.sh
@@ -67,6 +67,10 @@ test_model() {
    run_portable_executor_runner
    rm "./${MODEL_NAME}.pte"
  fi
+  if [[ "${MODEL_NAME}" == "llava_encoder" ]]; then
+    # Install requirements for llava
+    bash examples/models/llava_encoder/install_requirements.sh
+  fi
  # python3 -m examples.portable.scripts.export --model_name="llama2" should works too
  "${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}"
  run_portable_executor_runner
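For context, a sketch of the call that would exercise the new branch; the positional argument order is an assumption based on how test.sh is invoked elsewhere in CI:

# Hypothetical call: model name first, then build tool.
PYTHON_EXECUTABLE=python bash .ci/scripts/test.sh llava_encoder cmake
# The new branch installs the LLaVA requirements before the export step runs.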
2 changes: 1 addition & 1 deletion .github/workflows/android.yml
@@ -1,4 +1,4 @@
-name: Build ExecuTorch Android demo apps
+name: Android

on:
  push:
15 changes: 4 additions & 11 deletions .github/workflows/apple.yml
@@ -68,6 +68,7 @@ jobs:
FRAMEWORKS=(
  "executorch"
  "coreml_backend"
+  "custom_backend"
  "mps_backend"
  "optimized_backend"
  "portable_backend"
@@ -91,20 +92,12 @@
# Build Release iOS Frameworks
PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output \
-  build/build_apple_frameworks.sh --coreml --mps --optimized --portable --quantized --xnnpack
+  build/build_apple_frameworks.sh --coreml --custom --mps --optimized --portable --quantized --xnnpack

# Bundle Release iOS Frameworks
for FRAMEWORK in "${FRAMEWORKS[@]}"; do (
-  cd cmake-out && zip -r "${RUNNER_TEMP}/artifacts/${FRAMEWORK}-${VERSION}.zip" "${FRAMEWORK}.xcframework"
-) done
-
-# Build Debug iOS Frameworks
-PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output \
-  build/build_apple_frameworks.sh --coreml --mps --optimized --portable --quantized --xnnpack --Debug
-
-# Bundle Debug iOS Frameworks
-for FRAMEWORK in "${FRAMEWORKS[@]}"; do (
-  cd cmake-out && zip -r "${RUNNER_TEMP}/artifacts/${FRAMEWORK}_debug-${VERSION}.zip" "${FRAMEWORK}_debug.xcframework"
+  cd cmake-out && \
+  zip -r "${RUNNER_TEMP}/artifacts/${FRAMEWORK}-${VERSION}.zip" "${FRAMEWORK}.xcframework"
) done

popd
23 changes: 23 additions & 0 deletions .github/workflows/pull.yml
@@ -114,6 +114,29 @@ jobs:
        # Test llama2
        PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M.pt "${BUILD_TOOL}" "${DTYPE}" "${MODE}"

+  test-llama-runner-linux-android:
+    name: test-llama-runner-linux-android
+    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
+    strategy:
+      matrix:
+        include:
+          - build-tool: cmake
+      fail-fast: false
+    with:
+      runner: linux.2xlarge
+      docker-image: executorch-ubuntu-22.04-clang12-android
+      submodules: 'true'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        BUILD_TOOL=${{ matrix.build-tool }}
+        PYTHON_EXECUTABLE=python \
+        bash .ci/scripts/build_llama_android.sh "${BUILD_TOOL}"
+
  test-custom-ops-linux:
    name: test-custom-ops-linux
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
4 changes: 4 additions & 0 deletions .github/workflows/trunk.yml
@@ -159,6 +159,10 @@ jobs:
git config --global user.name "Github Executorch"
bash examples/arm/setup.sh --i-agree-to-the-contained-eula

+# Increase number of files user can monitor to bypass buck failures.
+# Hopefully this is high enough for this setup.
+sudo sysctl fs.inotify.max_user_watches=1048576 # 1024 * 1024
+
# Test ethos-u delegate examples with run.sh
source examples/arm/ethos-u-scratch/setup_path.sh
PYTHON_EXECUTABLE=python bash examples/arm/run.sh examples/arm/ethos-u-scratch/ buck2
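The sysctl line raises the per-user inotify watch limit so buck2's file watcher does not exhaust it. A quick way to inspect and set the limit on any Linux box (standard procfs/sysctl paths, nothing ExecuTorch-specific):

# Read the current limit, then raise it for the running system.
cat /proc/sys/fs/inotify/max_user_watches
sudo sysctl fs.inotify.max_user_watches=1048576
# To persist across reboots on a long-lived machine (unnecessary for
# ephemeral CI runners):
echo 'fs.inotify.max_user_watches=1048576' | sudo tee /etc/sysctl.d/90-inotify.conf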
2 changes: 1 addition & 1 deletion .github/workflows/update-viablestrict.yml
@@ -20,6 +20,6 @@ jobs:
    with:
      repository: pytorch/executorch
      stable-branch: viable/strict
-     requires: '[\"pull\", \"lint\", \"trunk\", \"Build documentation\"]'
+     requires: '[\"pull\", \"lint\", \"trunk\", \"Build documentation\", \"Android\", \"Apple\"]'
      secret-bot-token: ${{ secrets.UPDATEBOT_TOKEN }}
      rockset-api-key: ${{ secrets.ROCKSET_API_KEY }}
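The requires value is an escaped JSON array naming the workflows that must all be green before a commit is promoted to viable/strict; "Android" matches the workflow renamed above, and "Apple" presumably matches the apple.yml workflow's display name. Decoded, for illustration:

# The YAML-escaped string decodes to a plain JSON array:
echo '["pull", "lint", "trunk", "Build documentation", "Android", "Apple"]' | jq .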
6 changes: 1 addition & 5 deletions .gitignore
@@ -5,12 +5,8 @@ cmake-android-out/
cmake-ios-out/
ethos-u-scratch/
executorch.egg-info
+pip-out/
__pycache__/
-build/lib/
-exir/_serialize/scalar_type.fbs
-exir/_serialize/program.fbs
-sdk/bundled_program/serialize/bundled_program_schema.fbs
-sdk/bundled_program/serialize/scalar_type.fbs

# Any exported models and profiling outputs
*.pte
6 changes: 6 additions & 0 deletions .gitmodules
@@ -59,3 +59,9 @@
	path = third-party/lm-evaluation-harness
	url = https://github.com/EleutherAI/lm-evaluation-harness
	branch = v0.4.1
+[submodule "kernels/optimized/third-party/eigen"]
+	path = kernels/optimized/third-party/eigen
+	url = https://gitlab.com/libeigen/eigen.git
+[submodule "examples/third-party/LLaVA"]
+	path = examples/third-party/LLaVA
+	url = https://github.com/haotian-liu/LLaVA.git
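With the two submodules registered, an existing clone picks them up with the standard sync/update sequence:

# Fetch only the newly added submodules into an existing checkout.
git submodule sync
git submodule update --init \
  kernels/optimized/third-party/eigen \
  examples/third-party/LLaVA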
81 changes: 75 additions & 6 deletions CMakeLists.txt
@@ -13,6 +13,16 @@
# cloning or pulling the upstream repo. Once this is done, you don't need to do
# it again until you pull from the upstream repo again.
#
+# NOTE: Build options can be configured by passing arguments to cmake. For
+# example, to enable the EXECUTORCH_BUILD_XNNPACK option, change the cmake
+# command to 'cmake -DEXECUTORCH_BUILD_XNNPACK=ON ..'.
+#[[
+  (rm -rf cmake-out \
+    && mkdir cmake-out \
+    && cd cmake-out \
+    && cmake ..)
+]]
+#
# ### Build ###
#
# NOTE: The `-j` argument specifies how many jobs/processes to use when
@@ -45,6 +55,7 @@ endif()
if(NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE Debug)
endif()
+
# ------------------------------ OPTIONS -------------------------------------
# WARNING: Please don't add example specific options in this CMakeLists.txt.
# Instead please use `find_package(executorch REQUIRED)` in the example
@@ -131,6 +142,8 @@ option(EXECUTORCH_BUILD_ARM_BAREMETAL

option(EXECUTORCH_BUILD_COREML "Build the Core ML backend" OFF)

+option(EXECUTORCH_BUILD_CUSTOM "Build the custom kernels" OFF)
+
option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "Build the Data Loader extension"
       OFF)

@@ -159,6 +172,61 @@ option(EXECUTORCH_BUILD_XNNPACK "Build the XNNPACK backend" OFF)

option(EXECUTORCH_BUILD_VULKAN "Build the Vulkan backend" OFF)

+#
+# pthreadpool: build pthreadpool library. Disable on unsupported platforms
+#
+cmake_dependent_option(EXECUTORCH_BUILD_PTHREADPOOL "Build pthreadpool library."
+                       ON "NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF)
+
+#
+# cpuinfo: build cpuinfo library. Disable on unsupported platforms
+#
+cmake_dependent_option(EXECUTORCH_BUILD_CPUINFO "Build cpuinfo library." ON
+                       "NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF)
+
+if(EXECUTORCH_BUILD_CPUINFO)
+  # --- cpuinfo
+  set(CPUINFO_SOURCE_DIR "backends/xnnpack/third-party/cpuinfo")
+  set(CPUINFO_BUILD_TOOLS
+      OFF
+      CACHE BOOL "")
+  set(CPUINFO_BUILD_UNIT_TESTS
+      OFF
+      CACHE BOOL "")
+  set(CPUINFO_BUILD_MOCK_TESTS
+      OFF
+      CACHE BOOL "")
+  set(CPUINFO_BUILD_BENCHMARKS
+      OFF
+      CACHE BOOL "")
+  set(CPUINFO_LIBRARY_TYPE
+      "static"
+      CACHE STRING "")
+  set(CPUINFO_LOG_LEVEL
+      "error"
+      CACHE STRING "")
+  set(CLOG_SOURCE_DIR "${CPUINFO_SOURCE_DIR}/deps/clog")
+  add_subdirectory("${CPUINFO_SOURCE_DIR}")
+endif()
+
+if(EXECUTORCH_BUILD_PTHREADPOOL)
+  # --- pthreadpool
+  set(PTHREADPOOL_SOURCE_DIR "backends/xnnpack/third-party/pthreadpool")
+  set(PTHREADPOOL_BUILD_TESTS
+      OFF
+      CACHE BOOL "")
+  set(PTHREADPOOL_BUILD_BENCHMARKS
+      OFF
+      CACHE BOOL "")
+  set(PTHREADPOOL_LIBRARY_TYPE
+      "static"
+      CACHE STRING "")
+  set(PTHREADPOOL_ALLOW_DEPRECATED_API
+      ON
+      CACHE BOOL "")
+  add_subdirectory("${PTHREADPOOL_SOURCE_DIR}")
+endif()
+
if(NOT PYTHON_EXECUTABLE)
  resolve_python_executable()
endif()
@@ -305,6 +373,12 @@ endif()
#
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/portable)

+if(EXECUTORCH_BUILD_CUSTOM)
+  # TODO: move all custom kernels to ${CMAKE_CURRENT_SOURCE_DIR}/kernels/custom
+  add_subdirectory(
+    ${CMAKE_CURRENT_SOURCE_DIR}/examples/models/llama2/custom_ops)
+endif()
+
if(EXECUTORCH_BUILD_OPTIMIZED)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/optimized)
endif()
@@ -398,11 +472,6 @@ if(EXECUTORCH_BUILD_VULKAN)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/vulkan)
endif()

-if(EXECUTORCH_BUILD_ANDROID_JNI)
-  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/examples/models/llama2/runner)
-  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/android)
-endif()
-
if(EXECUTORCH_BUILD_QNN)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/qualcomm)
endif()
@@ -445,7 +514,7 @@ if(EXECUTORCH_BUILD_PYBIND)
  # find pytorch lib, to allow pybind to take at::Tensor as input/output
  find_package(Torch CONFIG REQUIRED)
  find_library(TORCH_PYTHON_LIBRARY torch_python
-    PATHS "${TORCH_INSTALL_PREFIX}/lib")
+               PATHS "${TORCH_INSTALL_PREFIX}/lib")

  # compile options for pybind
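A sketch of a configure-and-build that exercises the options this change introduces; the flag values are illustrative, and EXECUTORCH_BUILD_PTHREADPOOL / EXECUTORCH_BUILD_CPUINFO already default to ON except on ARM baremetal:

# Configure with the new custom-kernels option enabled explicitly.
(rm -rf cmake-out && mkdir cmake-out && cd cmake-out && \
  cmake -DEXECUTORCH_BUILD_CUSTOM=ON ..)
cmake --build cmake-out -j4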
19 changes: 12 additions & 7 deletions Package.swift
@@ -11,13 +11,14 @@ import PackageDescription

let url = "https://ossci-ios.s3.amazonaws.com/executorch"
let version = "0.1.0"
-let coreml_sha256 = "e8c5000a389bdc98274aa0b359350a47e6d0cccb8af5efc46f814feac6afaf86"
-let executorch_sha256 = "e6c5d798b614a03ab8a4891caeaa8a7adf8d58ba29e767079321691ec9f1ffb4"
-let mps_sha256 = "3e54e3166b5e739cb3f76b2bc6f7b1982a0401821ab785a93120bacfde4bc1ee"
-let optimized_sha256 = "4d353f44badd321cf29fe548db9d66b493b93c6233a7e023988e256f0eefeaa1"
-let portable_sha256 = "c501f9b644a3e8a7bab62600b7802e4a9752fb789ba4fd02f46bec47858cec07"
-let quantized_sha256 = "4fb5f7216abc0ee16ece91a4bce822b06d67b52ca985c9eecbf9d3f8bd1ea1ba"
-let xnnpack_sha256 = "e610904cfd6e96f8f738c25a7bb4f6d7b86995b2cfeb72fc1f30523630dbb285"
+let coreml_sha256 = "78d853d87be478696e56e658aa4ff17d47ae185a9a6a36316c821fa8b2d3aacd"
+let custom_sha256 = "f059f6716298403dff89a952a70e323c54911be140d05f2467bd5cc61aaefae3"
+let executorch_sha256 = "ba9a0c2b061afaedbc3c5454040a598b1371170bd9d9a30b7163c20e23339841"
+let mps_sha256 = "39542a8671cca1aa627102aa47785d0f6e2dfe9a40e2c22288a755057b00fbfa"
+let optimized_sha256 = "1d84fa16197bb6f0dec01aaa29d2a140c0e14d8e5e92630a7b4dd6f48012506d"
+let portable_sha256 = "4993904f89ecb4476677ff3c072ed1a314a608170f10d364cfd23947851ccbf3"
+let quantized_sha256 = "8d35ee0e7ca77c19782eaea07a1888f576cda679f8a4a5edb03d80ebe858047e"
+let xnnpack_sha256 = "380e5185c4c48ede7cc0d0f0657ffb26df83cd9f55813d78593aea8a93942caf"

struct Framework {
    let name: String
@@ -57,6 +58,10 @@ let frameworks = [
            "sqlite3",
        ]
    ),
+    Framework(
+        name: "custom_backend",
+        checksum: custom_sha256
+    ),
    Framework(
        name: "executorch",
        checksum: executorch_sha256
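The pinned checksums let a consumer verify a downloaded archive out of band. A sketch, assuming the artifact URL follows the url/name-version.zip layout implied by the constants above and the bundling step in apple.yml:

# Assumed URL layout: ${url}/${framework}-${version}.zip
curl -sLO "https://ossci-ios.s3.amazonaws.com/executorch/custom_backend-0.1.0.zip"
shasum -a 256 custom_backend-0.1.0.zip
# Expected output: f059f6716298403dff89a952a70e323c54911be140d05f2467bd5cc61aaefae3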