diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c20c2ab
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+__pycache__
+
diff --git a/README.md b/README.md
index 4e4b6e9..15d62da 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,127 @@
-# RT-AK-plugin-tflm
-RT-AK plugin for Tensorflow Lite Micro
+# RT-AK: tflm
+
+ [中文](./README.md)
+
+- [Introduction](#introduction)
+- [Directory Structure](#directory-structure)
+- [Command-Line Arguments](#command-line-arguments)
+- [Plugin Installation](#plugin-installation)
+- [Running RT-AK from the Command Line](#running-rt-ak-from-the-command-line)
+- [Plugin Internal Workflow](#plugin-internal-workflow)
+
+## Introduction
+
+> Date: 2021/09/29
+>
+> ```shell
+> # Template
+> python aitools.py --project --model_name --platform tflm
+>
+> # Example
+> python aitools.py --project="D:\Code\Project\imx6ullrtak\imx6ull" --model="./Models/facelandmark.tflite" --platform=tflm
+> ```
+>
+
+> Date: 2021/09/29
+>
+> Update: This version of the plugin does not yet support model conversion; it uses a pre-converted header-file array. Conversion support will be added in a later release.
+
+*This project is a submodule of the main `RT-AK` project.*
+
+The plugin uses `tensorflow lite micro` as its inference backend.
+
+- Model support: `TFLite`
+- Operator support: reference kernels
+
+For a worked example of this plugin, see:
+
+- Tutorial: docs/RT-AK之tflm快速上手.md
+
+## Directory Structure
+
+```shell
+% tree -L 2 tflm
+tflm
+├── backend_plugin_tflm
+│   ├── backend_tflm.c
+│   ├── backend_tflm.h
+│   └── readme.md
+├── config.py                   # Configuration used to generate `rt_ai__model.h`, saved under /applications
+├── generate_rt_ai_model_h.py   # Generates `rt_ai__model.h`, saved under /applications
+├── gen_rt_ai_model_c.py        # Generates `rt_ai__model.c`, saved under /applications
+├── __init__.py
+├── plugin_tflm_parser.py       # Arguments required to run the plugin
+├── plugin_tflm.py              # Main entry point of the plugin
+├── prepare_work.py             # Creates two folders for the TFLite library and c-model files; loads the matching Sconscript
+├── README.md
+├── Sconscripts                 # Build scripts that join the project's `scons` build after model conversion
+│   ├── Middlewares
+│   └── TFLite
+└── TensorflowLiteMicro         # TFLite inference backend source code
+```
+
+## Command-Line Arguments
+
+$$
+\text{RT-AK command-line arguments} = \text{RT-AK base arguments} + \text{tflm plugin arguments}
+$$
+
+- RT-AK base arguments: [link](https://github.com/RT-Thread/RT-AK/tree/main/RT-AK/rt_ai_tools#0x03-%E5%8F%82%E6%95%B0%E8%AF%B4%E6%98%8E)
+
+- The arguments specific to the RT-AK tflm plugin are described below; see `plugin_tflm_parser.py` for details.
+
+Pay particular attention to the parameters in bold and their descriptions.
+
+
+| Parameter           | Description                                                  |
+| ------------------- | ------------------------------------------------------------ |
+| `--tflite`          | Inference backend required at run time; defaults to `./platforms/plugin_imx6ull/TensorflowLiteMicro` |
+| `--tflm_out`        | Path of the generated intermediate folder; defaults to a timestamp of the current day |
+| **--enable_rt_lib** | **Enables a macro definition in `project/rtconfig.h`; defaults to `RT_AI_USE_imx6ull`** |
+| `--clear`           | Whether to delete the generated intermediate folder `tflm_out`; defaults to `False` |
+
+## Plugin Installation
+
+The plugin does not need to be installed manually.
+
+Just clone the main project: [RT-AK](https://github.com/RT-Thread/RT-AK)
+
+Change into the `RT-AK/rt_ai_tools` directory,
+
+then run `python aitools.py --xxx` with the `platform` argument set to `tflm`; the plugin is downloaded automatically.
+
+## Running RT-AK from the Command Line
+
+Run the program from the `xxx/RTAK/tools` directory.
+
+![](https://gitee.com/lebhoryi/PicGoPictureBed/raw/master/img/20210223145923.png)
+
+```shell
+# Basic command
+python aitools.py --project --model_name --platform tflm
+
+# Example
+python aitools.py --project="D:\Code\Project\imx6ullrtak\imx6ull" --model="./Models/speech_case.tflite" --platform=tflm --clear
+```
+
+![run_example](./docs/pic/run_example.png)
+
+
+For a complete hands-on walkthrough, read: [RT-AK之tflm快速上手.md](./docs/RT-AK之tflm快速上手.md)
+
+## Plugin Internal Workflow
+
+- [ ] Model quantization
+
+- [ ] Check whether the model is supported
+- [ ] Check whether the `CPU` is supported (currently hard-coded to tflm)
+- [x] Create the folders for the inference backend and the `c-model` under `tflm_out`
+- [x] Convert the model into a `c-model` (see the sketch below), saved under the `/TFLite` path
+- [x] Generate `rt_ai__model.h`, saved in `project/applications`
+- [x] Generate `rt_ai__model.c`, saved in `project/applications`
+- [x] Copy the two key folders from `tflm_out` into `project`
+- [x] Decide whether to delete `tflm_out`
+- [ ] Parse the model's input and output dimensions
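+
+The `c-model` step above amounts to embedding the `.tflite` FlatBuffer in a C array. Below is a minimal sketch of that conversion; the helper name `tflite_to_c_array` and the output layout are illustrative only, not the plugin's actual implementation (see `gen_rt_ai_model_c.py` for that):
+
+```python
+from pathlib import Path
+
+def tflite_to_c_array(tflite_path, out_path, var_name="g_model_data"):
+    """Dump a .tflite FlatBuffer as a const C byte array (illustrative helper)."""
+    data = Path(tflite_path).read_bytes()
+    rows = []
+    for i in range(0, len(data), 12):
+        # Emit 12 bytes per row as 0x?? literals.
+        rows.append("    " + ", ".join("0x%02x" % b for b in data[i:i + 12]) + ",")
+    Path(out_path).write_text(
+        "const unsigned char %s[] = {\n%s\n};\n"
+        "const unsigned int %s_len = %d;\n"
+        % (var_name, "\n".join(rows), var_name, len(data))
+    )
+
+tflite_to_c_array("./Models/speech_case.tflite", "rt_ai_model.c")
+```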
diff --git a/Sconscripts/Middlewares b/Sconscripts/Middlewares
new file mode 100644
index 0000000..72b1289
--- /dev/null
+++ b/Sconscripts/Middlewares
@@ -0,0 +1,53 @@
+# for module compiling
+import os
+Import('RTT_ROOT')
+from building import *
+
+cwd = GetCurrentDir()
+objs = []
+list = os.listdir(cwd)
+
+for d in list:
+ path = os.path.join(cwd, d)
+ if os.path.isfile(os.path.join(path, 'SConscript')):
+ objs = objs + SConscript(os.path.join(d, 'SConscript'))
+
+Return('objs')
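+
+# NOTE: the string literal below is dead code, apparently retained for reference
+# from the STM32 plugin's library-linking SConscript; it is never executed.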
+"""
+import os, sys
+import rtconfig
+from building import *
+
+cwd = GetCurrentDir()
+CPPPATH = [cwd + '/TF']
+#LIBPATH = [cwd + '/ST/AI/Lib']
+#lib_file_path = os.listdir(LIBPATH[0])
+# split filenames without suffix, only stem
+#lib_files = list()
+# lib_files =
+# ['libNetworkRuntime520_CM4_GCC_PIC', 'NetworkRuntime520_CM4_IAR', 'NetworkRuntime520_CM4_Keil']
+#for i in range(len(lib_file_path)):
+# stem = os.path.splitext(lib_file_path[i])[0]
+# lib_files.append(stem)
+
+# cross_tool provides the cross compiler
+# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
+
+for item in lib_files:
+ if "GCC" in item and rtconfig.CROSS_TOOL == 'gcc':
+ # libNetworkRuntime520_CM7_GCC_PIC --> NetworkRuntime520_CM7_GCC_PIC
+ LIBS = [item[3:]]
+ elif "Keil" in item and rtconfig.CROSS_TOOL == 'keil':
+ LIBS = [item]
+ elif "IAR" in item and rtconfig.CROSS_TOOL == 'iar':
+ LIBS = [item]
+
+
+src = []
+
+#group = DefineGroup('Middlewares', src, depend = [''], CPPPATH = CPPPATH, LIBS = LIBS, LIBPATH = LIBPATH)
+group = DefineGroup('Middlewares', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
+
+"""
\ No newline at end of file
diff --git a/Sconscripts/TFLite b/Sconscripts/TFLite
new file mode 100644
index 0000000..bf71f6c
--- /dev/null
+++ b/Sconscripts/TFLite
@@ -0,0 +1,10 @@
+import rtconfig
+from building import *
+
+cwd = GetCurrentDir()
+CPPPATH = [cwd, cwd + '/App']
+src = Glob('App/*.c')
+
+group = DefineGroup('TFLite', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/TensorflowLiteMicro/.gitignore b/TensorflowLiteMicro/.gitignore
new file mode 100644
index 0000000..7049028
--- /dev/null
+++ b/TensorflowLiteMicro/.gitignore
@@ -0,0 +1,53 @@
+# Prerequisites
+*.d
+
+# Object files
+*.o
+*.ko
+*.obj
+*.elf
+
+# Linker output
+*.ilk
+*.map
+*.exp
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Libraries
+*.lib
+*.a
+*.la
+*.lo
+
+# Shared objects (inc. Windows DLLs)
+*.dll
+*.so
+*.so.*
+*.dylib
+
+# Executables
+*.exe
+*.out
+*.app
+*.i*86
+*.x86_64
+*.hex
+
+# Debug files
+*.dSYM/
+*.su
+*.idb
+*.pdb
+.vscode
+
+# Kernel Module Compile Results
+*.mod*
+*.cmd
+.tmp_versions/
+modules.order
+Module.symvers
+Mkfile.old
+dkms.conf
diff --git a/TensorflowLiteMicro/LICENSE b/TensorflowLiteMicro/LICENSE
new file mode 100644
index 0000000..40f8c34
--- /dev/null
+++ b/TensorflowLiteMicro/LICENSE
@@ -0,0 +1,203 @@
+Copyright 2019 The TensorFlow Authors. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/TensorflowLiteMicro/SConscript b/TensorflowLiteMicro/SConscript
new file mode 100644
index 0000000..af1d558
--- /dev/null
+++ b/TensorflowLiteMicro/SConscript
@@ -0,0 +1,16 @@
+# RT-Thread building script for bridge
+
+import os
+from building import *
+
+cwd = GetCurrentDir()
+objs = []
+list = os.listdir(cwd)
+
+print("TF in")
+for d in list:
+ path = os.path.join(cwd, d)
+ if os.path.isfile(os.path.join(path, 'SConscript')):
+ objs = objs + SConscript(os.path.join(d, 'SConscript'))
+
+Return('objs')
diff --git a/TensorflowLiteMicro/docs/ModelConvert.md b/TensorflowLiteMicro/docs/ModelConvert.md
new file mode 100644
index 0000000..3a9190f
--- /dev/null
+++ b/TensorflowLiteMicro/docs/ModelConvert.md
@@ -0,0 +1,202 @@
+# Model Converter Python API Guide
+
+This document provides an example of how to use the TensorFlow Lite converter Python API in TensorFlow 2.0.
+
+## Python API
+
+In TensorFlow 2.0, the Python API for converting a TensorFlow model to the TensorFlow Lite format is tf.lite.TFLiteConverter. TFLiteConverter has the following classmethods:
+
+    TFLiteConverter.from_saved_model(): converts SavedModel format models.
+    TFLiteConverter.from_keras_model(): converts tf.keras models.
+    TFLiteConverter.from_concrete_functions(): converts concrete functions.
+
+Note: TensorFlow Lite 2.0 ships a different version of the TFLiteConverter API that only contains from_concrete_function. The newer API used in this document can be installed with pip as tf-nightly-2.0-preview.
+
+This document shows example usages of the API. For a detailed list of the API differences between TensorFlow versions, see "Summary of API changes from 1.X to 2.0" below, and "Installing TensorFlow" for installation instructions.
+## Examples
+
+### Converting a SavedModel
+
+The following example shows how to convert a SavedModel into the TensorFlow Lite FlatBuffer format.
+
+ import tensorflow as tf
+
+    # Build a simple model.
+ root = tf.train.Checkpoint()
+ root.v1 = tf.Variable(3.)
+ root.v2 = tf.Variable(2.)
+ root.f = tf.function(lambda x: root.v1 * root.v2 * x)
+
+    # Save the model.
+ export_dir = "/tmp/test_saved_model"
+ input_data = tf.constant(1., shape=[1, 1])
+ to_save = root.f.get_concrete_function(input_data)
+ tf.saved_model.save(root, export_dir, to_save)
+
+    # Convert the model.
+ converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
+ tflite_model = converter.convert()
+
+This API does not support specifying input tensor dimensions. If your model requires a fixed input shape, use `from_concrete_functions` instead. Example:
+
+ model = tf.saved_model.load(export_dir)
+ concrete_func = model.signatures[
+ tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
+ concrete_func.inputs[0].set_shape([1, 256, 256, 3])
+    converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
+
+### Converting a Keras model
+
+The following example shows how to convert a tf.keras model into the TensorFlow Lite FlatBuffer format.
+
+ import tensorflow as tf
+
+    # Create a simple Keras model.
+ x = [-1, 0, 1, 2, 3, 4]
+ y = [-3, -1, 1, 3, 5, 7]
+
+ model = tf.keras.models.Sequential(
+ [tf.keras.layers.Dense(units=1, input_shape=[1])])
+ model.compile(optimizer='sgd', loss='mean_squared_error')
+ model.fit(x, y, epochs=50)
+
+    # Convert the model.
+ converter = tf.lite.TFLiteConverter.from_keras_model(model)
+ tflite_model = converter.convert()
+
+### Converting a `concrete function`
+
+The following example shows how to convert a TensorFlow `concrete function` into the TensorFlow Lite FlatBuffer format.
+
+ import tensorflow as tf
+
+    # Build a model.
+ root = tf.train.Checkpoint()
+ root.v1 = tf.Variable(3.)
+ root.v2 = tf.Variable(2.)
+ root.f = tf.function(lambda x: root.v1 * root.v2 * x)
+
+    # Generate the concrete function.
+ input_data = tf.constant(1., shape=[1, 1])
+ concrete_func = root.f.get_concrete_function(input_data)
+
+    # Convert the model.
+    # `from_concrete_functions` takes a list of concrete functions, but it
+    # currently supports only one concrete function per call.
+    # Converting multiple concrete functions at once is under development.
+ converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
+ tflite_model = converter.convert()
+
+## End-to-end MobileNet conversion
+
+The following example shows how to convert a pre-trained tf.keras MobileNet model to the TensorFlow Lite format and run inference. The results of running random data through both the TensorFlow and TensorFlow Lite models are compared. To load a model from a file, use model_path instead of model_content.
+
+    import numpy as np
+    import tensorflow as tf
+
+    # Load the MobileNet tf.keras model.
+    model = tf.keras.applications.MobileNetV2(
+        weights="imagenet", input_shape=(224, 224, 3))
+
+    # Convert the model.
+    converter = tf.lite.TFLiteConverter.from_keras_model(model)
+    tflite_model = converter.convert()
+
+    # Load the TFLite model and allocate tensors.
+    interpreter = tf.lite.Interpreter(model_content=tflite_model)
+    interpreter.allocate_tensors()
+
+    # Get the input and output tensors.
+    input_details = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+
+    # Test the TensorFlow Lite model on random input data.
+    input_shape = input_details[0]['shape']
+    input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
+    interpreter.set_tensor(input_details[0]['index'], input_data)
+
+    interpreter.invoke()
+
+    # `get_tensor()` returns a copy of the tensor data.
+    # Use `tensor()` to get a pointer to the tensor.
+    tflite_results = interpreter.get_tensor(output_details[0]['index'])
+
+    # Test the TensorFlow model on random input data.
+    tf_results = model(tf.constant(input_data))
+
+    # Compare the results.
+    for tf_result, tflite_result in zip(tf_results, tflite_results):
+        np.testing.assert_almost_equal(tf_result, tflite_result, decimal=5)
+
+## Summary of API changes from 1.X to 2.0
+
+This section summarizes the Python API changes from 1.X to 2.0. If you have concerns about any of these changes, please file a GitHub issue.
+
+### Formats supported by the TFLite converter
+
+In 2.0, the `TFLiteConverter` supports SavedModels and Keras models generated by both 1.X and 2.0. However, the conversion process no longer supports frozen GraphDefs generated by 1.X. Developers can convert frozen GraphDefs to TensorFlow Lite via tf.compat.v1.lite.TFLiteConverter.
+
+#### Quantization-aware training
+
+The following attributes and methods related to quantization-aware training were removed from TFLiteConverter in TensorFlow 2.0:
+
+ inference_type
+ inference_input_type
+ quantized_input_stats
+ default_ranges_stats
+ reorder_across_fake_quant
+ change_concat_input_ranges
+    post_training_quantize - deprecated in the 1.X API
+ get_input_arrays()
+
+The rewriter functions that support quantization-aware training do not support models generated by TensorFlow 2.0. In addition, TensorFlow Lite's quantization API is being redesigned and streamlined to align with the quantization-aware training API in Keras. These attributes will be absent from the 2.0 API until the new quantization API launches. Developers can convert models produced by the rewriter functions using tf.compat.v1.lite.TFLiteConverter.
+
+#### Changes to TFLiteConverter attributes
+
+The attribute target_ops has become an attribute of TargetSpec and was renamed supported_ops, in anticipation of future additions to the optimization framework.
+
+In addition, the following attributes were removed:
+
+- drop_control_dependency (default: True) - TFLite does not support control flow, so this attribute is always True.
+- Graph visualization - in TensorFlow 2.0, the recommended way to visualize a TensorFlow Lite graph is visualize.py. Unlike GraphViz, it lets developers visualize graphs after post-training quantization. The following graph-visualization attributes were removed:
+  - output_format
+  - dump_graphviz_dir
+  - dump_graphviz_video
+
+## General API changes
+#### Conversion methods
+
+The following methods, deprecated in 1.X, are no longer present in 2.0:
+
+ lite.toco_convert
+ lite.TocoConverter
+
+#### lite.constants
+
+The lite.constants API was removed in 2.0 to reduce duplication between TensorFlow and TensorFlow Lite. The following list maps the lite.constants types to their TensorFlow equivalents:
+
+ lite.constants.FLOAT: tf.float32
+ lite.constants.INT8: tf.int8
+ lite.constants.INT32: tf.int32
+ lite.constants.INT64: tf.int64
+ lite.constants.STRING: tf.string
+ lite.constants.QUANTIZED_UINT8: tf.uint8
+
+In addition, lite.constants.TFLITE and lite.constants.GRAPHVIZ_DOT were removed (because the output_format flag was removed from TFLiteConverter).
+
+#### lite.OpHint
+
+The OpHint API is currently unavailable because it is incompatible with the 2.0 APIs. It was used for converting LSTM-based models; support for LSTMs in 2.0 is being investigated. All lite.experimental APIs were removed for the same reason.
+
+### Installing TensorFlow
+#### Installing the TensorFlow 2.0 nightly
+
+The TensorFlow 2.0 nightly can be installed with:
+
+ pip install tf-nightly-2.0-preview
+
+#### Using TensorFlow 2.0 from a 1.X installation
+
+TensorFlow 2.0 behavior can be enabled from a recent 1.X installation via the following snippet.
+
+ import tensorflow.compat.v2 as tf
+
+ tf.enable_v2_behavior()
\ No newline at end of file
diff --git a/TensorflowLiteMicro/docs/api.md b/TensorflowLiteMicro/docs/api.md
new file mode 100644
index 0000000..8259c26
--- /dev/null
+++ b/TensorflowLiteMicro/docs/api.md
@@ -0,0 +1,59 @@
+# Tensorflow Lite Micro API
+
+## Operators currently supported by the package (52 operators)
+
+See [`all_ops_resolver.cc`](../tensorflow/lite/micro/all_ops_resolver.cc) for the detailed definition of each operator.
+
+| Operator | Description |
+|:-------------------------------------|--------|
+| `ABS()` | Element-wise absolute value |
+| `ADD()` | Element-wise addition |
+| `ARG_MAX()` | Index of the maximum value |
+| `ARG_MIN()` | Index of the minimum value |
+| `AVERAGE_POOL_2D()` | 2-D average pooling |
+| `CEIL()` | Element-wise ceiling |
+| `CONCATENATION()` | Concatenates tensors along one dimension |
+| `CONV_2D()` | 2-D convolution |
+| `COS()` | Element-wise cos(x) |
+| `DEPTHWISE_CONV_2D()` | Depthwise separable 2-D convolution |
+| `DEQUANTIZE()` | Dequantization |
+| `EQUAL()` | Element-wise equality comparison |
+| `FLOOR()` | Element-wise floor |
+| `FULLY_CONNECTED()` | Fully connected layer |
+| `GREATER()` | Element-wise greater-than comparison |
+| `GREATER_EQUAL()` | Element-wise greater-or-equal comparison |
+| `L2_NORMALIZATION()` | Normalization by the L2 (Euclidean) norm |
+| `LESS()` | Element-wise less-than comparison |
+| `LESS_EQUAL()` | Element-wise less-or-equal comparison |
+| `LOG()` | Element-wise natural logarithm |
+| `AND()` | Element-wise logical AND |
+| `NOT()` | Element-wise logical NOT |
+| `OR()` | Element-wise logical OR |
+| `LOGISTIC()` | Element-wise logistic (sigmoid) function |
+| `MAX_POOL_2D()` | 2-D max pooling |
+| `MAXIMUM()` | Element-wise maximum of two tensors |
+| `MEAN()` | Mean reduction along the given dimensions |
+| `MINIMUM()` | Element-wise minimum of two tensors |
+| `MUL()` | Element-wise multiplication |
+| `NEG()` | Element-wise negation |
+| `NOT_EQUAL()` | Element-wise inequality comparison |
+| `PACK()` | Stacks tensors along a new dimension |
+| `PAD()` | Pads a tensor with zeros |
+| `PADV2()` | Pads a tensor with a given constant value |
+| `PRELU()` | Parametric ReLU activation |
+| `QUANTIZE()` | Quantization |
+| `RELU()` | ReLU activation |
+| `RELU6()` | ReLU6 activation, min(max(0, x), 6) |
+| `RESHAPE()` | Reshapes a tensor |
+| `RESIZE_NEAREST_NEIGHBOR()` | Nearest-neighbor resizing |
+| `ROUND()` | Element-wise rounding to the nearest integer |
+| `RSQRT()` | Element-wise reciprocal square root |
+| `SIN()` | Element-wise sin(x) |
+| `SOFTMAX()` | Softmax activation |
+| `SPLIT()` | Splits a tensor into sub-tensors |
+| `SQRT()` | Element-wise square root |
+| `SQUARE()` | Element-wise square |
+| `STRIDED_SLICE()` | Strided slice of a tensor |
+| `SVDF()` | SVDF (singular value decomposition filter) layer |
+| `TANH()` | Element-wise tanh(x) |
+| `UNPACK()` | Unpacks a tensor along a dimension into multiple tensors |
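+
+The desktop TFLite interpreter supports a superset of these operators, so a quick desktop smoke test (sketch below, assuming a float-input `model.tflite` in the working directory) only proves the model converts and runs under TFLite; the authoritative list for this package remains the table above and `all_ops_resolver.cc`:
+
+```python
+import numpy as np
+import tensorflow as tf
+
+# Load the converted model with the desktop TFLite interpreter.
+interpreter = tf.lite.Interpreter(model_path="model.tflite")
+interpreter.allocate_tensors()
+
+# Run one inference on random data; invalid or unsupported ops fail
+# here rather than on the target board.
+input_details = interpreter.get_input_details()
+data = np.random.random_sample(input_details[0]['shape']).astype(np.float32)
+interpreter.set_tensor(input_details[0]['index'], data)
+interpreter.invoke()
+print("model ran OK")
+```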
diff --git a/TensorflowLiteMicro/docs/introduction.md b/TensorflowLiteMicro/docs/introduction.md
new file mode 100644
index 0000000..2ea763d
--- /dev/null
+++ b/TensorflowLiteMicro/docs/introduction.md
@@ -0,0 +1,54 @@
+# TensorFlow Lite Micro Package
+
+TensorFlow Lite Micro is an experimental port of TensorFlow Lite designed to run machine learning models on embedded devices and other devices with only a few kilobytes of memory.
+
+It requires no operating system support, no standard C/C++ libraries, and no dynamic memory allocation. The core runtime fits in 16 KB on an Arm Cortex M3 and, with enough operators to run a speech keyword-detection model, occupies a total of 22 KB.
+
+Example applications demonstrate tasks such as wake-word detection on a microcontroller, gesture classification from accelerometer data (port pending), and image classification from camera data (port pending).
+
+## Getting started
+
+To try the example applications and learn how to use the API, see the [user guide](user-guide.md).
+
+## Why run machine learning models on embedded devices?
+
+Embedded devices are typically small, low-power computing devices, often embedded in hardware that needs to perform basic computation, including home appliances and IoT devices. Microcontroller chips are produced in the billions each year.
+
+Microcontrollers are usually optimized for low energy consumption and small size, at the cost of processing power, memory, and storage. At the same time, some microcontrollers are designed specifically to accelerate machine learning workloads.
+
+By running machine learning inference on microcontrollers, developers can add AI capabilities to a wide range of hardware without relying on a network connection; network-dependent AI features are often constrained by bandwidth and power and introduce long latencies. Running inference on-device also helps protect privacy, since no data needs to leave the device.
+
+## Developer workflow
+
+To deploy a TensorFlow model to a microcontroller, follow this process (a sketch of steps 2 and 3 follows this list):
+
+1. **Create or obtain a TensorFlow model**
+
+   The model must be small enough to fit on your target device after conversion, and it can only use the [supported operations](https://tensorflow.google.cn/lite/microcontrollers/build_convert#operation_support). If you need operations that are currently unsupported, you can provide your own implementations.
+
+2. **Convert the model to a TensorFlow Lite FlatBuffer**
+
+   You can convert the model to the standard TensorFlow Lite format using the [TensorFlow Lite converter](https://tensorflow.google.cn/lite/microcontrollers/build_convert#model_conversion). You may want to output a quantized model, since quantized models are smaller and more efficient to execute.
+
+3. **Convert the FlatBuffer to a C byte array**
+
+   The model is kept in read-only program memory and provided as a simple C file. Standard tools can be used to [convert the FlatBuffer into a C array](https://tensorflow.google.cn/lite/microcontrollers/build_convert#convert_to_a_c_array).
+
+4. **Integrate the TensorFlow Lite for Microcontrollers C++ library**
+
+   Write microcontroller code that collects data, runs inference using the [C++ library](https://tensorflow.google.cn/lite/microcontrollers/library), and makes use of the results.
+
+5. **Deploy to your device**
+
+   Build the program and deploy it to your device.
+
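+Steps 2 and 3 can be scripted on the development machine. The sketch below assumes the model is available as a SavedModel and that the `xxd` tool is installed; the paths are illustrative:
+
+```python
+import subprocess
+import tensorflow as tf
+
+# Step 2: convert a SavedModel into a TensorFlow Lite FlatBuffer.
+converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/my_saved_model")
+tflite_model = converter.convert()
+with open("model.tflite", "wb") as f:
+    f.write(tflite_model)
+
+# Step 3: dump the FlatBuffer as a C array ready for the target build.
+with open("model_data.cc", "w") as f:
+    subprocess.run(["xxd", "-i", "model.tflite"], stdout=f, check=True)
+```
+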
+## Limitations
+
+TensorFlow Lite for Microcontrollers is designed around the specific constraints of microcontroller development. If you are working with more powerful devices (for example, an embedded Linux device such as the Raspberry Pi), the standard TensorFlow Lite framework may be easier to integrate.
+
+The following limitations should be considered:
+
+- Support for a [limited subset](https://tensorflow.google.cn/lite/microcontrollers/build_convert#operation_support) of TensorFlow operations
+- Support for a limited set of devices
+- A low-level C++ API that requires manual memory management
+- No support for training
\ No newline at end of file
diff --git a/TensorflowLiteMicro/docs/principle.md b/TensorflowLiteMicro/docs/principle.md
new file mode 100644
index 0000000..45f4753
--- /dev/null
+++ b/TensorflowLiteMicro/docs/principle.md
@@ -0,0 +1,2 @@
+# How Tensorflow Lite Micro Works
+
diff --git a/TensorflowLiteMicro/docs/samples.md b/TensorflowLiteMicro/docs/samples.md
new file mode 100644
index 0000000..4b571c6
--- /dev/null
+++ b/TensorflowLiteMicro/docs/samples.md
@@ -0,0 +1,167 @@
+# Tensorflow Lite Micro Example #
+
+This example uses a simple [audio recognition model](https://tensorflow.google.cn/tutorials/sequences/audio_recognition) to identify keywords in speech. The example code captures audio from the device's microphone, and the model classifies this audio in real time to determine whether the word "yes" or "no" has been spoken.
+
+## Running inference
+
+The following sections walk through the [main.cc](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/main.cc) file from the [micro speech](https://tensorflow.google.cn/lite/microcontrollers/get_started#微语音示例) example and explain how it uses Tensorflow Lite for Microcontrollers to run inference.
+
+### Includes
+
+To use the library, the following header files must be included:
+
+```C++
+#include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
+#include "tensorflow/lite/micro/micro_interpreter.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+```
+
+- [`all_ops_resolver.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/kernels/all_ops_resolver.h) provides the operations the interpreter uses to run the model.
+- [`micro_error_reporter.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/micro_error_reporter.h) outputs debug information.
+- [`micro_interpreter.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/micro_interpreter.h) contains the code that handles and runs the model.
+- [`schema_generated.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/schema/schema_generated.h) contains the schema for the TensorFlow Lite [`FlatBuffer`](https://google.github.io/flatbuffers/) model file format.
+- [`version.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/version.h) provides versioning information for the Tensorflow Lite schema.
+
+The example also includes several other files. These are the most important:
+
+```C++
+#include "tensorflow/lite/micro/examples/micro_speech/feature_provider.h"
+#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
+#include "tensorflow/lite/micro/examples/micro_speech/micro_features/tiny_conv_micro_features_model_data.h"
+```
+
+- [`feature_provider.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/micro_features/feature_provider.h) contains code that extracts features from the audio stream to feed into the model.
+- [`tiny_conv_micro_features_model_data.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/micro_features/tiny_conv_micro_features_model_data.h) contains the model stored as a `char` array. Read ["Build and convert models"](https://tensorflow.google.cn/lite/microcontrollers/build_convert) to learn how a Tensorflow Lite model is converted into this format.
+- [`micro_model_settings.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h) defines various constants related to the model.
+
+### Set up logging
+
+To set up logging, a `tflite::ErrorReporter` pointer is created using a pointer to a `tflite::MicroErrorReporter` instance:
+
+```C++
+tflite::MicroErrorReporter micro_error_reporter;
+tflite::ErrorReporter* error_reporter = &micro_error_reporter;
+```
+
+This variable is passed into the interpreter, which allows it to write logs. Since microcontrollers often have a variety of logging mechanisms, the implementation of `tflite::MicroErrorReporter` is designed to be customized for your particular device.
+
+### Load a model
+
+In the following code, the model is instantiated from a `char` array, `g_tiny_conv_micro_features_model_data` (see ["Build and convert models"](https://tensorflow.google.cn/lite/microcontrollers/build_convert) for how it was built). We then check the model to make sure its schema version is compatible with the version we are using:
+
+```C++
+const tflite::Model* model =
+ ::tflite::GetModel(g_tiny_conv_micro_features_model_data);
+if (model->version() != TFLITE_SCHEMA_VERSION) {
+ error_reporter->Report(
+ "Model provided is schema version %d not equal "
+ "to supported version %d.\n",
+ model->version(), TFLITE_SCHEMA_VERSION);
+ return 1;
+}
+```
+
+### Instantiate the operations resolver
+
+The interpreter needs an [`AllOpsResolver`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/kernels/all_ops_resolver.h) instance to access the Tensorflow operations. This class can be extended to add custom operations to your project:
+
+```C++
+tflite::ops::micro::AllOpsResolver resolver;
+```
+
+### Allocate memory
+
+We need to pre-allocate memory for the input, output, and intermediate arrays. This is provided as a `uint8_t` array of size `tensor_arena_size`, which is passed to a `tflite::SimpleTensorAllocator` instance:
+
+```C++
+const int tensor_arena_size = 10 * 1024;
+uint8_t tensor_arena[tensor_arena_size];
+tflite::SimpleTensorAllocator tensor_allocator(tensor_arena,
+ tensor_arena_size);
+```
+
+Note: the required memory size depends on the model you are using, and may need to be determined by experimentation.
+
+### Instantiate the interpreter
+
+We create a `tflite::MicroInterpreter` instance, passing in the variables created earlier:
+
+```C++
+tflite::MicroInterpreter interpreter(model, resolver, &tensor_allocator,
+ error_reporter);
+```
+
+### Validate the input dimensions
+
+The `MicroInterpreter` instance can give us a pointer to the model's input tensor by calling `.input(0)`, where `0` represents the first (and only) input tensor. We inspect this tensor to confirm that its dimensions and type are what we expect:
+
+```C++
+TfLiteTensor* model_input = interpreter.input(0);
+if ((model_input->dims->size != 4) || (model_input->dims->data[0] != 1) ||
+ (model_input->dims->data[1] != kFeatureSliceCount) ||
+ (model_input->dims->data[2] != kFeatureSliceSize) ||
+ (model_input->type != kTfLiteUInt8)) {
+ error_reporter->Report("Bad input tensor parameters in model");
+ return 1;
+}
+```
+
+In this snippet, the variables `kFeatureSliceCount` and `kFeatureSliceSize` relate to properties of the input and are defined in [`micro_model_settings.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h). The enum value `kTfLiteUInt8` references one of the Tensorflow Lite data types and is defined in [`c_api_internal.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/c/c_api_internal.h).
+
+### Generate features
+
+The data we feed into the model must be generated from the microcontroller's audio input. The `FeatureProvider` class, defined in [`feature_provider.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/micro_features/feature_provider.h), captures audio and converts it into a set of features that are passed to the model. When the class is instantiated, we use the `TfLiteTensor` obtained earlier to pass in a pointer to the input array, which the `FeatureProvider` uses to populate the input data for the model:
+
+```C++
+ FeatureProvider feature_provider(kFeatureElementCount,
+ model_input->data.uint8);
+```
+
+The following code makes the `FeatureProvider` generate a set of features from the most recent second of audio and populate the input tensor:
+
+```C++
+TfLiteStatus feature_status = feature_provider.PopulateFeatureData(
+ error_reporter, previous_time, current_time, &how_many_new_slices);
+```
+
+In this example, feature generation and inference happen in a loop, so the device continuously captures and processes new audio.
+
+When writing your own program, you may generate features differently, but you must always populate the input tensor with data before running the model.
+
+### Run the model
+
+To run the model, we call `Invoke()` on the `tflite::MicroInterpreter` instance:
+
+```C++
+TfLiteStatus invoke_status = interpreter.Invoke();
+if (invoke_status != kTfLiteOk) {
+ error_reporter->Report("Invoke failed");
+ return 1;
+}
+```
+
+We can check the returned `TfLiteStatus` to determine whether the run succeeded. The possible values of `TfLiteStatus`, defined in [`c_api_internal.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/c/c_api_internal.h), are `kTfLiteOk` and `kTfLiteError`.
+
+### Obtain the output
+
+The model's output tensor can be obtained by calling `output(0)` on the `tflite::MicroInterpreter`, where `0` represents the first (and only) output tensor.
+
+In the example, the output is an array of probabilities that the input belongs to each of the categories ("yes", "no", "unknown", and "silence"). Since they are arranged in a known order, we can use simple logic to find the category with the highest probability:
+
+```C++
+ TfLiteTensor* output = interpreter.output(0);
+ uint8_t top_category_score = 0;
+ int top_category_index;
+ for (int category_index = 0; category_index < kCategoryCount;
+ ++category_index) {
+ const uint8_t category_score = output->data.uint8[category_index];
+ if (category_score > top_category_score) {
+ top_category_score = category_score;
+ top_category_index = category_index;
+ }
+ }
+```
+
+Elsewhere in the example, a more sophisticated algorithm, defined in [recognize_commands.h](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/recognize_commands.h), smooths the recognition results over multiple frames. The same technique can be used to improve reliability when processing any continuous stream of data.
\ No newline at end of file
diff --git a/TensorflowLiteMicro/docs/user-guide.md b/TensorflowLiteMicro/docs/user-guide.md
new file mode 100644
index 0000000..279966e
--- /dev/null
+++ b/TensorflowLiteMicro/docs/user-guide.md
@@ -0,0 +1,239 @@
+# Tensorflow Lite Micro User Guide
+
+`Tensorflow Lite Micro` is an experimental port of `TensorFlow Lite` designed to run machine learning models on microcontrollers and other devices with only a few kilobytes of memory.
+
+It requires no operating system support, no standard C/C++ libraries, and no dynamic memory allocation. The core runtime fits in 16 KB on an Arm Cortex M3 and, with enough operators to run a speech keyword-detection model, occupies a total of 22 KB.
+
+# Build and convert models
+
+Embedded devices have limited RAM and storage, which constrains the size of deep learning models. In addition, TensorFlow Lite Micro currently supports only a limited subset of operations, so not every model architecture is feasible.
+
+This section describes the process of converting a TensorFlow model to run on embedded devices. It also outlines the supported operations and gives some guidance on designing and training a model so that it fits within the memory limits.
+
+## Model conversion
+
+A trained TensorFlow model can be converted into a Tensorflow Lite model that runs on embedded devices with the [TensorFlow Lite converter Python API](ModelConvert.md). It converts the model into the [`FlatBuffer`](https://google.github.io/flatbuffers/) format, reduces the model size, and modifies the model to use TensorFlow Lite-supported operations.
+
+### Quantization
+
+To obtain the smallest possible model size, you should consider using [post-training quantization](https://tensorflow.google.cn/lite/performance/post_training_quantization). It reduces the precision of the numbers in your model, which shrinks the model size. However, this may reduce inference accuracy, especially for small models, so you should analyze the model's accuracy before and after quantization to make sure the loss is acceptable.
+
+The following Python snippet shows how to convert a model with post-training quantization:
+
+```python
+import tensorflow as tf
+converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
+converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
+tflite_quant_model = converter.convert()
+open("converted_model.tflite", "wb").write(tflite_quant_model)
+```
+
+### Convert to a C array
+
+Many microcontroller platforms have no native filesystem support. The simplest way to use a model from your program is to include it as a C array and compile it into your program.
+
+The following unix command generates a C source file containing the TensorFlow Lite model as a `char` array:
+
+```bash
+xxd -i converted_model.tflite > model_data.cc
+```
+
+Its output looks similar to the following:
+
+```c
+unsigned char converted_model_tflite[] = {
+ 0x18, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x0e, 0x00,
+ //
+};
+unsigned int converted_model_tflite_len = 18200;
+```
+
+After generating this file, you can include it in your program. On embedded platforms, the array should be declared `const` for better memory efficiency.
+
+For an example of including and using a model in your program, see [`model.h`](../tensorflow/lite/micro/examples/micro_speech/micro_features/model.h) in the micro speech example.
+
+## Model architecture and training
+
+When designing a model for microcontrollers, it is important to consider the model's size, its workload, and the operations it uses.
+
+### Model size
+
+A model must be small enough, both as a binary and at runtime, to fit in your target device's memory alongside the rest of your program.
+
+To create a smaller model, you can use fewer and smaller layers in your architecture. However, small models are more prone to underfitting. This means that for many problems it makes sense to try the largest model that still fits within your memory limits. Using larger models, however, also increases the processor workload.
+
+Note: the core runtime of TensorFlow Lite for Microcontrollers occupies 16 KB on a Cortex M3.
+
+### Workload
+
+The workload is affected by the model's size and complexity. Large, complex models may result in a higher duty cycle, meaning your device's processor spends more time working and less time idle. Depending on your application, the resulting increase in power consumption and heat output may become a problem.
+
+### Operation support
+
+TensorFlow Lite for Microcontrollers currently supports only a limited subset of TensorFlow operations, which restricts the model architectures that can be run. We are working on expanding operation support, both in terms of reference implementations and optimizations for specific architectures.
+
+The supported operations can be seen in the file [`all_ops_resolver.cc`](../tensorflow/lite/micro/all_ops_resolver.cc).
+
+## Running inference
+
+The following sections walk through the [main_functions.cc](../tensorflow/lite/micro/examples/micro_speech/main_functions.cc) file from the speech example shipped with this package and explain how it uses Tensorflow Lite for Microcontrollers to run inference.
+
+### Includes
+
+To use the library, the following header files must be included:
+
+```C++
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
+#include "tensorflow/lite/micro/micro_interpreter.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+```
+
+- [`micro_ops.h`](../tensorflow/lite/micro/kernels/micro_ops.h) declares the operations the interpreter uses to run the model.
+- [`micro_error_reporter.h`](../tensorflow/lite/micro/micro_error_reporter.h) outputs debug information.
+- [`micro_interpreter.h`](../tensorflow/lite/micro/micro_interpreter.h) contains the code that handles and runs the model.
+- [`schema_generated.h`](../tensorflow/lite/schema/schema_generated.h) contains the schema for the TensorFlow Lite [`FlatBuffer`](https://google.github.io/flatbuffers/) model file format.
+- [`version.h`](../tensorflow/lite/version.h) provides versioning information for the Tensorflow Lite schema.
+
+The example also includes several other files. These are the most important:
+
+```C++
+#include "tensorflow/lite/micro/examples/micro_speech/feature_provider.h"
+#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
+#include "tensorflow/lite/micro/examples/micro_speech/micro_features/model.h"
+```
+
+- [`feature_provider.h`](../tensorflow/lite/micro/examples/micro_speech/feature_provider.h) contains code that extracts features from the audio stream to feed into the model.
+- [`model.h`](../tensorflow/lite/micro/examples/micro_speech/micro_features/model.h) contains the model stored as a `char` array. Read ["Build and convert models"](ModelConvert.md) to learn how a Tensorflow Lite model is converted into this format.
+- [`micro_model_settings.h`](../tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h) defines various constants related to the model.
+
+### Set up logging
+
+To set up logging, a `tflite::ErrorReporter` pointer is created using a pointer to a `tflite::MicroErrorReporter` instance:
+
+```C++
+tflite::MicroErrorReporter micro_error_reporter;
+tflite::ErrorReporter* error_reporter = &micro_error_reporter;
+```
+
+This variable is passed into the interpreter, which allows it to write logs. Since microcontrollers often have a variety of logging mechanisms, the implementation of `tflite::MicroErrorReporter` is designed to be customized for your particular device.
+
+### Load a model
+
+In the following code, the model is instantiated from a `char` array, `g_tiny_conv_micro_features_model_data` (see ["Build and convert models"](ModelConvert.md) for how it was built). We then check the model to make sure its schema version is compatible with the version we are using:
+
+```C++
+const tflite::Model* model =
+ ::tflite::GetModel(g_tiny_conv_micro_features_model_data);
+if (model->version() != TFLITE_SCHEMA_VERSION) {
+ error_reporter->Report(
+ "Model provided is schema version %d not equal "
+ "to supported version %d.\n",
+ model->version(), TFLITE_SCHEMA_VERSION);
+ return 1;
+}
+```
+
+### Instantiate the operations resolver
+
+The interpreter needs an op resolver to access the Tensorflow operations. The kernels declared in [`micro_ops.h`](../tensorflow/lite/micro/kernels/micro_ops.h) can be registered with a mutable op resolver, which can also be extended with custom operations for your project (the exact resolver API varies between TFLM versions; see `micro_mutable_op_resolver.h` in this package):
+
+```C++
+tflite::MicroMutableOpResolver resolver;
+```
+
+### Allocate memory
+
+We need to pre-allocate memory for the input, output, and intermediate arrays. This is provided as a `uint8_t` array of size `tensor_arena_size`, which is passed to a `tflite::SimpleTensorAllocator` instance:
+
+```C++
+const int tensor_arena_size = 10 * 1024;
+uint8_t tensor_arena[tensor_arena_size];
+tflite::SimpleTensorAllocator tensor_allocator(tensor_arena,
+ tensor_arena_size);
+```
+
+Note: the required memory size depends on the model you are using, and may need to be determined by experimentation.
+
+### Instantiate the interpreter
+
+We create a `tflite::MicroInterpreter` instance, passing in the variables created earlier:
+
+```C++
+tflite::MicroInterpreter interpreter(model, resolver, &tensor_allocator,
+ error_reporter);
+```
+
+### Validate the input dimensions
+
+The `MicroInterpreter` instance can give us a pointer to the model's input tensor by calling `.input(0)`, where `0` represents the first (and only) input tensor. We inspect this tensor to confirm that its dimensions and type are what we expect:
+
+```C++
+TfLiteTensor* model_input = interpreter.input(0);
+if ((model_input->dims->size != 4) || (model_input->dims->data[0] != 1) ||
+ (model_input->dims->data[1] != kFeatureSliceCount) ||
+ (model_input->dims->data[2] != kFeatureSliceSize) ||
+ (model_input->type != kTfLiteUInt8)) {
+ error_reporter->Report("Bad input tensor parameters in model");
+ return 1;
+}
+```
+
+In this snippet, the variables `kFeatureSliceCount` and `kFeatureSliceSize` relate to properties of the input and are defined in [`micro_model_settings.h`](../tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h). The enum value `kTfLiteUInt8` references one of the Tensorflow Lite data types and is defined in [`common.h`](../tensorflow/lite/c/common.h).
+
+### Generate features
+
+The data we feed into the model must be generated from the microcontroller's audio input. The `FeatureProvider` class, defined in [`feature_provider.h`](../tensorflow/lite/micro/examples/micro_speech/feature_provider.h), captures audio and converts it into a set of features that are passed to the model. When the class is instantiated, we use the `TfLiteTensor` obtained earlier to pass in a pointer to the input array, which the `FeatureProvider` uses to populate the input data for the model:
+
+```C++
+ FeatureProvider feature_provider(kFeatureElementCount,
+ model_input->data.uint8);
+```
+
+The following code makes the `FeatureProvider` generate a set of features from the most recent second of audio and populate the input tensor:
+
+```C++
+TfLiteStatus feature_status = feature_provider.PopulateFeatureData(
+ error_reporter, previous_time, current_time, &how_many_new_slices);
+```
+
+In this example, feature generation and inference happen in a loop, so the device continuously captures and processes new audio.
+
+When writing your own program, you may generate features differently, but you must always populate the input tensor with data before running the model.
+
+### Run the model
+
+To run the model, we call `Invoke()` on the `tflite::MicroInterpreter` instance:
+
+```C++
+TfLiteStatus invoke_status = interpreter.Invoke();
+if (invoke_status != kTfLiteOk) {
+ error_reporter->Report("Invoke failed");
+ return 1;
+}
+```
+
+We can check the returned `TfLiteStatus` to determine whether the run succeeded. The possible values of `TfLiteStatus`, defined in [`common.h`](../tensorflow/lite/c/common.h), are `kTfLiteOk` and `kTfLiteError`.
+
+### Obtain the output
+
+The model's output tensor can be obtained by calling `output(0)` on the `tflite::MicroInterpreter`, where `0` represents the first (and only) output tensor.
+
+In the example, the output is an array of probabilities that the input belongs to each of the categories ("yes", "no", "unknown", and "silence"). Since they are arranged in a known order, we can use simple logic to find the category with the highest probability:
+
+```C++
+ TfLiteTensor* output = interpreter.output(0);
+ uint8_t top_category_score = 0;
+ int top_category_index;
+ for (int category_index = 0; category_index < kCategoryCount;
+ ++category_index) {
+ const uint8_t category_score = output->data.uint8[category_index];
+ if (category_score > top_category_score) {
+ top_category_score = category_score;
+ top_category_index = category_index;
+ }
+ }
+```
+
+Elsewhere in the example, a more sophisticated algorithm, defined in [recognize_commands.h](../tensorflow/lite/micro/examples/micro_speech/recognize_commands.h), smooths the recognition results over multiple frames. The same technique can be used to improve reliability when processing any continuous stream of data.
\ No newline at end of file
diff --git a/TensorflowLiteMicro/docs/version.md b/TensorflowLiteMicro/docs/version.md
new file mode 100644
index 0000000..031d2a9
--- /dev/null
+++ b/TensorflowLiteMicro/docs/version.md
@@ -0,0 +1,6 @@
+# Versions and Revisions #
+
+| Date | Version | Author | Note |
+| -------- | :-----: | :---- | :---- |
+| 2020-09-25 | v0.1 | QingChuanWS | 初始版本 |
+| 2020-10-10 | v1.0.0 | QingChuanWS | 第一版稳定版本 |
\ No newline at end of file
diff --git a/TensorflowLiteMicro/examples/audio_main.cc b/TensorflowLiteMicro/examples/audio_main.cc
new file mode 100644
index 0000000..a77719a
--- /dev/null
+++ b/TensorflowLiteMicro/examples/audio_main.cc
@@ -0,0 +1,33 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <rtthread.h> /* rt_kprintf() */
+#include "tflite/micro/examples/micro_speech/main_functions.h"
+
+// This is the default main used on systems that have the standard C entry
+// point. Other devices (for example FreeRTOS or ESP32) that have different
+// requirements for entry code (like an app_main function) should specialize
+// this main.cc file in a target-specific subfolder.
+int main(int argc, char* argv[]) {
+ setup();
+  rt_kprintf("model loaded successfully!\n");
+ while (true) {
+ loop();
+ }
+
+ return 0;
+}
diff --git a/TensorflowLiteMicro/tensorflow/SConscript b/TensorflowLiteMicro/tensorflow/SConscript
new file mode 100644
index 0000000..4c815c4
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/SConscript
@@ -0,0 +1,15 @@
+# RT-Thread building script for bridge
+
+import os
+from building import *
+
+cwd = GetCurrentDir()
+objs = []
+list = os.listdir(cwd)
+
+for d in list:
+ path = os.path.join(cwd, d)
+ if os.path.isfile(os.path.join(path, 'SConscript')):
+ objs = objs + SConscript(os.path.join(d, 'SConscript'))
+
+Return('objs')
diff --git a/TensorflowLiteMicro/tensorflow/core/public/version.h b/TensorflowLiteMicro/tensorflow/core/public/version.h
new file mode 100644
index 0000000..431784a
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/core/public/version.h
@@ -0,0 +1,139 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_PUBLIC_VERSION_H_
+#define TENSORFLOW_CORE_PUBLIC_VERSION_H_
+
+// TensorFlow uses semantic versioning, see http://semver.org/.
+
+// Also update tensorflow/tensorflow.bzl and
+// tensorflow/tools/pip_package/setup.py
+#define TF_MAJOR_VERSION 2
+#define TF_MINOR_VERSION 4
+#define TF_PATCH_VERSION 0
+
+// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
+// "-beta", "-rc", "-rc.1")
+#define TF_VERSION_SUFFIX ""
+
+#define TF_STR_HELPER(x) #x
+#define TF_STR(x) TF_STR_HELPER(x)
+
+// e.g. "0.5.0" or "0.6.0-alpha".
+#define TF_VERSION_STRING \
+ (TF_STR(TF_MAJOR_VERSION) "." TF_STR(TF_MINOR_VERSION) "." TF_STR( \
+ TF_PATCH_VERSION) TF_VERSION_SUFFIX)
+
+// GraphDef compatibility versions (the versions field in graph.proto).
+//
+// Each graph has producer and min_consumer versions, and each
+// consumer has its own version and a min_producer. In addition, graphs can
+// mark specific consumer versions as bad (to prevent bugs from executing).
+// A consumer will execute a graph if the consumer's version is at least the
+// graph's min_consumer, the graph's producer version is at least the consumer's
+// min_producer, and the consumer version isn't specifically disallowed by the
+// graph.
+//
+// By default, newly created graphs have producer version TF_GRAPH_DEF_VERSION
+// min_consumer TF_GRAPH_DEF_MIN_CONSUMER, and no other bad consumer versions.
+//
+// Version history:
+//
+// 0. Graphs created before GraphDef versioning
+// 1. First real version (2dec2015)
+// 2. adjust_contrast only takes float, doesn't perform clamping (11dec2015)
+// 3. Remove TileGrad, since it was equivalent to reduce_sum (30dec2015)
+// 4. When support for this version is removed, we can safely make AttrValue
+// parsing more strict with respect to empty list values (see
+// 111635679, 7jan2016).
+// 5. Graphs are wholly-validated during Session::Create() (7jan2016).
+// 6. TensorFlow is scalar strict within Google (27jan2016).
+// 7. Remove TopK in favor of TopKV2 (5feb2016).
+// 8. Replace RandomCrop from C++ with pure Python (5feb2016).
+// 9. Deprecate batch_norm_with_global_normalization (16feb2016).
+// 10. Deprecate conv3d_backprop_{filter,input} (10jun2016).
+// 11. Deprecate {batch}_self_adjoint_eig (3aug2016).
+// 12. Graph consumers understand the node_def field of FunctionDef (22aug2016).
+// 13. Deprecate multiple batch linear algebra ops (9sep2016).
+// 14. Deprecate batch_matrix_* ops. (10sep2016).
+// 15. Deprecate batch_fft_* ops. (14sep2016).
+// 16. Deprecate tensor_array (v1) ops in favor of v2 (10nov2016).
+// 17. Deprecate inv (11nov2016).
+// 17. Expose reverse_v2 (10nov2016)
+// 18. Add VariableV2 (30nov2016)
+// 19. Deprecated ops created by models moved out of core SkipGram, NegTrain.
+// (08dec2016)
+// 20. Catch all version 1.0 changes to Python API generation. SplitV is now
+// used for tf.split, ReverseV2 is now used by tf.reverse, ConcatV2 is
+// now used by tf.concat. Graphs use flooring
+// division and mod semantics. TensorArrayV3. (12dec2016)
+// Also considered the version for when it is required for reduction
+// ops' indices to be scalar or vector, and not higher rank.
+// Some earlier graph def versions allowed this.
+// 21. Dropped FunctionDef.Node support, switched to node_def introduced
+// in version 12. (11jan2017)
+// 22. Placeholder now can specify and enforce scalar and partial
+// shapes, particularly when restoring a graph from GraphDef
+// produced at version 22 or later. (04/10/2016)
+// 23. Remove NonMaxSuppression in favor of NonMaxSuppressionV2.
+// 24. Deprecate lookup ops (v1) ops in favor of v2 (30may2017)
+// 25. Deprecate stack (v1) ops in favor of v2 (2017/6/15).
+// 25. Deprecate RandomPoisson (v1) ops in favor of v2 (2017/10/25).
+// 26. Add a bool 'stripped_default_attrs' to MetaInfoDef indicating
+// whether default-valued attrs have been stripped from the nodes in the
+// GraphDef. (7dec2017)
+// 27. Deprecate TensorArray ops v2 in favor of v3 and deprecated io_ops
+// deprecated in favor of V2 ops. (2018/01/23)
+// 28. Deprecate MatrixExponential op in favor of Python implementation.
+// (2018/08/21).
+// (2019/02/15). Added `control_ret` field to FunctionDef proto, and
+// `control_output` field to OpDef proto.
+// 29. Deprecate StatefulStandardNormal op in favor of StatefulStandardNormalV2.
+// (2019/03/25).
+// (2019/04/17). Added `arg_attr` field to FunctionDefProto.
+// 30. (2019/05/09) First date based GraphDef version. GraphDef
+// versions advance by 1 each day after this point.
+
+#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
+#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
+#define TF_GRAPH_DEF_VERSION 485 // Updated: 2020/8/6
+
+// Checkpoint compatibility versions (the versions field in SavedSliceMeta).
+//
+// The checkpoint versions have the same semantics as GraphDef versions, but the
+// numbering scheme is separate. We have no plans to ever deprecate checkpoint
+// versions, but it's good to have this in place in case we ever need to.
+//
+// Version history:
+//
+// 0. Checkpoints saved before checkpoint versioning.
+// 1. First real version (10feb2015).
+#define TF_CHECKPOINT_VERSION_MIN_PRODUCER 0
+#define TF_CHECKPOINT_VERSION_MIN_CONSUMER 0
+#define TF_CHECKPOINT_VERSION 1
+
+/// Version query functions (defined in generated version_info.cc)
+
+// Host compiler version (declared elsewhere to be __VERSION__)
+extern const char* tf_compiler_version();
+// The git commit designator when tensorflow was built
+// If no git repository, this will be "internal".
+extern const char* tf_git_version();
+// Value of the _GLIBCXX_USE_CXX11_ABI flag, or 0 if it's not set.
+extern int tf_cxx11_abi_flag();
+// Returns 1 if build is monolithic, or 0 otherwise.
+extern int tf_monolithic_build();
+
+#endif // TENSORFLOW_CORE_PUBLIC_VERSION_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/SConscript b/TensorflowLiteMicro/tensorflow/lite/SConscript
new file mode 100644
index 0000000..4c815c4
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/SConscript
@@ -0,0 +1,15 @@
+# RT-Thread building script for bridge
+
+import os
+from building import *
+
+cwd = GetCurrentDir()
+objs = []
+list = os.listdir(cwd)
+
+for d in list:
+ path = os.path.join(cwd, d)
+ if os.path.isfile(os.path.join(path, 'SConscript')):
+ objs = objs + SConscript(os.path.join(d, 'SConscript'))
+
+Return('objs')
diff --git a/TensorflowLiteMicro/tensorflow/lite/c/SConscript b/TensorflowLiteMicro/tensorflow/lite/c/SConscript
new file mode 100644
index 0000000..85a4112
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/c/SConscript
@@ -0,0 +1,31 @@
+from building import *
+import os
+
+cwd = GetCurrentDir()
+src = Glob('*.c') + Glob('*.cc')
+
+# . (project root)
+root = str(Dir('#'))
+
+packages = os.path.join(root, 'Middlewares')
+file_list = os.listdir(packages)
+for f in file_list:
+ if(f.split('-')[0] == 'TF'):
+ tflm_pkg = os.path.join(packages, f)
+ break
+
+#./third_party/flatbuffer/include
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+#./third_party/gemmlowp
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+#./third_party/kissfft
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+#./third_party/ruy
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite/c', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/TensorflowLiteMicro/tensorflow/lite/c/builtin_op_data.h b/TensorflowLiteMicro/tensorflow/lite/c/builtin_op_data.h
new file mode 100644
index 0000000..e205f07
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/c/builtin_op_data.h
@@ -0,0 +1,472 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
+#define TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
+
+#include <stdint.h>
+
+#include "tensorflow/lite/c/common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible
+// number of dimensions.
+#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8
+
+// TODO(aselle): Consider using "if this then that" for testing.
+
+// Useful placeholder to put in otherwise empty structs to avoid size warnings.
+typedef struct {
+ char dummy;
+} EmptyStructPlaceholder;
+
+// IMPORTANT: All new members of structs must be added at the end to ensure
+// backwards compatibility.
+
+// Possible padding types (for convolutions)
+typedef enum {
+ kTfLitePaddingUnknown = 0,
+ kTfLitePaddingSame,
+ kTfLitePaddingValid,
+} TfLitePadding;
+
+typedef enum {
+ kTfLiteMirrorPaddingUnknown = 0,
+ kTfLiteMirrorPaddingReflect,
+ kTfLiteMirrorPaddingSymmetric,
+} TfLiteMirrorPaddingMode;
+
+// TODO(b/130259536): We should move this out of builtin_op_data.
+typedef struct {
+ int width;
+ int height;
+ int width_offset;
+ int height_offset;
+} TfLitePaddingValues;
+
+typedef struct {
+ TfLiteMirrorPaddingMode mode;
+} TfLiteMirrorPaddingParams;
+
+// Possible fused activation functions.
+// TODO(aselle): rename to TfLiteActivation
+typedef enum {
+ kTfLiteActNone = 0,
+ kTfLiteActRelu,
+ kTfLiteActReluN1To1, // min(max(-1, x), 1)
+ kTfLiteActRelu1 = kTfLiteActReluN1To1, // kTfLiteActRelu1 will be deprecated.
+ kTfLiteActRelu6, // min(max(0, x), 6)
+ kTfLiteActTanh,
+ kTfLiteActSignBit,
+ kTfLiteActSigmoid,
+} TfLiteFusedActivation;
+
+typedef struct {
+ // Parameters for CONV_2D version 1.
+ TfLitePadding padding;
+ int stride_width;
+ int stride_height;
+ TfLiteFusedActivation activation;
+
+ // Parameters for CONV_2D version 2.
+ // Note: Version 2 supports dilation values not equal to 1.
+ int dilation_width_factor;
+ int dilation_height_factor;
+} TfLiteConvParams;
+
+typedef struct {
+ TfLitePadding padding;
+ int stride_width;
+ int stride_height;
+ int filter_width;
+ int filter_height;
+ TfLiteFusedActivation activation;
+ struct {
+ TfLitePaddingValues padding;
+ } computed;
+} TfLitePoolParams;
+
+typedef struct {
+ // Parameters for DepthwiseConv version 1 or above.
+ TfLitePadding padding;
+ int stride_width;
+ int stride_height;
+ // `depth_multiplier` is redundant. It's used by CPU kernels in
+ // TensorFlow 2.0 or below, but ignored in versions above.
+ //
+ // The information can be deduced from the shape of input and the shape of
+ // weights. Since the TFLiteConverter toolchain doesn't support partially
+ // specified shapes, relying on `depth_multiplier` stops us from supporting
+ // graphs with dynamic shape tensors.
+ //
+ // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this
+ // field.
+ int depth_multiplier;
+ TfLiteFusedActivation activation;
+ // Parameters for DepthwiseConv version 2 or above.
+ int dilation_width_factor;
+ int dilation_height_factor;
+} TfLiteDepthwiseConvParams;
+
+typedef struct {
+ int rank;
+ TfLiteFusedActivation activation;
+
+ // Parameter for SVDF version 4.
+ bool asymmetric_quantize_inputs;
+} TfLiteSVDFParams;
+
+typedef struct {
+ TfLiteFusedActivation activation;
+
+ // Parameter for RNN version 3.
+ bool asymmetric_quantize_inputs;
+} TfLiteRNNParams;
+
+typedef struct {
+ bool time_major;
+ TfLiteFusedActivation activation;
+
+ // Parameter for Sequence RNN version 3.
+ bool asymmetric_quantize_inputs;
+} TfLiteSequenceRNNParams;
+
+typedef struct {
+ bool time_major;
+ TfLiteFusedActivation activation;
+ bool merge_outputs;
+
+ // Parameter for Bidirectional RNN version 3.
+ bool asymmetric_quantize_inputs;
+} TfLiteBidirectionalSequenceRNNParams;
+
+typedef enum {
+ kTfLiteFullyConnectedWeightsFormatDefault = 0,
+ kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1,
+} TfLiteFullyConnectedWeightsFormat;
+
+typedef struct {
+ // Parameters for FullyConnected version 1 or above.
+ TfLiteFusedActivation activation;
+
+ // Parameters for FullyConnected version 2 or above.
+ TfLiteFullyConnectedWeightsFormat weights_format;
+
+ // Parameters for FullyConnected version 5 or above.
+ // If set to true, then the number of dimensions in the input and the output
+ // tensors are the same. Furthermore, all but the last dimension of the input
+ // and output shapes will be equal.
+ bool keep_num_dims;
+
+ // Parameters for FullyConnected version 7 or above.
+ // If set to true and the weights are quantized, then non-constant inputs
+ // are quantized at evaluation time with asymmetric quantization.
+ bool asymmetric_quantize_inputs;
+} TfLiteFullyConnectedParams;
+
+typedef enum {
+ kTfLiteLshProjectionUnknown = 0,
+ kTfLiteLshProjectionSparse = 1,
+ kTfLiteLshProjectionDense = 2,
+} TfLiteLSHProjectionType;
+
+typedef struct {
+ TfLiteLSHProjectionType type;
+} TfLiteLSHProjectionParams;
+
+typedef struct {
+ float beta;
+} TfLiteSoftmaxParams;
+
+typedef struct {
+ int axis;
+ TfLiteFusedActivation activation;
+} TfLiteConcatenationParams;
+
+typedef struct {
+ TfLiteFusedActivation activation;
+ // Parameter added for the version 4.
+ bool pot_scale_int16;
+} TfLiteAddParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLiteSpaceToBatchNDParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLiteBatchToSpaceNDParams;
+
+typedef struct {
+ bool adj_x;
+ bool adj_y;
+} TfLiteBatchMatMulParams;
+
+typedef struct {
+ TfLiteFusedActivation activation;
+} TfLiteMulParams;
+
+typedef struct {
+ TfLiteFusedActivation activation;
+ // Parameter added for the version 5.
+ bool pot_scale_int16;
+} TfLiteSubParams;
+
+typedef struct {
+ TfLiteFusedActivation activation;
+} TfLiteDivParams;
+
+typedef struct {
+ TfLiteFusedActivation activation;
+} TfLiteL2NormParams;
+
+typedef struct {
+ int radius;
+ float bias;
+ float alpha;
+ float beta;
+} TfLiteLocalResponseNormParams;
+
+typedef enum {
+ kTfLiteLSTMFullKernel = 0,
+ kTfLiteLSTMBasicKernel
+} TfLiteLSTMKernelType;
+
+typedef struct {
+ // Parameters for LSTM version 1.
+ TfLiteFusedActivation activation;
+ float cell_clip;
+ float proj_clip;
+
+ // Parameters for LSTM version 2.
+ // kTfLiteLSTMBasicKernel is only supported in version 2 or above.
+ TfLiteLSTMKernelType kernel_type;
+
+ // Parameters for LSTM version 4.
+ bool asymmetric_quantize_inputs;
+} TfLiteLSTMParams;
+
+typedef struct {
+ // Parameters needed for the underlying LSTM.
+ TfLiteFusedActivation activation;
+ float cell_clip;
+ float proj_clip;
+
+ // If set to true then the first dimension is time, otherwise batch.
+ bool time_major;
+
+ // Parameter for unidirectional sequence RNN version 3.
+ bool asymmetric_quantize_inputs;
+} TfLiteUnidirectionalSequenceLSTMParams;
+
+typedef struct {
+ // Parameters supported by version 1:
+ // Parameters inherited for the LSTM kernel.
+ TfLiteFusedActivation activation;
+ float cell_clip;
+ float proj_clip;
+
+ // If true, store the outputs of both directions in the first output.
+ bool merge_outputs;
+
+ // Parameters supported by version 2:
+ // If set to true then the first dimension is time, otherwise batch.
+ bool time_major;
+
+ // Parameters supported by version 4:
+ // If set to true, then hybrid ops use asymmetric quantization for inputs.
+ bool asymmetric_quantize_inputs;
+} TfLiteBidirectionalSequenceLSTMParams;
+
+typedef struct {
+ bool align_corners;
+ // half_pixel_centers assumes pixels are of half the actual dimensions, and
+ // yields more accurate resizes. Corresponds to the same argument for the
+ // original TensorFlow op in TF2.0.
+ bool half_pixel_centers;
+} TfLiteResizeBilinearParams;
+
+typedef struct {
+ bool align_corners;
+ bool half_pixel_centers;
+} TfLiteResizeNearestNeighborParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLitePadParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLitePadV2Params;
+
+typedef struct {
+ // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
+ // For now we will fix the maximum possible number of dimensions.
+ int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT];
+ int num_dimensions;
+} TfLiteReshapeParams;
+
+typedef struct {
+ int ngram_size;
+ int max_skip_size;
+ bool include_all_ngrams;
+} TfLiteSkipGramParams;
+
+typedef struct {
+ int block_size;
+} TfLiteSpaceToDepthParams;
+
+typedef struct {
+ int block_size;
+} TfLiteDepthToSpaceParams;
+
+typedef struct {
+ TfLiteType in_data_type;
+ TfLiteType out_data_type;
+} TfLiteCastParams;
+
+typedef enum {
+ kTfLiteCombinerTypeSum = 0,
+ kTfLiteCombinerTypeMean = 1,
+ kTfLiteCombinerTypeSqrtn = 2,
+} TfLiteCombinerType;
+
+typedef struct {
+ TfLiteCombinerType combiner;
+} TfLiteEmbeddingLookupSparseParams;
+
+typedef struct {
+ int axis;
+} TfLiteGatherParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLiteTransposeParams;
+
+typedef struct {
+ bool keep_dims;
+} TfLiteReducerParams;
+
+typedef struct {
+ int num_splits;
+} TfLiteSplitParams;
+
+typedef struct {
+ int num_splits;
+} TfLiteSplitVParams;
+
+typedef struct {
+ // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
+ // For now we will fix the maximum possible number of dimensions.
+ int squeeze_dims[8];
+ int num_squeeze_dims;
+} TfLiteSqueezeParams;
+
+typedef struct {
+ int begin_mask;
+ int end_mask;
+ int ellipsis_mask;
+ int new_axis_mask;
+ int shrink_axis_mask;
+} TfLiteStridedSliceParams;
+
+typedef struct {
+ TfLiteType output_type;
+} TfLiteArgMaxParams;
+
+typedef struct {
+ TfLiteType output_type;
+} TfLiteArgMinParams;
+
+typedef struct {
+ TfLitePadding padding;
+ int stride_width;
+ int stride_height;
+} TfLiteTransposeConvParams;
+
+typedef struct {
+ bool validate_indices;
+} TfLiteSparseToDenseParams;
+
+typedef struct {
+ TfLiteType out_type;
+} TfLiteShapeParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLiteRankParams;
+
+typedef struct {
+ // Parameters supported by version 1:
+ float min;
+ float max;
+ int num_bits;
+
+ // Parameters supported by version 2:
+ bool narrow_range;
+} TfLiteFakeQuantParams;
+
+typedef struct {
+ int values_count;
+ int axis;
+} TfLitePackParams;
+
+typedef struct {
+ int axis;
+} TfLiteOneHotParams;
+
+typedef struct {
+ int num;
+ int axis;
+} TfLiteUnpackParams;
+
+typedef struct {
+ float alpha;
+} TfLiteLeakyReluParams;
+
+typedef struct {
+ TfLiteType index_out_type;
+} TfLiteUniqueParams;
+
+typedef struct {
+ int seq_dim;
+ int batch_dim;
+} TfLiteReverseSequenceParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLiteMatrixDiagParams;
+
+typedef struct {
+ EmptyStructPlaceholder placeholder;
+} TfLiteMatrixSetDiagParams;
+
+typedef struct {
+ int then_subgraph_index;
+ int else_subgraph_index;
+} TfLiteIfParams;
+
+typedef struct {
+ int cond_subgraph_index;
+ int body_subgraph_index;
+} TfLiteWhileParams;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif // TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/c/common.c b/TensorflowLiteMicro/tensorflow/lite/c/common.c
new file mode 100644
index 0000000..0264f42
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/c/common.c
@@ -0,0 +1,232 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/c/common.h"
+#ifndef TF_LITE_STATIC_MEMORY
+#include <stdlib.h>
+#include <string.h>
+#endif // TF_LITE_STATIC_MEMORY
+
+int TfLiteIntArrayGetSizeInBytes(int size) {
+ static TfLiteIntArray dummy;
+ return sizeof(dummy) + sizeof(dummy.data[0]) * size;
+}
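+
+// Illustrative note (an editorial sketch, not upstream code): `dummy` has
+// only the fixed `size` field because `data` is a flexible array member, so
+// on a typical 32-bit target TfLiteIntArrayGetSizeInBytes(4) returns
+// sizeof(int) + 4 * sizeof(int) = 20 bytes.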
+
+int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
+ if (a == b) return 1;
+ if (a == NULL || b == NULL) return 0;
+ return TfLiteIntArrayEqualsArray(a, b->size, b->data);
+}
+
+int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
+ const int b_data[]) {
+ if (a == NULL) return (b_size == 0);
+ if (a->size != b_size) return 0;
+ int i = 0;
+ for (; i < a->size; i++)
+ if (a->data[i] != b_data[i]) return 0;
+ return 1;
+}
+
+#ifndef TF_LITE_STATIC_MEMORY
+
+TfLiteIntArray* TfLiteIntArrayCreate(int size) {
+ TfLiteIntArray* ret =
+ (TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size));
+ ret->size = size;
+ return ret;
+}
+
+TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) {
+ if (!src) return NULL;
+ TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size);
+ if (ret) {
+ memcpy(ret->data, src->data, src->size * sizeof(int));
+ }
+ return ret;
+}
+
+void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); }
+
+#endif // TF_LITE_STATIC_MEMORY
+
+int TfLiteFloatArrayGetSizeInBytes(int size) {
+ static TfLiteFloatArray dummy;
+ return sizeof(dummy) + sizeof(dummy.data[0]) * size;
+}
+
+#ifndef TF_LITE_STATIC_MEMORY
+
+TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {
+ TfLiteFloatArray* ret =
+ (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size));
+ ret->size = size;
+ return ret;
+}
+
+void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }
+
+void TfLiteTensorDataFree(TfLiteTensor* t) {
+ if (t->allocation_type == kTfLiteDynamic ||
+ t->allocation_type == kTfLitePersistentRo) {
+ free(t->data.raw);
+ }
+ t->data.raw = NULL;
+}
+
+void TfLiteQuantizationFree(TfLiteQuantization* quantization) {
+ if (quantization->type == kTfLiteAffineQuantization) {
+ TfLiteAffineQuantization* q_params =
+ (TfLiteAffineQuantization*)(quantization->params);
+ if (q_params->scale) {
+ TfLiteFloatArrayFree(q_params->scale);
+ q_params->scale = NULL;
+ }
+ if (q_params->zero_point) {
+ TfLiteIntArrayFree(q_params->zero_point);
+ q_params->zero_point = NULL;
+ }
+ free(q_params);
+ }
+ quantization->params = NULL;
+ quantization->type = kTfLiteNoQuantization;
+}
+
+void TfLiteSparsityFree(TfLiteSparsity* sparsity) {
+ if (sparsity == NULL) {
+ return;
+ }
+
+ if (sparsity->traversal_order) {
+ TfLiteIntArrayFree(sparsity->traversal_order);
+ sparsity->traversal_order = NULL;
+ }
+
+ if (sparsity->block_map) {
+ TfLiteIntArrayFree(sparsity->block_map);
+ sparsity->block_map = NULL;
+ }
+
+ if (sparsity->dim_metadata) {
+ int i = 0;
+ for (; i < sparsity->dim_metadata_size; i++) {
+ TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i];
+ if (metadata.format == kTfLiteDimSparseCSR) {
+ TfLiteIntArrayFree(metadata.array_segments);
+ metadata.array_segments = NULL;
+ TfLiteIntArrayFree(metadata.array_indices);
+ metadata.array_indices = NULL;
+ }
+ }
+ free(sparsity->dim_metadata);
+ sparsity->dim_metadata = NULL;
+ }
+
+ free(sparsity);
+}
+
+void TfLiteTensorFree(TfLiteTensor* t) {
+ TfLiteTensorDataFree(t);
+ if (t->dims) TfLiteIntArrayFree(t->dims);
+ t->dims = NULL;
+
+ if (t->dims_signature) {
+ TfLiteIntArrayFree((TfLiteIntArray *) t->dims_signature);
+ }
+ t->dims_signature = NULL;
+
+ TfLiteQuantizationFree(&t->quantization);
+ TfLiteSparsityFree(t->sparsity);
+ t->sparsity = NULL;
+}
+
+void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
+ TfLiteQuantizationParams quantization, char* buffer,
+ size_t size, TfLiteAllocationType allocation_type,
+ const void* allocation, bool is_variable,
+ TfLiteTensor* tensor) {
+ TfLiteTensorFree(tensor);
+ tensor->type = type;
+ tensor->name = name;
+ tensor->dims = dims;
+ tensor->params = quantization;
+ tensor->data.raw = buffer;
+ tensor->bytes = size;
+ tensor->allocation_type = allocation_type;
+ tensor->allocation = allocation;
+ tensor->is_variable = is_variable;
+
+ tensor->quantization.type = kTfLiteNoQuantization;
+ tensor->quantization.params = NULL;
+}
+
+void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
+ if (tensor->allocation_type != kTfLiteDynamic &&
+ tensor->allocation_type != kTfLitePersistentRo) {
+ return;
+ }
+ // TODO(b/145340303): Tensor data should be aligned.
+ if (!tensor->data.raw) {
+ tensor->data.raw = malloc(num_bytes);
+ } else if (num_bytes > tensor->bytes) {
+ tensor->data.raw = realloc(tensor->data.raw, num_bytes);
+ }
+ tensor->bytes = num_bytes;
+}
+#endif // TF_LITE_STATIC_MEMORY
+
+const char* TfLiteTypeGetName(TfLiteType type) {
+ switch (type) {
+ case kTfLiteNoType:
+ return "NOTYPE";
+ case kTfLiteFloat32:
+ return "FLOAT32";
+ case kTfLiteInt16:
+ return "INT16";
+ case kTfLiteInt32:
+ return "INT32";
+ case kTfLiteUInt8:
+ return "UINT8";
+ case kTfLiteInt8:
+ return "INT8";
+ case kTfLiteInt64:
+ return "INT64";
+ case kTfLiteBool:
+ return "BOOL";
+ case kTfLiteComplex64:
+ return "COMPLEX64";
+ case kTfLiteComplex128:
+ return "COMPLEX128";
+ case kTfLiteString:
+ return "STRING";
+ case kTfLiteFloat16:
+ return "FLOAT16";
+ case kTfLiteFloat64:
+ return "FLOAT64";
+ }
+ return "Unknown type";
+}
+
+TfLiteDelegate TfLiteDelegateCreate() {
+ TfLiteDelegate d = {
+ .data_ = NULL,
+ .Prepare = NULL,
+ .CopyFromBufferHandle = NULL,
+ .CopyToBufferHandle = NULL,
+ .FreeBufferHandle = NULL,
+ .flags = kTfLiteDelegateFlagsNone,
+ };
+ return d;
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/c/common.h b/TensorflowLiteMicro/tensorflow/lite/c/common.h
new file mode 100644
index 0000000..7ef173c
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/c/common.h
@@ -0,0 +1,936 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// This file defines common C types and APIs for implementing operations,
+// delegates and other constructs in TensorFlow Lite. The actual operations and
+// delegates can be defined using C++, but the interface between the interpreter
+// and the operations is C.
+//
+// Summary of abstractions
+// TF_LITE_ENSURE - Self-sufficient error checking
+// TfLiteStatus - Status reporting
+// TfLiteIntArray - stores tensor shapes (dims),
+// TfLiteContext - allows an op to access the tensors
+// TfLiteTensor - tensor (a multidimensional array)
+// TfLiteNode - a single node or operation
+// TfLiteRegistration - the implementation of a conceptual operation.
+// TfLiteDelegate - allows delegation of nodes to alternative backends.
+//
+// Some abstractions in this file are created and managed by Interpreter.
+//
+// NOTE: The order of values in these structs is "semi-ABI stable". New values
+// should be added only to the end of structs and never reordered.
+
+#ifndef TENSORFLOW_LITE_C_COMMON_H_
+#define TENSORFLOW_LITE_C_COMMON_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+typedef enum TfLiteStatus {
+ kTfLiteOk = 0,
+ kTfLiteError = 1,
+ kTfLiteDelegateError = 2
+} TfLiteStatus;
+
+// The list of external context types known to TF Lite. This list exists solely
+// to avoid conflicts and to ensure ops can share the external contexts they
+// need. Access to the external contexts is controlled by one of the
+// corresponding support files.
+typedef enum TfLiteExternalContextType {
+ kTfLiteEigenContext = 0, // include eigen_support.h to use.
+ kTfLiteGemmLowpContext = 1, // include gemm_support.h to use.
+ kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support.
+ kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use.
+ kTfLiteMaxExternalContexts = 4
+} TfLiteExternalContextType;
+
+// Forward declare so dependent structs and methods can reference these types
+// prior to the struct definitions.
+struct TfLiteContext;
+struct TfLiteDelegate;
+struct TfLiteRegistration;
+
+// An external context is a collection of information unrelated to the TF Lite
+// framework, but useful to a subset of the ops. TF Lite knows very little
+// about the actual contexts, but it keeps a list of them, and is able to
+// refresh them if configurations like the number of recommended threads
+// change.
+typedef struct TfLiteExternalContext {
+ TfLiteExternalContextType type;
+ TfLiteStatus (*Refresh)(struct TfLiteContext* context);
+} TfLiteExternalContext;
+
+#define kTfLiteOptionalTensor (-1)
+
+// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
+// indices
+typedef struct TfLiteIntArray {
+ int size;
+// gcc 6.1+ has a bug where flexible members aren't properly handled
+// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
+#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
+ __GNUC_MINOR__ >= 1) || \
+ defined(HEXAGON)
+ int data[0];
+#else
+ int data[];
+#endif
+} TfLiteIntArray;
+
+// Given the size (number of elements) in a TfLiteIntArray, calculate its size
+// in bytes.
+int TfLiteIntArrayGetSizeInBytes(int size);
+
+#ifndef TF_LITE_STATIC_MEMORY
+// Create an array of a given `size` (uninitialized entries).
+// This returns a pointer that you must free using TfLiteIntArrayFree().
+TfLiteIntArray* TfLiteIntArrayCreate(int size);
+#endif
+
+// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
+int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b);
+
+// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise.
+int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
+ const int b_data[]);
+
+#ifndef TF_LITE_STATIC_MEMORY
+// Create a copy of an array passed as `src`.
+// You are expected to free memory with TfLiteIntArrayFree
+TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src);
+
+// Free memory of array `a`.
+void TfLiteIntArrayFree(TfLiteIntArray* a);
+#endif // TF_LITE_STATIC_MEMORY
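+
+// Illustrative usage sketch (assumes TF_LITE_STATIC_MEMORY is not defined;
+// `dims` and `copy` are hypothetical locals, not part of this header):
+//   TfLiteIntArray* dims = TfLiteIntArrayCreate(2);
+//   dims->data[0] = 1;
+//   dims->data[1] = 28;
+//   TfLiteIntArray* copy = TfLiteIntArrayCopy(dims);
+//   TfLiteIntArrayFree(copy);
+//   TfLiteIntArrayFree(dims);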
+
+// Fixed size list of floats. Used for per-channel quantization.
+typedef struct TfLiteFloatArray {
+ int size;
+// gcc 6.1+ has a bug where flexible members aren't properly handled
+// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
+// This also applies to the toolchain used for Qualcomm Hexagon DSPs.
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
+ __GNUC_MINOR__ >= 1
+ float data[0];
+#else
+ float data[];
+#endif
+} TfLiteFloatArray;
+
+// Given the size (number of elements) in a TfLiteFloatArray, calculate its size
+// in bytes.
+int TfLiteFloatArrayGetSizeInBytes(int size);
+
+#ifndef TF_LITE_STATIC_MEMORY
+// Create an array of a given `size` (uninitialized entries).
+// This returns a pointer that you must free using TfLiteFloatArrayFree().
+TfLiteFloatArray* TfLiteFloatArrayCreate(int size);
+
+// Free memory of array `a`.
+void TfLiteFloatArrayFree(TfLiteFloatArray* a);
+#endif // TF_LITE_STATIC_MEMORY
+
+// Since we must not depend on any libraries, define a minimal subset of
+// error macros while avoiding names that have pre-conceived meanings like
+// assert and check.
+
+// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than
+// calling the context->ReportError function directly, so that message strings
+// can be stripped out if the binary size needs to be severely optimized.
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+#define TF_LITE_KERNEL_LOG(context, ...) \
+ do { \
+ (context)->ReportError((context), __VA_ARGS__); \
+ } while (false)
+
+#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) \
+ do { \
+ if ((context) != nullptr) { \
+ (context)->ReportError((context), __VA_ARGS__); \
+ } \
+ } while (false)
+#else // TF_LITE_STRIP_ERROR_STRINGS
+#define TF_LITE_KERNEL_LOG(context, ...)
+#define TF_LITE_MAYBE_KERNEL_LOG(context, ...)
+#endif // TF_LITE_STRIP_ERROR_STRINGS
+
+// Check whether value is true, and if not return kTfLiteError from
+// the current function (and report the error string msg).
+#define TF_LITE_ENSURE_MSG(context, value, msg) \
+ do { \
+ if (!(value)) { \
+ TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
+// Check whether the value `a` is true, and if not return kTfLiteError from
+// the current function, while also reporting the location of the error.
+#define TF_LITE_ENSURE(context, a) \
+ do { \
+ if (!(a)) { \
+ TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \
+ __LINE__, #a); \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
+#define TF_LITE_ENSURE_STATUS(a) \
+ do { \
+ const TfLiteStatus s = (a); \
+ if (s != kTfLiteOk) { \
+ return s; \
+ } \
+ } while (0)
+
+// Check whether the value `a == b` is true, and if not return kTfLiteError from
+// the current function, while also reporting the location of the error.
+// `a` and `b` may be evaluated more than once, so no side effects or
+// extremely expensive computations should be done.
+// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
+#define TF_LITE_ENSURE_EQ(context, a, b) \
+ do { \
+ if ((a) != (b)) { \
+ TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
+ __LINE__, #a, #b, (a), (b)); \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
+#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \
+ do { \
+ if ((a) != (b)) { \
+ TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \
+ __LINE__, #a, #b, TfLiteTypeGetName(a), \
+ TfLiteTypeGetName(b)); \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
+#define TF_LITE_ENSURE_OK(context, status) \
+ do { \
+ const TfLiteStatus s = (status); \
+ if ((s) != kTfLiteOk) { \
+ return s; \
+ } \
+ } while (0)
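+
+// Illustrative usage sketch of the macros above inside a hypothetical kernel
+// prepare function (`MyOpPrepare`, `input`, and `output` are assumptions made
+// for the example, not part of this header):
+//   TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
+//     TF_LITE_ENSURE(context, node->inputs->size == 1);
+//     TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
+//     TF_LITE_ENSURE_EQ(context, input->bytes, output->bytes);
+//     return kTfLiteOk;
+//   }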
+
+// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
+// library.
+#ifdef SWIG
+#define TFL_CAPI_EXPORT
+#else
+#if defined(_WIN32)
+#ifdef TFL_COMPILE_LIBRARY
+#define TFL_CAPI_EXPORT __declspec(dllexport)
+#else
+#define TFL_CAPI_EXPORT __declspec(dllimport)
+#endif // TFL_COMPILE_LIBRARY
+#else
+#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
+#endif // _WIN32
+#endif // SWIG
+
+// Single-precision complex data type compatible with the C99 definition.
+typedef struct TfLiteComplex64 {
+ float re, im; // real and imaginary parts, respectively.
+} TfLiteComplex64;
+
+// Double-precision complex data type compatible with the C99 definition.
+typedef struct TfLiteComplex128 {
+ double re, im; // real and imaginary parts, respectively.
+} TfLiteComplex128;
+
+// Half precision data type compatible with the C99 definition.
+typedef struct TfLiteFloat16 {
+ uint16_t data;
+} TfLiteFloat16;
+
+// Types supported by tensor
+typedef enum {
+ kTfLiteNoType = 0,
+ kTfLiteFloat32 = 1,
+ kTfLiteInt32 = 2,
+ kTfLiteUInt8 = 3,
+ kTfLiteInt64 = 4,
+ kTfLiteString = 5,
+ kTfLiteBool = 6,
+ kTfLiteInt16 = 7,
+ kTfLiteComplex64 = 8,
+ kTfLiteInt8 = 9,
+ kTfLiteFloat16 = 10,
+ kTfLiteFloat64 = 11,
+ kTfLiteComplex128 = 12,
+} TfLiteType;
+
+// Return the name of a given type, for error reporting purposes.
+const char* TfLiteTypeGetName(TfLiteType type);
+
+// SupportedQuantizationTypes.
+typedef enum TfLiteQuantizationType {
+ // No quantization.
+ kTfLiteNoQuantization = 0,
+ // Affine quantization (with support for per-channel quantization).
+ // Corresponds to TfLiteAffineQuantization.
+ kTfLiteAffineQuantization = 1,
+} TfLiteQuantizationType;
+
+// Structure specifying the quantization used by the tensor, if-any.
+typedef struct TfLiteQuantization {
+ // The type of quantization held by params.
+ TfLiteQuantizationType type;
+ // Holds a reference to one of the quantization param structures specified
+ // below.
+ void* params;
+} TfLiteQuantization;
+
+// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
+// If per-layer quantization is specified this field will still be populated in
+// addition to TfLiteAffineQuantization.
+// Parameters for asymmetric quantization. Quantized values can be converted
+// back to float using:
+// real_value = scale * (quantized_value - zero_point)
+typedef struct TfLiteQuantizationParams {
+ float scale;
+ int32_t zero_point;
+} TfLiteQuantizationParams;
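+
+// Illustrative sketch of the conversion documented above (`q` and `params`
+// are hypothetical values, not part of this header):
+//   TfLiteQuantizationParams params = {0.5f, 10};  // scale, zero_point
+//   int8_t q = 14;                                 // quantized value
+//   float real_value = params.scale * (q - params.zero_point);  // 2.0f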
+
+// Parameters for asymmetric quantization across a dimension (i.e per output
+// channel quantization).
+// quantized_dimension specifies which dimension the scales and zero_points
+// correspond to.
+// For a particular value in quantized_dimension, quantized values can be
+// converted back to float using:
+// real_value = scale * (quantized_value - zero_point)
+typedef struct TfLiteAffineQuantization {
+ TfLiteFloatArray* scale;
+ TfLiteIntArray* zero_point;
+ int32_t quantized_dimension;
+} TfLiteAffineQuantization;
+
+/* A union of pointers that points to memory for a given tensor. */
+typedef union TfLitePtrUnion {
+ /* Do not access these members directly, if possible, use
+ * GetTensorData(tensor) instead, otherwise only access .data, as other
+ * members are deprecated. */
+ int32_t* i32;
+ int64_t* i64;
+ float* f;
+ TfLiteFloat16* f16;
+ double* f64;
+ char* raw;
+ const char* raw_const;
+ uint8_t* uint8;
+ bool* b;
+ int16_t* i16;
+ TfLiteComplex64* c64;
+ TfLiteComplex128* c128;
+ int8_t* int8;
+ /* Only use this member. */
+ void* data;
+} TfLitePtrUnion;
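+
+// Illustrative sketch (`tensor` is a hypothetical local, not part of this
+// header): per the note above, prefer the generic `data` member over the
+// deprecated typed aliases.
+//   float* out = (float*)tensor->data.data;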
+
+// Memory allocation strategies.
+// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated.
+// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence,
+// and available during eval.
+// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and
+// only available during eval.
+// * kTfLiteDynamic: Allocated during eval, or for string tensors.
+// * kTfLitePersistentRo: Allocated and populated during prepare. This is
+// useful for tensors that can be computed during prepare and treated
+// as constant inputs for downstream ops (also in prepare).
+typedef enum TfLiteAllocationType {
+ kTfLiteMemNone = 0,
+ kTfLiteMmapRo,
+ kTfLiteArenaRw,
+ kTfLiteArenaRwPersistent,
+ kTfLiteDynamic,
+ kTfLitePersistentRo,
+} TfLiteAllocationType;
+
+// The delegates should use zero or positive integers to represent handles.
+// -1 is reserved for the unallocated status.
+typedef int TfLiteBufferHandle;
+enum {
+ kTfLiteNullBufferHandle = -1,
+};
+
+// Storage format of each dimension in a sparse tensor.
+typedef enum TfLiteDimensionType {
+ kTfLiteDimDense = 0,
+ kTfLiteDimSparseCSR,
+} TfLiteDimensionType;
+
+// Metadata to encode each dimension in a sparse tensor.
+typedef struct TfLiteDimensionMetadata {
+ TfLiteDimensionType format;
+ int dense_size;
+ TfLiteIntArray* array_segments;
+ TfLiteIntArray* array_indices;
+} TfLiteDimensionMetadata;
+
+// Parameters used to encode a sparse tensor. For detailed explanation of each
+// field please refer to lite/schema/schema.fbs.
+typedef struct TfLiteSparsity {
+ TfLiteIntArray* traversal_order;
+ TfLiteIntArray* block_map;
+ TfLiteDimensionMetadata* dim_metadata;
+ int dim_metadata_size;
+} TfLiteSparsity;
+
+// A tensor in the interpreter system, which is a wrapper around a buffer of
+// data that includes a dimensionality (or NULL if not currently defined).
+#ifndef TF_LITE_STATIC_MEMORY
+typedef struct TfLiteTensor {
+ // The data type specification for data stored in `data`. This affects
+ // what member of `data` union should be used.
+ TfLiteType type;
+ // A union of data pointers. The appropriate type should be used for a typed
+ // tensor based on `type`.
+ TfLitePtrUnion data;
+ // A pointer to a structure representing the dimensionality interpretation
+ // that the buffer should have. NOTE: the product of elements of `dims`
+ // and the element datatype size should be equal to `bytes` below.
+ TfLiteIntArray* dims;
+ // Quantization information.
+ TfLiteQuantizationParams params;
+ // How memory is mapped
+ // kTfLiteMmapRo: Memory mapped read only.
+ // i.e. weights
+ // kTfLiteArenaRw: Arena allocated read write memory
+ // (i.e. temporaries, outputs).
+ TfLiteAllocationType allocation_type;
+ // The number of bytes required to store the data of this Tensor. I.e.
+ // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
+ // type is kTfLiteFloat32 and dims = {3, 2} then
+ // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
+ size_t bytes;
+
+ // An opaque pointer to a tflite::MMapAllocation
+ const void* allocation;
+
+ // Null-terminated name of this tensor.
+ const char* name;
+
+ // The delegate which knows how to handle `buffer_handle`.
+ // WARNING: This is an experimental interface that is subject to change.
+ struct TfLiteDelegate* delegate;
+
+ // An integer buffer handle that can be handled by `delegate`.
+ // The value is valid only when delegate is not null.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteBufferHandle buffer_handle;
+
+ // If the delegate uses its own buffer (e.g. GPU memory), the delegate is
+ // responsible to set data_is_stale to true.
+ // `delegate->CopyFromBufferHandle` can be called to copy the data from
+ // delegate buffer.
+ // WARNING: This is an experimental interface that is subject to change.
+ bool data_is_stale;
+
+ // True if the tensor is a variable.
+ bool is_variable;
+
+ // Quantization information. Replaces params field above.
+ TfLiteQuantization quantization;
+
+ // Parameters used to encode a sparse tensor.
+ // This is optional. The field is NULL if a tensor is dense.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteSparsity* sparsity;
+
+ // Optional. Encodes shapes with unknown dimensions with -1. This field is
+ // only populated when unknown dimensions exist in a read-write tensor (i.e.
+ // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and
+ // `dims_signature` contains [1, -1, -1, 3]).
+ const TfLiteIntArray* dims_signature;
+} TfLiteTensor;
+
+// A structure representing an instance of a node.
+// This structure only exhibits the inputs, outputs and user defined data, not
+// other features like the type.
+typedef struct TfLiteNode {
+ // Inputs to this node expressed as indices into the simulator's tensors.
+ TfLiteIntArray* inputs;
+
+ // Outputs to this node expressed as indices into the simulator's tensors.
+ TfLiteIntArray* outputs;
+
+ // Intermediate tensors to this node expressed as indices into the simulator's
+ // tensors.
+ TfLiteIntArray* intermediates;
+
+ // Temporary tensors used during computation. This usually contains no
+ // tensors, but ops are allowed to change that if they need scratch space of
+ // any sort.
+ TfLiteIntArray* temporaries;
+
+ // Opaque data provided by the node implementer through `Registration.init`.
+ void* user_data;
+
+ // Opaque data provided to the node if the node is a builtin. This is usually
+ // a structure defined in builtin_op_data.h
+ void* builtin_data;
+
+ // Custom initial data. This is the opaque data provided in the flatbuffer.
+ // WARNING: This is an experimental interface that is subject to change.
+ const void* custom_initial_data;
+ int custom_initial_data_size;
+
+ // The pointer to the delegate. This is non-null only when the node is
+ // created by calling `interpreter.ModifyGraphWithDelegate`.
+ // WARNING: This is an experimental interface that is subject to change.
+ struct TfLiteDelegate* delegate;
+} TfLiteNode;
+#else // defined(TF_LITE_STATIC_MEMORY)?
+// NOTE: This flag is opt-in only at compile time.
+//
+// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct
+// contains only the minimum fields required to initialize and prepare a micro
+// inference graph. The fields in this struct have been ordered from
+// largest-to-smallest for optimal struct sizeof.
+//
+// This struct does not use:
+// - allocation
+// - buffer_handle
+// - data_is_stale
+// - delegate
+// - dims_signature
+// - name
+// - sparsity
+typedef struct TfLiteTensor {
+ // TODO(b/155784997): Consider consolidating these quantization fields:
+ // Quantization information. Replaces params field above.
+ TfLiteQuantization quantization;
+
+ // Quantization information.
+ TfLiteQuantizationParams params;
+
+ // A union of data pointers. The appropriate type should be used for a typed
+ // tensor based on `type`.
+ TfLitePtrUnion data;
+
+ // A pointer to a structure representing the dimensionality interpretation
+ // that the buffer should have. NOTE: the product of elements of `dims`
+ // and the element datatype size should be equal to `bytes` below.
+ TfLiteIntArray* dims;
+
+ // The number of bytes required to store the data of this Tensor. I.e.
+ // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
+ // type is kTfLiteFloat32 and dims = {3, 2} then
+ // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
+ size_t bytes;
+
+ // The data type specification for data stored in `data`. This affects
+ // what member of `data` union should be used.
+ TfLiteType type;
+
+ // How memory is mapped
+ // kTfLiteMmapRo: Memory mapped read only.
+ // i.e. weights
+ // kTfLiteArenaRw: Arena allocated read write memory
+ // (i.e. temporaries, outputs).
+ TfLiteAllocationType allocation_type;
+
+ // True if the tensor is a variable.
+ bool is_variable;
+} TfLiteTensor;
+
+// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains
+// only the minimum fields required to represent a node.
+//
+// This struct does not use:
+// - delegate
+// - intermediates
+// - temporaries
+typedef struct TfLiteNode {
+ // Inputs to this node expressed as indices into the simulator's tensors.
+ TfLiteIntArray* inputs;
+
+ // Outputs to this node expressed as indices into the simulator's tensors.
+ TfLiteIntArray* outputs;
+
+ // Opaque data provided by the node implementer through `Registration.init`.
+ void* user_data;
+
+ // Opaque data provided to the node if the node is a builtin. This is usually
+ // a structure defined in builtin_op_data.h
+ void* builtin_data;
+
+ // Custom initial data. This is the opaque data provided in the flatbuffer.
+ // WARNING: This is an experimental interface that is subject to change.
+ const void* custom_initial_data;
+ int custom_initial_data_size;
+} TfLiteNode;
+#endif // TF_LITE_STATIC_MEMORY
+
+// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount
+// of information required for a kernel to run during TfLiteRegistration::Eval.
+// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM
+// builds with this flag by default internally.
+typedef struct TfLiteEvalTensor {
+ // A union of data pointers. The appropriate type should be used for a typed
+ // tensor based on `type`.
+ TfLitePtrUnion data;
+
+ // A pointer to a structure representing the dimensionality interpretation
+ // that the buffer should have.
+ TfLiteIntArray* dims;
+
+ // The data type specification for data stored in `data`. This affects
+ // what member of `data` union should be used.
+ TfLiteType type;
+} TfLiteEvalTensor;
+
+#ifndef TF_LITE_STATIC_MEMORY
+// Free data memory of tensor `t`.
+void TfLiteTensorDataFree(TfLiteTensor* t);
+
+// Free quantization data.
+void TfLiteQuantizationFree(TfLiteQuantization* quantization);
+
+// Free sparsity parameters.
+void TfLiteSparsityFree(TfLiteSparsity* sparsity);
+
+// Free memory of tensor `t`.
+void TfLiteTensorFree(TfLiteTensor* t);
+
+// Set all of a tensor's fields (and free any previously allocated data).
+void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
+ TfLiteQuantizationParams quantization, char* buffer,
+ size_t size, TfLiteAllocationType allocation_type,
+ const void* allocation, bool is_variable,
+ TfLiteTensor* tensor);
+
+// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
+// types other than kTfLiteDynamic will be ignored.
+void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
+#endif // TF_LITE_STATIC_MEMORY
+
+// WARNING: This is an experimental interface that is subject to change.
+//
+// Currently, TfLiteDelegateParams has to be allocated in a way that it's
+// trivially destructable. It will be stored as `builtin_data` field in
+// `TfLiteNode` of the delegate node.
+//
+// See also the `CreateDelegateParams` function in `interpreter.cc` for details.
+typedef struct TfLiteDelegateParams {
+ struct TfLiteDelegate* delegate;
+ TfLiteIntArray* nodes_to_replace;
+ TfLiteIntArray* input_tensors;
+ TfLiteIntArray* output_tensors;
+} TfLiteDelegateParams;
+
+typedef struct TfLiteContext {
+ // Number of tensors in the context.
+ size_t tensors_size;
+
+ // The execution plan contains a list of the node indices in execution
+ // order. execution_plan->size is the current number of nodes. And,
+ // execution_plan->data[0] is the first node that needs to be run.
+ // TfLiteDelegates can traverse the current execution plan by iterating
+ // through each member of this array and using GetNodeAndRegistration() to
+ // access details about a node. i.e.
+ // TfLiteIntArray* execution_plan;
+ // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
+ // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
+ // int node_index = execution_plan->data[exec_index];
+ // TfLiteNode* node;
+ // TfLiteRegistration* reg;
+ // context->GetNodeAndRegistration(context, node_index, &node, &reg);
+ // }
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
+ TfLiteIntArray** execution_plan);
+
+ // An array of tensors in the interpreter context (of length `tensors_size`)
+ TfLiteTensor* tensors;
+
+ // opaque full context ptr (an opaque c++ data structure)
+ void* impl_;
+
+ // Request memory pointer be resized. Updates dimensions on the tensor.
+ // NOTE: ResizeTensor takes ownership of newSize.
+ TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
+ TfLiteIntArray* new_size);
+ // Request that an error be reported with format string msg.
+ void (*ReportError)(struct TfLiteContext*, const char* msg, ...);
+
+ // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If
+ // non-null, the value pointed to by `first_new_tensor_index` will be set to
+ // the index of the first new tensor.
+ TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
+ int* first_new_tensor_index);
+
+ // Get a Tensor node by node_index.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteStatus (*GetNodeAndRegistration)(
+ struct TfLiteContext*, int node_index, TfLiteNode** node,
+ struct TfLiteRegistration** registration);
+
+ // Replace ops with one or more stub delegate operations. This function
+ // does not take ownership of `nodes_to_replace`.
+ TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
+ struct TfLiteContext*, struct TfLiteRegistration registration,
+ const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate);
+
+ // Number of threads that are recommended to subsystems like gemmlowp and
+ // eigen.
+ int recommended_num_threads;
+
+ // Access external contexts by type.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
+ TfLiteExternalContextType);
+ // Set the value of an external context. Does not take ownership of the
+ // pointer.
+ // WARNING: This is an experimental interface that is subject to change.
+ void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
+ TfLiteExternalContext*);
+
+ // Flag for allowing float16 precision for FP32 calculation.
+ // default: false.
+ // WARNING: This is an experimental API and subject to change.
+ bool allow_fp32_relax_to_fp16;
+
+ // Pointer to the op-level profiler, if set; nullptr otherwise.
+ void* profiler;
+
+ // Allocate persistent buffer which has the same life time as the interpreter.
+ // Returns nullptr on failure.
+ // The memory is allocated from heap for TFL, and from tail in TFLM.
+ // This method is only available in Init or Prepare stage.
+ // WARNING: This is an experimental interface that is subject to change.
+ void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);
+
+ // Allocate a buffer which will be deallocated right after invoke phase.
+ // The memory is allocated from heap in TFL, and from volatile arena in TFLM.
+ // This method is only available in invoke stage.
+ // NOTE: If possible use RequestScratchBufferInArena method to avoid memory
+ // allocation during inference time.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes,
+ void** ptr);
+
+ // Request a scratch buffer in the arena through static memory planning.
+ // This method is only available in Prepare stage and the buffer is allocated
+ // by the interpreter between Prepare and Eval stage. In Eval stage,
+ // GetScratchBuffer API can be used to fetch the address.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx,
+ size_t bytes, int* buffer_idx);
+
+ // Get the scratch buffer pointer.
+ // This method is only available in Eval stage.
+ // WARNING: This is an experimental interface that is subject to change.
+ void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx);
+
+ // Resize the memory pointer of the `tensor`. This method behaves the same as
+ // `ResizeTensor`, except that it makes a copy of the shape array internally
+ // so the shape array could be deallocated right afterwards.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx,
+ TfLiteTensor* tensor, int dims,
+ const int* shape);
+
+ // This method provides a preview of post-delegation partitioning. Each
+ // TfLiteDelegateParams in the referenced array corresponds to one instance of
+ // the delegate kernel.
+ // Example usage:
+ //
+ // TfLiteIntArray* nodes_to_replace = ...;
+ // TfLiteDelegateParams* params_array;
+ // int num_partitions = 0;
+ // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
+ // context, delegate, nodes_to_replace, &params_array, &num_partitions));
+ // for (int idx = 0; idx < num_partitions; idx++) {
+ // const auto& partition_params = params_array[idx];
+ // ...
+ // }
+ //
+ // NOTE: The context owns the memory referenced by partition_params_array. It
+ // will be cleared with another call to PreviewDelegatePartitioning, or after
+ // TfLiteDelegateParams::Prepare returns.
+ //
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteStatus (*PreviewDelegatePartitioning)(
+ struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
+ TfLiteDelegateParams** partition_params_array, int* num_partitions);
+
+ // Returns a TfLiteTensor struct for a given index.
+ // WARNING: This is an experimental interface that is subject to change.
+ // WARNING: This method may not be available on all platforms.
+ TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
+ int tensor_idx);
+
+ // Returns a TfLiteEvalTensor struct for a given index.
+ // WARNING: This is an experimental interface that is subject to change.
+ // WARNING: This method may not be available on all platforms.
+ TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
+ int tensor_idx);
+} TfLiteContext;
+
+typedef struct TfLiteRegistration {
+ // Initializes the op from serialized data.
+ // If a built-in op:
+ // `buffer` is the op's params data (TfLiteLSTMParams*).
+ // `length` is zero.
+ // If custom op:
+ // `buffer` is the op's `custom_options`.
+ // `length` is the size of the buffer.
+ //
+ // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
+ // or an instance of a struct).
+ //
+ // The returned pointer will be stored with the node in the `user_data` field,
+ // accessible within prepare and invoke functions below.
+ // NOTE: if the data is already in the desired format, simply implement this
+ // function to return `nullptr` and implement the free function to be a no-op.
+ void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
+
+ // The pointer `buffer` is the data previously returned by an init invocation.
+ void (*free)(TfLiteContext* context, void* buffer);
+
+ // prepare is called when the inputs this node depends on have been resized.
+ // context->ResizeTensor() can be called to request output tensors to be
+ // resized.
+ //
+ // Returns kTfLiteOk on success.
+ TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
+
+ // Execute the node (should read node->inputs and output to node->outputs).
+ // Returns kTfLiteOk on success.
+ TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
+
+ // profiling_string is called during summarization of profiling information
+ // in order to group executions together. Providing a value here will cause a
+ // given op to appear multiple times in the profiling report. This is
+ // particularly useful for custom ops that can perform significantly
+ // different calculations depending on their `user-data`.
+ const char* (*profiling_string)(const TfLiteContext* context,
+ const TfLiteNode* node);
+
+ // Builtin codes. If this kernel refers to a builtin this is the code
+ // of the builtin. This is so we can do marshaling to other frameworks like
+ // NN API.
+ // Note: It is the responsibility of the registration binder to set this
+ // properly.
+ int32_t builtin_code;
+
+ // Custom op name. If the op is a builtin, this will be null.
+ // Note: It is the responsibility of the registration binder to set this
+ // properly.
+ // WARNING: This is an experimental interface that is subject to change.
+ const char* custom_name;
+
+ // The version of the op.
+ // Note: It is the responsibility of the registration binder to set this
+ // properly.
+ int version;
+} TfLiteRegistration;
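+
+// Illustrative sketch of filling in a registration for a custom op (the
+// MyOp* callbacks are hypothetical, not part of this header):
+//   static const TfLiteRegistration r = {
+//       MyOpInit,    MyOpFree,
+//       MyOpPrepare, MyOpInvoke,
+//       /*profiling_string=*/NULL, /*builtin_code=*/0,
+//       /*custom_name=*/"MyOp",    /*version=*/1};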
+
+// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
+// values should be 1, 2, 4, 8, ...etc.
+typedef enum TfLiteDelegateFlags {
+ kTfLiteDelegateFlagsNone = 0,
+ // The flag is set if the delegate can handle dynamic sized tensors.
+ // For example, the output shape of a `Resize` op with non-constant shape
+ // can only be inferred when the op is invoked.
+ // In this case, the Delegate is responsible for calling
+ // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
+ // `ResizeTensor` when invoking the op.
+ //
+ // If the delegate isn't capable of handling dynamic tensors, this flag needs
+ // to be set to false.
+ kTfLiteDelegateFlagsAllowDynamicTensors = 1,
+
+ // This flag can be used by delegates (that allow dynamic tensors) to ensure
+ // applicable tensor shapes are automatically propagated in the case of tensor
+ // resizing.
+ // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors
+ // of a delegate kernel will have correct shapes before its Prepare() method
+ // is called. The runtime leverages TFLite builtin ops in the original
+ // execution plan to propagate shapes.
+ //
+ // A few points to note:
+ // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
+ // false, this one is redundant since the delegate kernels are re-initialized
+ // every time tensors are resized.
+ // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
+ // work is required to prepare the original execution plan.
+ // 3. This flag requires that the original execution plan only have ops with
+ // valid registrations (and not 'dummy' custom ops like with Flex).
+ // WARNING: This feature is experimental and subject to change.
+ kTfLiteDelegateFlagsRequirePropagatedShapes = 2
+} TfLiteDelegateFlags;
+
+// WARNING: This is an experimental interface that is subject to change.
+typedef struct TfLiteDelegate {
+ // Data that the delegate needs to identify itself. This data is owned by the
+ // delegate. The delegate is owned by the user code, so the delegate is
+ // responsible for freeing this data when it is destroyed.
+ void* data_;
+
+ // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
+ // delegate a view of the current graph through TfLiteContext*. It typically
+ // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
+ // to ask the TensorFlow lite runtime to create macro-nodes to represent
+ // delegated subgraphs of the original graph.
+ TfLiteStatus (*Prepare)(TfLiteContext* context,
+ struct TfLiteDelegate* delegate);
+
+ // Copy the data from delegate buffer handle into raw memory of the given
+ // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
+ // long as it follows the rules for kTfLiteDynamic tensors, in which case this
+ // cannot be null.
+ TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
+ struct TfLiteDelegate* delegate,
+ TfLiteBufferHandle buffer_handle,
+ TfLiteTensor* tensor);
+
+ // Copy the data from raw memory of the given 'tensor' to delegate buffer
+ // handle. This can be null if the delegate doesn't use its own buffer.
+ TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
+ struct TfLiteDelegate* delegate,
+ TfLiteBufferHandle buffer_handle,
+ TfLiteTensor* tensor);
+
+ // Free the Delegate Buffer Handle. Note: This only frees the handle, but
+ // this doesn't release the underlying resource (e.g. textures). The
+ // resources are either owned by application layer or the delegate.
+ // This can be null if the delegate doesn't use its own buffer.
+ void (*FreeBufferHandle)(TfLiteContext* context,
+ struct TfLiteDelegate* delegate,
+ TfLiteBufferHandle* handle);
+
+ // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
+ int64_t flags;
+} TfLiteDelegate;
+
+// Build a 'null' delegate, with all the fields properly set to their default
+// values.
+TfLiteDelegate TfLiteDelegateCreate();
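+
+// Illustrative usage sketch (`MyDelegatePrepare` is a hypothetical callback,
+// not part of this header):
+//   TfLiteDelegate delegate = TfLiteDelegateCreate();
+//   delegate.Prepare = MyDelegatePrepare;
+//   delegate.flags = kTfLiteDelegateFlagsAllowDynamicTensors;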
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+#endif // TENSORFLOW_LITE_C_COMMON_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/SConscript b/TensorflowLiteMicro/tensorflow/lite/core/SConscript
new file mode 100644
index 0000000..d4a00f7
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/SConscript
@@ -0,0 +1,16 @@
+# RT-Thread building script for bridge
+
+import os
+from building import *
+
+cwd = GetCurrentDir()
+objs = []
+subdirs = os.listdir(cwd)
+
+# Recurse into every subdirectory that provides its own SConscript.
+for d in subdirs:
+    path = os.path.join(cwd, d)
+    if os.path.isfile(os.path.join(path, 'SConscript')):
+        objs = objs + SConscript(os.path.join(d, 'SConscript'))
+
+Return('objs')
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/SConscript b/TensorflowLiteMicro/tensorflow/lite/core/api/SConscript
new file mode 100644
index 0000000..d71fe04
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/SConscript
@@ -0,0 +1,29 @@
+from building import *
+import os
+
+cwd = GetCurrentDir()
+src = Glob('*.c') + Glob('*.cc')
+
+# '#' is the SCons project root.
+root = str(Dir('#'))
+
+# Locate the unpacked TFLM package directory ('TF-*') under Middlewares.
+packages = os.path.join(root, 'Middlewares')
+file_list = os.listdir(packages)
+for f in file_list:
+    if f.split('-')[0] == 'TF':
+        tflm_pkg = os.path.join(packages, f)
+        break
+
+# Third-party include paths bundled with the TFLM package.
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite/core', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/error_reporter.cc b/TensorflowLiteMicro/tensorflow/lite/core/api/error_reporter.cc
new file mode 100644
index 0000000..7070eaa
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/error_reporter.cc
@@ -0,0 +1,38 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include <cstdarg>
+
+namespace tflite {
+
+int ErrorReporter::Report(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int code = Report(format, args);
+ va_end(args);
+ return code;
+}
+
+// TODO(aselle): Make the name of ReportError on context the same, so
+// we can use the ensure functions w/o a context and w/ a reporter.
+int ErrorReporter::ReportError(void*, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int code = Report(format, args);
+ va_end(args);
+ return code;
+}
+
+} // namespace tflite
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/error_reporter.h b/TensorflowLiteMicro/tensorflow/lite/core/api/error_reporter.h
new file mode 100644
index 0000000..05839a6
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/error_reporter.h
@@ -0,0 +1,59 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
+#define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
+
+#include <cstdarg>
+
+namespace tflite {
+
+/// A functor that reports error to supporting system. Invoked similar to
+/// printf.
+///
+/// Usage:
+/// ErrorReporter foo;
+/// foo.Report("test %d", 5);
+/// or
+/// va_list args;
+/// foo.Report("test %d", args); // where args is va_list
+///
+/// Subclass ErrorReporter to provide another reporting destination.
+/// For example, if you have a GUI program, you might redirect to a buffer
+/// that drives a GUI error log box.
+class ErrorReporter {
+ public:
+ virtual ~ErrorReporter() {}
+ virtual int Report(const char* format, va_list args) = 0;
+ int Report(const char* format, ...);
+ int ReportError(void*, const char* format, ...);
+};
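+
+// A minimal subclass sketch (the class name and the vprintf sink are
+// illustrative, not part of TFLM):
+//
+//   class StdoutReporter : public ErrorReporter {
+//    public:
+//     int Report(const char* format, va_list args) override {
+//       return vprintf(format, args);  // requires <cstdio>
+//     }
+//   };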
+
+} // namespace tflite
+
+// You should not make bare calls to the error reporter, instead use the
+// TF_LITE_REPORT_ERROR macro, since this allows message strings to be
+// stripped when the binary size has to be optimized. If you are looking to
+// reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and
+// every call will be stubbed out, taking no memory.
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+#define TF_LITE_REPORT_ERROR(reporter, ...)                             \
+  do {                                                                  \
+    static_cast<ErrorReporter*>(reporter)->Report(__VA_ARGS__);         \
+  } while (false)
+#else // TF_LITE_STRIP_ERROR_STRINGS
+#define TF_LITE_REPORT_ERROR(reporter, ...)
+#endif // TF_LITE_STRIP_ERROR_STRINGS
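+
+// Usage sketch (the reporter instance and op_code are illustrative):
+//   TF_LITE_REPORT_ERROR(error_reporter, "Unsupported op code: %d", op_code);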
+
+#endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/flatbuffer_conversions.cc b/TensorflowLiteMicro/tensorflow/lite/core/api/flatbuffer_conversions.cc
new file mode 100644
index 0000000..7fb04f5
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -0,0 +1,1739 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+#include "flatbuffers/flatbuffers.h" // from @flatbuffers
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+namespace {
+
+// Utility class for safely allocating POD data. This is useful for avoiding
+// leaks in cases where op params are allocated but fail to propagate to the
+// parsed op data (e.g., when model parameters are invalid).
+class SafeBuiltinDataAllocator {
+ public:
+ class BuiltinDataDeleter {
+ public:
+ explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
+ : allocator_(allocator) {}
+
+ void operator()(void* data) { allocator_->Deallocate(data); }
+
+ private:
+ BuiltinDataAllocator* allocator_;
+ };
+
+  template <typename T>
+  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;
+
+  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
+      : allocator_(allocator) {}
+
+  template <typename T>
+  BuiltinDataPtr<T> Allocate() {
+    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
+                             BuiltinDataDeleter(allocator_));
+  }
+
+ private:
+ BuiltinDataAllocator* allocator_;
+};
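+
+// Typical usage inside the Parse* functions below (sketch; TfLiteAddParams is
+// just one example of a builtin params struct):
+//   SafeBuiltinDataAllocator safe_allocator(allocator);
+//   auto params = safe_allocator.Allocate<TfLiteAddParams>();
+//   TF_LITE_ENSURE(error_reporter, params != nullptr);
+//   ...fill *params from the flatbuffer options...
+//   *builtin_data = params.release();  // ownership passes to the caller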
+
+// All the Parse functions take some pointers as params and this function has
+// the common DCHECKs to catch if any of those are nullptr.
+void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ TFLITE_DCHECK(op != nullptr);
+ TFLITE_DCHECK(error_reporter != nullptr);
+ TFLITE_DCHECK(allocator != nullptr);
+ TFLITE_DCHECK(builtin_data != nullptr);
+}
+
+// Copies the contents of the flatbuffer int vector `flat_vector` into the
+// int array `buffer`. `flat_vector` and `buffer` represent the same
+// configuration for a given operation.
+TfLiteStatus FlatBufferIntVectorToArray(
+    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
+ int* buffer, ErrorReporter* error_reporter, const char* op_name) {
+ if (!flat_vector) {
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "Input array not provided for operation '%s'.\n",
+ op_name);
+ return kTfLiteError;
+ } else {
+ size_t num_dimensions = flat_vector->size();
+ if (num_dimensions > max_size_of_buffer / sizeof(int)) {
+ TF_LITE_REPORT_ERROR(
+ error_reporter,
+ "Found too many dimensions in the input array of operation '%s'.\n",
+ op_name);
+ return kTfLiteError;
+ } else {
+ for (size_t i = 0; i < num_dimensions; ++i) {
+ buffer[i] = flat_vector->Get(i);
+ }
+ }
+ }
+ return kTfLiteOk;
+}
+
+// Converts the flatbuffer activation to what is used at runtime.
+TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
+ switch (activation) {
+ case ActivationFunctionType_NONE:
+ return kTfLiteActNone;
+ case ActivationFunctionType_RELU:
+ return kTfLiteActRelu;
+ case ActivationFunctionType_RELU_N1_TO_1:
+ return kTfLiteActReluN1To1;
+ case ActivationFunctionType_RELU6:
+ return kTfLiteActRelu6;
+ case ActivationFunctionType_TANH:
+ return kTfLiteActTanh;
+ case ActivationFunctionType_SIGN_BIT:
+ return kTfLiteActSignBit;
+ }
+ return kTfLiteActNone;
+}
+
+// Converts the flatbuffer padding enum to what is used at runtime.
+TfLitePadding ConvertPadding(Padding padding) {
+ switch (padding) {
+ case Padding_SAME:
+ return kTfLitePaddingSame;
+ case Padding_VALID:
+ return kTfLitePaddingValid;
+ }
+ return kTfLitePaddingUnknown;
+}
+
+#ifndef TF_LITE_STATIC_MEMORY
+TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ auto parseLSHProjectionType = [](LSHProjectionType type) {
+ switch (type) {
+ case LSHProjectionType_SPARSE:
+ return kTfLiteLshProjectionSparse;
+ case LSHProjectionType_DENSE:
+ return kTfLiteLshProjectionDense;
+ default:
+ return kTfLiteLshProjectionUnknown;
+ }
+ };
+ auto parseCombinerType = [](CombinerType type) {
+ switch (type) {
+ case CombinerType_MEAN:
+ return kTfLiteCombinerTypeMean;
+ case CombinerType_SQRTN:
+ return kTfLiteCombinerTypeSqrtn;
+ case CombinerType_SUM:
+ default:
+ return kTfLiteCombinerTypeSum;
+ }
+ };
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ *builtin_data = nullptr;
+ switch (op_type) {
+ case BuiltinOperator_ABS: {
+ return ParseAbs(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_ADD: {
+ return ParseAdd(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_ARG_MAX: {
+ return ParseArgMax(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_ARG_MIN: {
+ return ParseArgMin(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_AVERAGE_POOL_2D: {
+ return ParsePool(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_CEIL: {
+ return ParseCeil(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_CONCATENATION: {
+ return ParseConcatenation(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_CONV_2D: {
+ return ParseConv2D(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_DEPTHWISE_CONV_2D: {
+ return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_DEQUANTIZE: {
+ return ParseDequantize(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_FLOOR: {
+ return ParseFloor(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_FULLY_CONNECTED: {
+ return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_GREATER: {
+ return ParseGreater(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_GREATER_EQUAL: {
+ return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_HARD_SWISH: {
+ return ParseHardSwish(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_L2_NORMALIZATION: {
+ return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_L2_POOL_2D: {
+ return ParsePool(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LESS: {
+ return ParseLess(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LESS_EQUAL: {
+ return ParseLessEqual(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOG: {
+ return ParseLog(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGICAL_AND: {
+ return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGICAL_NOT: {
+ return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGICAL_OR: {
+ return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGISTIC: {
+ return ParseLogistic(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MAXIMUM: {
+ return ParseMaximum(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MAX_POOL_2D: {
+ return ParsePool(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MEAN: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MINIMUM: {
+ return ParseMinimum(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MUL: {
+ return ParseMul(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_NEG: {
+ return ParseNeg(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_NOT_EQUAL: {
+ return ParseNotEqual(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PACK: {
+ return ParsePack(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PAD: {
+ return ParsePad(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PADV2: {
+ return ParsePadV2(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PRELU: {
+ return ParsePrelu(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_QUANTIZE: {
+ return ParseQuantize(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_ANY: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_MAX: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_MIN: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_PROD: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RELU: {
+ return ParseRelu(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RELU6: {
+ return ParseRelu6(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RESHAPE: {
+ return ParseReshape(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
+ return ParseResizeNearestNeighbor(op, error_reporter, allocator,
+ builtin_data);
+ }
+
+ case BuiltinOperator_ROUND: {
+ return ParseRound(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RSQRT: {
+ return ParseRsqrt(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SIN: {
+ return ParseSin(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SOFTMAX: {
+ return ParseSoftmax(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SPLIT: {
+ return ParseSplit(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SQRT: {
+ return ParseSqrt(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SQUARE: {
+ return ParseSquare(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_STRIDED_SLICE: {
+ return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SUB: {
+ return ParseSub(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SUM: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SVDF: {
+ return ParseSvdf(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_TANH: {
+ return ParseTanh(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_UNPACK: {
+ return ParseUnpack(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_CAST: {
+      auto params = safe_allocator.Allocate<TfLiteCastParams>();
+      TF_LITE_ENSURE(error_reporter, params != nullptr);
+      if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
+        TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->in_data_type(),
+                                                &params->in_data_type,
+                                                error_reporter));
+        TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
+                                                &params->out_data_type,
+                                                error_reporter));
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_LSH_PROJECTION: {
+      auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* lshParams =
+ op->builtin_options_as_LSHProjectionOptions()) {
+ params->type = parseLSHProjectionType(lshParams->type());
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
+      auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* sequence_rnn_params =
+ op->builtin_options_as_SequenceRNNOptions()) {
+ params->activation =
+ ConvertActivation(sequence_rnn_params->fused_activation_function());
+ params->time_major = sequence_rnn_params->time_major();
+ params->asymmetric_quantize_inputs =
+ sequence_rnn_params->asymmetric_quantize_inputs();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
+      auto params =
+          safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* bidi_sequence_rnn_params =
+ op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
+ params->activation = ConvertActivation(
+ bidi_sequence_rnn_params->fused_activation_function());
+ params->time_major = bidi_sequence_rnn_params->time_major();
+ params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
+ params->asymmetric_quantize_inputs =
+ bidi_sequence_rnn_params->asymmetric_quantize_inputs();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_RNN: {
+      auto params = safe_allocator.Allocate<TfLiteRNNParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
+ params->activation =
+ ConvertActivation(rnn_params->fused_activation_function());
+ params->asymmetric_quantize_inputs =
+ rnn_params->asymmetric_quantize_inputs();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
+      auto params =
+          safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* embedding_params =
+ op->builtin_options_as_EmbeddingLookupSparseOptions()) {
+ params->combiner = parseCombinerType(embedding_params->combiner());
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+
+ case BuiltinOperator_HASHTABLE_LOOKUP:
+ // no-op.
+ return kTfLiteOk;
+ case BuiltinOperator_DIV: {
+      auto params = safe_allocator.Allocate<TfLiteDivParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
+      auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params =
+ op->builtin_options_as_LocalResponseNormalizationOptions()) {
+ params->radius = schema_params->radius();
+ params->bias = schema_params->bias();
+ params->alpha = schema_params->alpha();
+ params->beta = schema_params->beta();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_LSTM: {
+      auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
+ params->activation =
+ ConvertActivation(lstm_params->fused_activation_function());
+ params->cell_clip = lstm_params->cell_clip();
+ params->proj_clip = lstm_params->proj_clip();
+ switch (lstm_params->kernel_type()) {
+ case LSTMKernelType_FULL:
+ params->kernel_type = kTfLiteLSTMFullKernel;
+ break;
+ case LSTMKernelType_BASIC:
+ params->kernel_type = kTfLiteLSTMBasicKernel;
+ break;
+ default:
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "Unhandled LSTM kernel type: %d",
+ lstm_params->kernel_type());
+ return kTfLiteError;
+ }
+ params->asymmetric_quantize_inputs =
+ lstm_params->asymmetric_quantize_inputs();
+ } else {
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "No valid LSTM builtin options exist");
+ return kTfLiteError;
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
+      auto params =
+          safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* seq_lstm_params =
+ op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
+ params->activation =
+ ConvertActivation(seq_lstm_params->fused_activation_function());
+ params->cell_clip = seq_lstm_params->cell_clip();
+ params->proj_clip = seq_lstm_params->proj_clip();
+ params->time_major = seq_lstm_params->time_major();
+ params->asymmetric_quantize_inputs =
+ seq_lstm_params->asymmetric_quantize_inputs();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
+      auto params =
+          safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* bidi_lstm_params =
+ op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
+ params->activation =
+ ConvertActivation(bidi_lstm_params->fused_activation_function());
+ params->cell_clip = bidi_lstm_params->cell_clip();
+ params->proj_clip = bidi_lstm_params->proj_clip();
+ params->merge_outputs = bidi_lstm_params->merge_outputs();
+ params->time_major = bidi_lstm_params->time_major();
+ params->asymmetric_quantize_inputs =
+ bidi_lstm_params->asymmetric_quantize_inputs();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_RESIZE_BILINEAR: {
+      auto params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params =
+ op->builtin_options_as_ResizeBilinearOptions()) {
+ params->align_corners = schema_params->align_corners();
+ params->half_pixel_centers = schema_params->half_pixel_centers();
+ } else {
+ // Some older models did not populate the ResizeBilinearOptions field in
+ // the flatbuffer, so ensure it's set to a sensible default.
+ params->align_corners = false;
+ params->half_pixel_centers = false;
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_SKIP_GRAM: {
+      auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* skip_gram_params =
+ op->builtin_options_as_SkipGramOptions()) {
+ params->ngram_size = skip_gram_params->ngram_size();
+ params->max_skip_size = skip_gram_params->max_skip_size();
+ params->include_all_ngrams = skip_gram_params->include_all_ngrams();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_SPACE_TO_DEPTH: {
+      auto params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params =
+ op->builtin_options_as_SpaceToDepthOptions()) {
+ params->block_size = schema_params->block_size();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_DEPTH_TO_SPACE: {
+      auto params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params =
+ op->builtin_options_as_DepthToSpaceOptions()) {
+ params->block_size = schema_params->block_size();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_GATHER: {
+      auto params = safe_allocator.Allocate<TfLiteGatherParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ params->axis = 0;
+ if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
+ params->axis = gather_params->axis();
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_SPLIT_V: {
+      auto params = safe_allocator.Allocate<TfLiteSplitVParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params = op->builtin_options_as_SplitVOptions()) {
+ params->num_splits = schema_params->num_splits();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_SQUEEZE: {
+      auto params = safe_allocator.Allocate<TfLiteSqueezeParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) {
+ const auto* squeeze_dims = schema_params->squeeze_dims();
+ TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
+ sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
+ error_reporter, "squeeze"));
+ params->num_squeeze_dims = squeeze_dims->size();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_TRANSPOSE_CONV: {
+      auto params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* transpose_conv_params =
+ op->builtin_options_as_TransposeConvOptions()) {
+ params->padding = ConvertPadding(transpose_conv_params->padding());
+ params->stride_width = transpose_conv_params->stride_w();
+ params->stride_height = transpose_conv_params->stride_h();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_SPARSE_TO_DENSE: {
+      auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* sparse_to_dense_params =
+ op->builtin_options_as_SparseToDenseOptions()) {
+ params->validate_indices = sparse_to_dense_params->validate_indices();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_SHAPE: {
+      auto params = safe_allocator.Allocate<TfLiteShapeParams>();
+      TF_LITE_ENSURE(error_reporter, params != nullptr);
+      if (const auto* schema_params = op->builtin_options_as_ShapeOptions()) {
+        TF_LITE_ENSURE_STATUS(ConvertTensorType(
+            schema_params->out_type(), &params->out_type, error_reporter));
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_DELEGATE: {
+ // TODO(ycling): Revisit when supporting saving delegated models.
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "DELEGATE op shouldn't exist in model.");
+ return kTfLiteError;
+ }
+ case BuiltinOperator_FAKE_QUANT: {
+      auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params =
+ op->builtin_options_as_FakeQuantOptions()) {
+ params->min = schema_params->min();
+ params->max = schema_params->max();
+ params->num_bits = schema_params->num_bits();
+ params->narrow_range = schema_params->narrow_range();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_ONE_HOT: {
+      auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
+ params->axis = schema_params->axis();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_LEAKY_RELU: {
+      auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* leaky_relu_params =
+ op->builtin_options_as_LeakyReluOptions()) {
+ params->alpha = leaky_relu_params->alpha();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_MIRROR_PAD: {
+      auto params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
+ if (mirror_pad_params != nullptr) {
+ params->mode =
+ mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
+ ? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
+ : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_UNIQUE: {
+      auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ const auto* unique_params = op->builtin_options_as_UniqueOptions();
+ if (unique_params != nullptr) {
+ params->index_out_type =
+ unique_params->idx_out_type() == tflite::TensorType_INT64
+ ? TfLiteType::kTfLiteInt64
+ : TfLiteType::kTfLiteInt32;
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_REVERSE_SEQUENCE: {
+      auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* reverse_seq_params =
+ op->builtin_options_as_ReverseSequenceOptions()) {
+ params->seq_dim = reverse_seq_params->seq_dim();
+ params->batch_dim = reverse_seq_params->batch_dim();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_IF: {
+      auto params = safe_allocator.Allocate<TfLiteIfParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* if_params = op->builtin_options_as_IfOptions()) {
+ params->then_subgraph_index = if_params->then_subgraph_index();
+ params->else_subgraph_index = if_params->else_subgraph_index();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_WHILE: {
+      auto params = safe_allocator.Allocate<TfLiteWhileParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
+ params->cond_subgraph_index = while_params->cond_subgraph_index();
+ params->body_subgraph_index = while_params->body_subgraph_index();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_BATCH_MATMUL: {
+      auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* bmm_params =
+ op->builtin_options_as_BatchMatMulOptions()) {
+ params->adj_x = bmm_params->adj_x();
+ params->adj_y = bmm_params->adj_y();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ // Below are the ops with no builtin_data structure.
+ case BuiltinOperator_BATCH_TO_SPACE_ND:
+ // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
+ // ok for now, since there is no call implementation either.
+ case BuiltinOperator_CALL:
+ case BuiltinOperator_CONCAT_EMBEDDINGS:
+ case BuiltinOperator_COS:
+ case BuiltinOperator_CUSTOM:
+ case BuiltinOperator_ELU:
+ case BuiltinOperator_EMBEDDING_LOOKUP:
+ case BuiltinOperator_EQUAL:
+ case BuiltinOperator_EXP:
+ case BuiltinOperator_EXPAND_DIMS:
+ case BuiltinOperator_LOG_SOFTMAX:
+ case BuiltinOperator_MATRIX_DIAG:
+ case BuiltinOperator_MATRIX_SET_DIAG:
+ case BuiltinOperator_RELU_N1_TO_1:
+ case BuiltinOperator_SELECT:
+ case BuiltinOperator_SELECT_V2:
+ case BuiltinOperator_SLICE:
+ case BuiltinOperator_SPACE_TO_BATCH_ND:
+ case BuiltinOperator_TILE:
+ case BuiltinOperator_TOPK_V2:
+ case BuiltinOperator_TRANSPOSE:
+ case BuiltinOperator_POW:
+ case BuiltinOperator_FLOOR_DIV:
+ case BuiltinOperator_ZEROS_LIKE:
+ case BuiltinOperator_FILL:
+ case BuiltinOperator_FLOOR_MOD:
+ case BuiltinOperator_RANGE:
+ case BuiltinOperator_SQUARED_DIFFERENCE:
+ case BuiltinOperator_REVERSE_V2:
+ case BuiltinOperator_ADD_N:
+ case BuiltinOperator_GATHER_ND:
+ case BuiltinOperator_WHERE:
+ case BuiltinOperator_RANK:
+ case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
+ case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
+ case BuiltinOperator_SCATTER_ND:
+ case BuiltinOperator_DENSIFY:
+ case BuiltinOperator_SEGMENT_SUM:
+ return kTfLiteOk;
+ }
+ return kTfLiteError;
+} // NOLINT[readability/fn_size]
+#endif // !defined(TF_LITE_STATIC_MEMORY)
+} // namespace
+
+TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
+ ErrorReporter* error_reporter) {
+ switch (tensor_type) {
+ case TensorType_FLOAT16:
+ *type = kTfLiteFloat16;
+ return kTfLiteOk;
+ case TensorType_FLOAT32:
+ *type = kTfLiteFloat32;
+ return kTfLiteOk;
+ case TensorType_FLOAT64:
+ *type = kTfLiteFloat64;
+ return kTfLiteOk;
+ case TensorType_INT16:
+ *type = kTfLiteInt16;
+ return kTfLiteOk;
+ case TensorType_INT32:
+ *type = kTfLiteInt32;
+ return kTfLiteOk;
+ case TensorType_UINT8:
+ *type = kTfLiteUInt8;
+ return kTfLiteOk;
+ case TensorType_INT8:
+ *type = kTfLiteInt8;
+ return kTfLiteOk;
+ case TensorType_INT64:
+ *type = kTfLiteInt64;
+ return kTfLiteOk;
+ case TensorType_STRING:
+ *type = kTfLiteString;
+ return kTfLiteOk;
+ case TensorType_BOOL:
+ *type = kTfLiteBool;
+ return kTfLiteOk;
+ case TensorType_COMPLEX64:
+ *type = kTfLiteComplex64;
+ return kTfLiteOk;
+ case TensorType_COMPLEX128:
+ *type = kTfLiteComplex128;
+ return kTfLiteOk;
+ default:
+ *type = kTfLiteNoType;
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "Unsupported data type %d in tensor\n", tensor_type);
+ return kTfLiteError;
+ }
+}
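+
+// Usage sketch (the error_reporter instance is illustrative):
+//   TfLiteType type;
+//   if (ConvertTensorType(TensorType_INT8, &type, error_reporter) ==
+//       kTfLiteOk) {
+//     // `type` now holds kTfLiteInt8.
+//   }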
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteAddParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteAddParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const AddOptions* schema_params = op->builtin_options_as_AddOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->pot_scale_int16 = schema_params->pot_scale_int16();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteArgMaxParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteArgMaxParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
+
+ if (schema_params != nullptr) {
+ TF_LITE_ENSURE_STATUS(ConvertTensorType(
+        schema_params->output_type(), &params->output_type, error_reporter));
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteArgMinParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteArgMinParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
+
+ if (schema_params != nullptr) {
+ TF_LITE_ENSURE_STATUS(ConvertTensorType(
+        schema_params->output_type(), &params->output_type, error_reporter));
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseConcatenation(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteConcatenationParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteConcatenationParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ConcatenationOptions* schema_params =
+ op->builtin_options_as_ConcatenationOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->axis = schema_params->axis();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteConvParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteConvParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
+
+ if (schema_params != nullptr) {
+ params->padding = ConvertPadding(schema_params->padding());
+ params->stride_width = schema_params->stride_w();
+ params->stride_height = schema_params->stride_h();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+
+ params->dilation_width_factor = schema_params->dilation_w_factor();
+ params->dilation_height_factor = schema_params->dilation_h_factor();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteDepthwiseConvParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const DepthwiseConv2DOptions* schema_params =
+ op->builtin_options_as_DepthwiseConv2DOptions();
+
+ if (schema_params != nullptr) {
+ params->padding = ConvertPadding(schema_params->padding());
+ params->stride_width = schema_params->stride_w();
+ params->stride_height = schema_params->stride_h();
+ params->depth_multiplier = schema_params->depth_multiplier();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+
+ params->dilation_width_factor = schema_params->dilation_w_factor();
+ params->dilation_height_factor = schema_params->dilation_h_factor();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseFullyConnected(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteFullyConnectedParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const FullyConnectedOptions* schema_params =
+ op->builtin_options_as_FullyConnectedOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->keep_num_dims = schema_params->keep_num_dims();
+ params->asymmetric_quantize_inputs =
+ schema_params->asymmetric_quantize_inputs();
+
+ switch (schema_params->weights_format()) {
+ case FullyConnectedOptionsWeightsFormat_DEFAULT:
+ params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
+ break;
+ case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
+ params->weights_format =
+ kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
+ break;
+ default:
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "Unhandled fully-connected weights format.");
+ return kTfLiteError;
+ }
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseL2Normalization(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteL2NormParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteL2NormParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteMulParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteMulParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const MulOptions* schema_params = op->builtin_options_as_MulOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLitePackParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLitePackParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const PackOptions* schema_params = op->builtin_options_as_PackOptions();
+
+ if (schema_params != nullptr) {
+ params->values_count = schema_params->values_count();
+ params->axis = schema_params->axis();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLitePoolParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLitePoolParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
+
+ if (schema_params != nullptr) {
+ params->padding = ConvertPadding(schema_params->padding());
+ params->stride_width = schema_params->stride_w();
+ params->stride_height = schema_params->stride_h();
+ params->filter_width = schema_params->filter_width();
+ params->filter_height = schema_params->filter_height();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteReducerParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteReducerParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
+
+ if (schema_params != nullptr) {
+ params->keep_dims = schema_params->keep_dims();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteReshapeParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteReshapeParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
+
+ if (schema_params != nullptr) {
+    const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
+ // TODO(b/147203660): We need to figure out when dynamic reshape
+ // (new_shape is a tensor) happens, why the option is not a nullptr.
+    // But nonetheless, we should only copy when new_shape is not a nullptr.
+ if (new_shape != nullptr) {
+ TF_LITE_ENSURE_STATUS(
+ FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
+ params->shape, error_reporter, "reshape"));
+ params->num_dimensions = new_shape->size();
+ } else {
+ // TODO(b/157480169) TODO(b/147203660): We should either return
+ // kTfLiteError or fill in some reasonable defaults in the params struct.
+      // We are not doing so until we better understand the ramifications of
+ // changing the legacy behavior.
+ }
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteResizeNearestNeighborParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ResizeNearestNeighborOptions* schema_params =
+ op->builtin_options_as_ResizeNearestNeighborOptions();
+
+ if (schema_params != nullptr) {
+ params->align_corners = schema_params->align_corners();
+ params->half_pixel_centers = schema_params->half_pixel_centers();
+ } else {
+ params->align_corners = false;
+ params->half_pixel_centers = false;
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteSoftmaxParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
+
+ if (schema_params != nullptr) {
+ params->beta = schema_params->beta();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteSplitParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteSplitParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
+
+ if (schema_params != nullptr) {
+ params->num_splits = schema_params->num_splits();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseStridedSlice(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteStridedSliceParams,
+                 SafeBuiltinDataAllocator::BuiltinDataDeleter>
+     params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const StridedSliceOptions* schema_params =
+ op->builtin_options_as_StridedSliceOptions();
+
+ if (schema_params != nullptr) {
+ params->begin_mask = schema_params->begin_mask();
+ params->end_mask = schema_params->end_mask();
+ params->ellipsis_mask = schema_params->ellipsis_mask();
+ params->new_axis_mask = schema_params->new_axis_mask();
+ params->shrink_axis_mask = schema_params->shrink_axis_mask();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteSubParams,
+                 SafeBuiltinDataAllocator::BuiltinDataDeleter>
+     params = safe_allocator.Allocate<TfLiteSubParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SubOptions* schema_params = op->builtin_options_as_SubOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->pot_scale_int16 = schema_params->pot_scale_int16();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteSVDFParams,
+                 SafeBuiltinDataAllocator::BuiltinDataDeleter>
+     params = safe_allocator.Allocate<TfLiteSVDFParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
+ if (schema_params != nullptr) {
+ params->rank = schema_params->rank();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->asymmetric_quantize_inputs =
+ schema_params->asymmetric_quantize_inputs();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteUnpackParams,
+                 SafeBuiltinDataAllocator::BuiltinDataDeleter>
+     params = safe_allocator.Allocate<TfLiteUnpackParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
+
+ if (schema_params != nullptr) {
+ params->num = schema_params->num();
+ params->axis = schema_params->axis();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
+// Micro not have the ParseOpData function at all. This would require splitting
+// the current file into two separate files, one of which defines the
+// ParseOpData function and the other that defines the operator specific parse
+// functions (e.g. ParseAdd).
+//
+// Such a split was attempted but was not worth the effort at the time because
+// of the following reasons:
+// * We could either duplicate the functions and the SafeBuiltinDataAllocator
+// class in the anonymous namespace of this file, or attempt to make a common
+// library with these helper functions and class.
+// * Making a common library with a separate build target was not feasible as
+// it introduced circular dependencies due to the ErrorReporter and a common
+// .cc and .h within the same api build target that also caused circular
+// dependencies due to the BuiltinDataAllocator class.
+// * If all the builtin operators were to have their own parse functions, or we
+// were ok with some amount of code duplication, then this split of the .cc
+// files would be a lot more feasible.
+#ifdef TF_LITE_STATIC_MEMORY
+ TF_LITE_REPORT_ERROR(
+ error_reporter,
+ "ParseOpData is unsupported on TfLiteMicro, please use the operator "
+ "specific parse functions (e.g. ParseAdd etc.).\n");
+ return kTfLiteError;
+#else
+ return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
+ builtin_data);
+#endif
+}
+
+} // namespace tflite
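For context on how the parse functions above are consumed: each one fills a `TfLite*Params` struct through a caller-supplied `BuiltinDataAllocator`. Below is a minimal sketch, assuming a heap-backed allocator; the `MallocDataAllocator` class and `GetSoftmaxParams` helper are hypothetical, not part of this file.

```cpp
#include <cstdlib>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

// Hypothetical heap-backed allocator; TFLM itself allocates from its arena.
class MallocDataAllocator : public tflite::BuiltinDataAllocator {
 public:
  void* Allocate(size_t size, size_t alignment_hint) override {
    return malloc(size);  // alignment_hint is ignored in this sketch.
  }
  void Deallocate(void* data) override { free(data); }
};

// Hypothetical helper: parse SoftmaxOptions out of a flatbuffer operator.
TfLiteStatus GetSoftmaxParams(const tflite::Operator* op,
                              tflite::ErrorReporter* reporter,
                              TfLiteSoftmaxParams** out) {
  MallocDataAllocator allocator;
  void* builtin_data = nullptr;
  if (tflite::ParseSoftmax(op, reporter, &allocator, &builtin_data) !=
      kTfLiteOk) {
    return kTfLiteError;
  }
  // The caller owns the allocation and must eventually Deallocate it.
  *out = reinterpret_cast<TfLiteSoftmaxParams*>(builtin_data);
  return kTfLiteOk;
}
```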
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/flatbuffer_conversions.h b/TensorflowLiteMicro/tensorflow/lite/core/api/flatbuffer_conversions.h
new file mode 100644
index 0000000..aaeb98c
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/flatbuffer_conversions.h
@@ -0,0 +1,253 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
+#define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
+
+// These functions transform codes and data structures that are defined in the
+// flatbuffer serialization format into in-memory values that are used by the
+// runtime API and interpreter.
+
+#include <cstddef>
+#include <new>
+#include <type_traits>
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+// Interface class for builtin data allocations.
+class BuiltinDataAllocator {
+ public:
+ virtual void* Allocate(size_t size, size_t alignment_hint) = 0;
+ virtual void Deallocate(void* data) = 0;
+
+ // Allocate a structure, but make sure it is a POD structure that doesn't
+ // require constructors to run. The reason we do this is that the
+ // Interpreter's C extension part will take ownership, so destructors will
+ // not be run during deallocation.
+ template <typename T>
+ T* AllocatePOD() {
+ // TODO(b/154346074): Change this to is_trivially_destructible when all
+ // platform targets support that properly.
+ static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
+ void* allocated_memory = this->Allocate(sizeof(T), alignof(T));
+ return new (allocated_memory) T;
+ }
+
+ virtual ~BuiltinDataAllocator() {}
+};
+
+// Parse the appropriate data out of the op.
+//
+// This handles builtin data explicitly as there are flatbuffer schemas.
+// If it returns kTfLiteOk, it passes the data out with `builtin_data`. The
+// calling function has to pass in an allocator object, and this allocator
+// will be called to reserve space for the output data. If the calling
+// function's allocator reserves memory on the heap, then it's the calling
+// function's responsibility to free it.
+// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
+TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+// Converts the tensor data type used in the flat buffer to the representation
+// used by the runtime.
+TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
+ ErrorReporter* error_reporter);
+
+TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseConcatenation(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseFullyConnected(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseGreaterEqual(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseL2Normalization(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseStridedSlice(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/op_resolver.cc b/TensorflowLiteMicro/tensorflow/lite/core/api/op_resolver.cc
new file mode 100644
index 0000000..c239d9e
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/op_resolver.cc
@@ -0,0 +1,66 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/core/api/op_resolver.h"
+
+#include "flatbuffers/flatbuffers.h" // from @flatbuffers
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+
+namespace tflite {
+
+TfLiteStatus GetRegistrationFromOpCode(
+ const OperatorCode* opcode, const OpResolver& op_resolver,
+ ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
+ TfLiteStatus status = kTfLiteOk;
+ *registration = nullptr;
+ auto builtin_code = opcode->builtin_code();
+ int version = opcode->version();
+
+ if (builtin_code > BuiltinOperator_MAX ||
+ builtin_code < BuiltinOperator_MIN) {
+ TF_LITE_REPORT_ERROR(
+ error_reporter,
+ "Op builtin_code out of range: %d. Are you using old TFLite binary "
+ "with newer model?",
+ builtin_code);
+ status = kTfLiteError;
+ } else if (builtin_code != BuiltinOperator_CUSTOM) {
+ *registration = op_resolver.FindOp(builtin_code, version);
+ if (*registration == nullptr) {
+ TF_LITE_REPORT_ERROR(
+ error_reporter,
+ "Didn't find op for builtin opcode '%s' version '%d'\n",
+ EnumNameBuiltinOperator(builtin_code), version);
+ status = kTfLiteError;
+ }
+ } else if (!opcode->custom_code()) {
+ TF_LITE_REPORT_ERROR(
+ error_reporter,
+ "Operator with CUSTOM builtin_code has no custom_code.\n");
+ status = kTfLiteError;
+ } else {
+ const char* name = opcode->custom_code()->c_str();
+ *registration = op_resolver.FindOp(name, version);
+ if (*registration == nullptr) {
+ // Do not report error for unresolved custom op, we do the final check
+ // while preparing ops.
+ status = kTfLiteError;
+ }
+ }
+ return status;
+}
+
+} // namespace tflite
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/op_resolver.h b/TensorflowLiteMicro/tensorflow/lite/core/api/op_resolver.h
new file mode 100644
index 0000000..1294b7b
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/op_resolver.h
@@ -0,0 +1,48 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
+#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+/// Abstract interface that returns TfLiteRegistrations given op codes or
+/// custom op names. This is the mechanism by which ops referenced in the
+/// flatbuffer model are mapped to executable function pointers
+/// (TfLiteRegistrations).
+class OpResolver {
+ public:
+ /// Finds the op registration for a builtin operator by enum code.
+ virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
+ int version) const = 0;
+ /// Finds the op registration of a custom operator by op name.
+ virtual const TfLiteRegistration* FindOp(const char* op,
+ int version) const = 0;
+ virtual ~OpResolver() {}
+};
+
+// Handles the logic for converting between an OperatorCode structure extracted
+// from a flatbuffer and information about a registered operator
+// implementation.
+TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
+ const OpResolver& op_resolver,
+ ErrorReporter* error_reporter,
+ const TfLiteRegistration** registration);
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
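To make the interface above concrete, here is a sketch of a resolver that serves a single builtin op. The `SingleOpResolver` class is hypothetical; TFLM's real implementation is `MicroMutableOpResolver`.

```cpp
#include "tensorflow/lite/core/api/op_resolver.h"

// Hypothetical resolver that knows exactly one builtin operator.
class SingleOpResolver : public tflite::OpResolver {
 public:
  SingleOpResolver(tflite::BuiltinOperator op, const TfLiteRegistration* reg)
      : op_(op), registration_(reg) {}

  const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                   int version) const override {
    return (op == op_) ? registration_ : nullptr;
  }

  const TfLiteRegistration* FindOp(const char* op,
                                   int version) const override {
    return nullptr;  // No custom ops registered in this sketch.
  }

 private:
  tflite::BuiltinOperator op_;
  const TfLiteRegistration* registration_;
};
```

`GetRegistrationFromOpCode` would then consult this resolver for each operator code found in the flatbuffer, returning `kTfLiteError` for anything but the one registered op.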
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/profiler.h b/TensorflowLiteMicro/tensorflow/lite/core/api/profiler.h
new file mode 100644
index 0000000..897efbe
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/profiler.h
@@ -0,0 +1,194 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_
+#define TENSORFLOW_LITE_CORE_API_PROFILER_H_
+
+#include <cstdint>
+
+namespace tflite {
+
+// A simple utility for enabling profiled event tracing in TensorFlow Lite.
+class Profiler {
+ public:
+ // As a given Profiler instance might only be interested in certain event
+ // types, we define each event type as a distinct bit value so that a
+ // Profiler can use bitmasking bitwise operations to determine whether an
+ // event should be recorded or not.
+ enum class EventType {
+ // Default event type, the metadata field has no special significance.
+ DEFAULT = 1,
+
+ // The event is an operator invocation and the event_metadata field is the
+ // index of operator node.
+ OPERATOR_INVOKE_EVENT = 2,
+
+ // The event is an invocation for an internal operator of a TFLite delegate.
+ // The event_metadata field is the index of the operator node that's
+ // specific to the delegate.
+ DELEGATE_OPERATOR_INVOKE_EVENT = 4,
+
+ // The event is a recording of runtime instrumentation such as the overall
+ // TFLite runtime status, the TFLite delegate status (if a delegate
+ // is applied), and the overall model inference latency etc.
+ // Note, the delegate status and overall status are stored as separate
+ // event_metadata fields. In particular, the delegate status is encoded
+ // as DelegateStatus::full_status().
+ GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8,
+ };
+
+ virtual ~Profiler() {}
+
+ // Signals the beginning of an event and returns a handle to the profile
+ // event. The `event_metadata1` and `event_metadata2` have different
+ // interpretations based on the actual Profiler instance and the `event_type`.
+ // For example, as for the 'SubgraphAwareProfiler' defined in
+ // lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT,
+ // `event_metadata1` represents the index of a TFLite node, and
+ // `event_metadata2` represents the index of the subgraph that this event
+ // comes from.
+ virtual uint32_t BeginEvent(const char* tag, EventType event_type,
+ int64_t event_metadata1,
+ int64_t event_metadata2) = 0;
+ // Similar w/ the above, but `event_metadata2` defaults to 0.
+ uint32_t BeginEvent(const char* tag, EventType event_type,
+ int64_t event_metadata) {
+ return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0);
+ }
+
+ // Signals an end to the specified profile event with 'event_metadata's. This
+ // is useful when 'event_metadata's are not available when the event begins,
+ // or when one wants to overwrite the 'event_metadata's set at the beginning.
+ virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1,
+ int64_t event_metadata2) {}
+ // Signals an end to the specified profile event.
+ virtual void EndEvent(uint32_t event_handle) = 0;
+
+ // Appends an event of type 'event_type' with 'tag' and 'event_metadata'
+ // which started at 'start' and ended at 'end'
+ // Note:
+ // In cases where ProfileSummarizer and tensorflow::StatsCalculator are used,
+ // they assume the value is in "usec"; if a subclass does not report usec,
+ // then the values are not meaningful.
+ // TODO karimnosseir: Revisit and make the function more clear.
+ void AddEvent(const char* tag, EventType event_type, uint64_t start,
+ uint64_t end, int64_t event_metadata) {
+ AddEvent(tag, event_type, start, end, event_metadata,
+ /*event_metadata2*/ 0);
+ }
+
+ virtual void AddEvent(const char* tag, EventType event_type, uint64_t start,
+ uint64_t end, int64_t event_metadata1,
+ int64_t event_metadata2) {}
+
+ protected:
+ friend class ScopedProfile;
+};
+
+// Adds a profile event to `profiler` that begins with the construction
+// of the object and ends when the object goes out of scope.
+// The lifetime of tag should be at least the lifetime of `profiler`.
+// `profiler` may be null, in which case nothing is profiled.
+class ScopedProfile {
+ public:
+ ScopedProfile(Profiler* profiler, const char* tag,
+ Profiler::EventType event_type = Profiler::EventType::DEFAULT,
+ int64_t event_metadata = 0)
+ : profiler_(profiler), event_handle_(0) {
+ if (profiler) {
+ event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata);
+ }
+ }
+
+ ~ScopedProfile() {
+ if (profiler_) {
+ profiler_->EndEvent(event_handle_);
+ }
+ }
+
+ protected:
+ Profiler* profiler_;
+ uint32_t event_handle_;
+};
+
+class ScopedOperatorProfile : public ScopedProfile {
+ public:
+ ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index)
+ : ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
+ static_cast<uint32_t>(node_index)) {}
+};
+
+class ScopedDelegateOperatorProfile : public ScopedProfile {
+ public:
+ ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag,
+ int node_index)
+ : ScopedProfile(profiler, tag,
+ Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT,
+ static_cast<uint32_t>(node_index)) {}
+};
+
+class ScopedRuntimeInstrumentationProfile : public ScopedProfile {
+ public:
+ ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag)
+ : ScopedProfile(
+ profiler, tag,
+ Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {}
+
+ void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) {
+ if (profiler_) {
+ delegate_status_ = delegate_status;
+ interpreter_status_ = interpreter_status;
+ }
+ }
+
+ ~ScopedRuntimeInstrumentationProfile() {
+ if (profiler_) {
+ profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_);
+ }
+ }
+
+ private:
+ int64_t delegate_status_;
+ int64_t interpreter_status_;
+};
+
+} // namespace tflite
+
+#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr
+#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr)
+
+#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \
+ tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
+ (profiler), (tag))
+
+#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \
+ tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
+ (profiler), (tag), (node_index))
+
+#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \
+ tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \
+ _profile_, __COUNTER__)((profiler), (tag), (node_index))
+
+#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \
+ profiler, tag, delegate_status, interpreter_status) \
+ do { \
+ if (profiler) { \
+ const auto handle = profiler->BeginEvent( \
+ tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \
+ delegate_status, interpreter_status); \
+ profiler->EndEvent(handle); \
+ } \
+ } while (false);
+
+#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_
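A minimal sketch of how this API is used, assuming a hypothetical `StdoutProfiler` that just prints events (a real implementation would record timestamps keyed by the returned handle):

```cpp
#include <cstdint>
#include <cstdio>

#include "tensorflow/lite/core/api/profiler.h"

// Hypothetical profiler that logs begin/end events to stdout.
class StdoutProfiler : public tflite::Profiler {
 public:
  uint32_t BeginEvent(const char* tag, EventType event_type,
                      int64_t event_metadata1,
                      int64_t event_metadata2) override {
    printf("begin %s\n", tag);
    return next_handle_++;
  }
  void EndEvent(uint32_t event_handle) override {
    printf("end %u\n", event_handle);
  }

 private:
  uint32_t next_handle_ = 1;
};

void Example(StdoutProfiler* profiler) {
  // Emits a DEFAULT event spanning this scope via a ScopedProfile variable.
  TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, "invoke");
  // ... work being measured ...
}
```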
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/tensor_utils.cc b/TensorflowLiteMicro/tensorflow/lite/core/api/tensor_utils.cc
new file mode 100644
index 0000000..3aac16b
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/tensor_utils.cc
@@ -0,0 +1,50 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/core/api/tensor_utils.h"
+
+#include <string.h>
+
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite {
+
+TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
+ if (!tensor->is_variable) {
+ return kTfLiteOk;
+ }
+ // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it
+ // to the value of the buffer.
+ int value = 0;
+ if (tensor->type == kTfLiteInt8) {
+ value = tensor->params.zero_point;
+ }
+ // TODO(b/139446230): Provide a platform header to better handle these
+ // specific scenarios.
+#if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \
+ defined(__i386) || defined(__x86__) || defined(__X86__) || \
+ defined(_X86_) || defined(_M_IX86) || defined(_M_X64)
+ memset(tensor->data.raw, value, tensor->bytes);
+#else
+ char* raw_ptr = tensor->data.raw;
+ for (size_t i = 0; i < tensor->bytes; ++i) {
+ *raw_ptr = value;
+ raw_ptr++;
+ }
+#endif
+ return kTfLiteOk;
+}
+
+} // namespace tflite
diff --git a/TensorflowLiteMicro/tensorflow/lite/core/api/tensor_utils.h b/TensorflowLiteMicro/tensorflow/lite/core/api/tensor_utils.h
new file mode 100644
index 0000000..9f1cf94
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/core/api/tensor_utils.h
@@ -0,0 +1,28 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
+#define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
+
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite {
+
+// Resets a variable tensor to the default value.
+TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor);
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
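Usage is straightforward. As a sketch (the `ResetAllVariables` helper and its arguments are hypothetical), stateful graphs typically reset every variable tensor between independent input sequences; int8 tensors are reset to their zero point rather than literal zero:

```cpp
#include "tensorflow/lite/core/api/tensor_utils.h"

// Hypothetical helper: reset all variable tensors before a new sequence.
// `tensors` and `count` are assumed to come from the interpreter's state.
TfLiteStatus ResetAllVariables(TfLiteTensor* tensors, int count) {
  for (int i = 0; i < count; ++i) {
    // Non-variable tensors are skipped inside ResetVariableTensor.
    TfLiteStatus status = tflite::ResetVariableTensor(&tensors[i]);
    if (status != kTfLiteOk) return status;
  }
  return kTfLiteOk;
}
```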
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/SConscript b/TensorflowLiteMicro/tensorflow/lite/experimental/SConscript
new file mode 100644
index 0000000..4c815c4
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/SConscript
@@ -0,0 +1,15 @@
+# RT-Thread building script for bridge
+
+import os
+from building import *
+
+cwd = GetCurrentDir()
+objs = []
+list = os.listdir(cwd)
+
+for d in list:
+ path = os.path.join(cwd, d)
+ if os.path.isfile(os.path.join(path, 'SConscript')):
+ objs = objs + SConscript(os.path.join(d, 'SConscript'))
+
+Return('objs')
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/SConscript b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/SConscript
new file mode 100644
index 0000000..4c815c4
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/SConscript
@@ -0,0 +1,15 @@
+# RT-Thread building script for bridge
+
+import os
+from building import *
+
+cwd = GetCurrentDir()
+objs = []
+list = os.listdir(cwd)
+
+for d in list:
+ path = os.path.join(cwd, d)
+ if os.path.isfile(os.path.join(path, 'SConscript')):
+ objs = objs + SConscript(os.path.join(d, 'SConscript'))
+
+Return('objs')
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/SConscript b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/SConscript
new file mode 100644
index 0000000..6b81812
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/SConscript
@@ -0,0 +1,28 @@
+from building import *
+import os
+
+cwd = GetCurrentDir()
+src = Glob('*.c') + Glob('*.cc')
+
+#.
+root = str(Dir('#'))
+packages = os.path.join(root, 'Middlewares')
+file_list = os.listdir(packages)
+for f in file_list:
+ if(f.split('-')[0] == 'TF'):
+ tflm_pkg = os.path.join(packages, f)
+ break
+#./third_party/flatbuffers/include
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+#./third_party/gemmlowp
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+#./third_party/kissfft
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+#./third_party/ruy
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite/experimental', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/bits.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/bits.h
new file mode 100644
index 0000000..04b3ba6
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/bits.h
@@ -0,0 +1,102 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
+
+#ifdef __cplusplus
+#include <cstdint>
+
+extern "C" {
+#endif
+
+static inline int CountLeadingZeros32Slow(uint64_t n) {
+ int zeroes = 28;
+ if (n >> 16) zeroes -= 16, n >>= 16;
+ if (n >> 8) zeroes -= 8, n >>= 8;
+ if (n >> 4) zeroes -= 4, n >>= 4;
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+static inline int CountLeadingZeros32(uint32_t n) {
+#if defined(_MSC_VER)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse(&result, n)) {
+ return 31 - result;
+ }
+ return 32;
+#elif defined(__GNUC__)
+
+ // Handle 0 as a special case because __builtin_clz(0) is undefined.
+ if (n == 0) {
+ return 32;
+ }
+ return __builtin_clz(n);
+#else
+ return CountLeadingZeros32Slow(n);
+#endif
+}
+
+static inline int MostSignificantBit32(uint32_t n) {
+ return 32 - CountLeadingZeros32(n);
+}
+
+static inline int CountLeadingZeros64Slow(uint64_t n) {
+ int zeroes = 60;
+ if (n >> 32) zeroes -= 32, n >>= 32;
+ if (n >> 16) zeroes -= 16, n >>= 16;
+ if (n >> 8) zeroes -= 8, n >>= 8;
+ if (n >> 4) zeroes -= 4, n >>= 4;
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+static inline int CountLeadingZeros64(uint64_t n) {
+#if defined(_MSC_VER) && defined(_M_X64)
+ // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse64(&result, n)) {
+ return 63 - result;
+ }
+ return 64;
+#elif defined(_MSC_VER)
+ // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
+ return 31 - result;
+ }
+ if (_BitScanReverse(&result, n)) {
+ return 63 - result;
+ }
+ return 64;
+#elif defined(__GNUC__)
+
+ // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+ if (n == 0) {
+ return 64;
+ }
+ return __builtin_clzll(n);
+#else
+ return CountLeadingZeros64Slow(n);
+#endif
+}
+
+static inline int MostSignificantBit64(uint64_t n) {
+ return 64 - CountLeadingZeros64(n);
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
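A few spot checks of the helpers above, which may help when porting: `MostSignificantBit32(n)` is the 1-based index of the highest set bit, i.e. `floor(log2(n)) + 1` for nonzero `n`. This sketch is illustrative, not part of the library.

```cpp
#include <assert.h>

#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"

static void BitsSanityChecks(void) {
  assert(CountLeadingZeros32(1u) == 31);
  assert(CountLeadingZeros32(0u) == 32);       // 0 is special-cased.
  assert(MostSignificantBit32(0x10u) == 5);    // 0b10000 -> bit 5
  assert(MostSignificantBit64(1ull << 40) == 41);
}
```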
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft.cc b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft.cc
new file mode 100644
index 0000000..72779f5
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft.cc
@@ -0,0 +1,54 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+
+#include <string.h>
+
+#define FIXED_POINT 16
+#include "kiss_fft.h"
+#include "tools/kiss_fftr.h"
+
+void FftCompute(struct FftState* state, const int16_t* input,
+ int input_scale_shift) {
+ const size_t input_size = state->input_size;
+ const size_t fft_size = state->fft_size;
+
+ int16_t* fft_input = state->input;
+ // First, scale the input by the given shift.
+ size_t i;
+ for (i = 0; i < input_size; ++i) {
+ fft_input[i] = static_cast<int16_t>(static_cast<uint16_t>(input[i])
+                                     << input_scale_shift);
+ }
+ // Zero out whatever else remains in the top part of the input.
+ for (; i < fft_size; ++i) {
+ fft_input[i] = 0;
+ }
+
+ // Apply the FFT.
+ kiss_fftr(
+     reinterpret_cast<kiss_fftr_cfg>(state->scratch),
+     state->input,
+     reinterpret_cast<kiss_fft_cpx*>(state->output));
+}
+
+void FftInit(struct FftState* state) {
+ // All the initialization is done in FftPopulateState()
+}
+
+void FftReset(struct FftState* state) {
+ memset(state->input, 0, state->fft_size * sizeof(*state->input));
+ memset(state->output, 0, (state->fft_size / 2 + 1) * sizeof(*state->output));
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft.h
new file mode 100644
index 0000000..aaffa69
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft.h
@@ -0,0 +1,50 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct complex_int16_t {
+ int16_t real;
+ int16_t imag;
+};
+
+struct FftState {
+ int16_t* input;
+ struct complex_int16_t* output;
+ size_t fft_size;
+ size_t input_size;
+ void* scratch;
+ size_t scratch_size;
+};
+
+void FftCompute(struct FftState* state, const int16_t* input,
+ int input_scale_shift);
+
+void FftInit(struct FftState* state);
+
+void FftReset(struct FftState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc
new file mode 100644
index 0000000..d516b46
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc
@@ -0,0 +1,72 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
+
+#include <stdio.h>
+
+#define FIXED_POINT 16
+#include "kiss_fft.h"
+#include "tools/kiss_fftr.h"
+
+int FftPopulateState(struct FftState* state, size_t input_size) {
+ state->input_size = input_size;
+ state->fft_size = 1;
+ while (state->fft_size < state->input_size) {
+ state->fft_size <<= 1;
+ }
+
+ state->input = reinterpret_cast<int16_t*>(
+     malloc(state->fft_size * sizeof(*state->input)));
+ if (state->input == nullptr) {
+ fprintf(stderr, "Failed to alloc fft input buffer\n");
+ return 0;
+ }
+
+ state->output = reinterpret_cast<struct complex_int16_t*>(
+     malloc((state->fft_size / 2 + 1) * sizeof(*state->output) * 2));
+ if (state->output == nullptr) {
+ fprintf(stderr, "Failed to alloc fft output buffer\n");
+ return 0;
+ }
+
+ // Ask kissfft how much memory it wants.
+ size_t scratch_size = 0;
+ kiss_fftr_cfg kfft_cfg = kiss_fftr_alloc(
+ state->fft_size, 0, nullptr, &scratch_size);
+ if (kfft_cfg != nullptr) {
+ fprintf(stderr, "Kiss memory sizing failed.\n");
+ return 0;
+ }
+ state->scratch = malloc(scratch_size);
+ if (state->scratch == nullptr) {
+ fprintf(stderr, "Failed to alloc fft scratch buffer\n");
+ return 0;
+ }
+ state->scratch_size = scratch_size;
+ // Let kissfft configure the scratch space we just allocated
+ kfft_cfg = kiss_fftr_alloc(state->fft_size, 0,
+ state->scratch, &scratch_size);
+ if (kfft_cfg != state->scratch) {
+ fprintf(stderr, "Kiss memory preallocation strategy failed.\n");
+ return 0;
+ }
+ return 1;
+}
+
+void FftFreeStateContents(struct FftState* state) {
+ free(state->input);
+ free(state->output);
+ free(state->scratch);
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.h
new file mode 100644
index 0000000..6a47130
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/fft_util.h
@@ -0,0 +1,34 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Prepares an FFT for the given input size.
+int FftPopulateState(struct FftState* state, size_t input_size);
+
+// Frees any allocated buffers.
+void FftFreeStateContents(struct FftState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
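Putting fft.h and fft_util.h together, the expected lifecycle is populate, compute, free. A sketch, assuming a 480-sample frame (30 ms at 16 kHz; the state is padded up to the next power of two, 512):

```cpp
#include <stdint.h>

#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"

// Hypothetical one-shot spectrum computation for a single audio frame.
int ComputeSpectrum(const int16_t* samples) {
  struct FftState state;
  if (!FftPopulateState(&state, 480)) {
    return 0;  // Allocation failed.
  }
  FftInit(&state);
  // No extra scaling applied to the input here.
  FftCompute(&state, samples, /*input_scale_shift=*/0);
  // state.output now holds (fft_size / 2 + 1) complex bins.
  FftFreeStateContents(&state);
  return 1;
}
```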
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank.c
new file mode 100644
index 0000000..80f8738
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank.c
@@ -0,0 +1,134 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
+
+#include <string.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
+ struct complex_int16_t* fft_output,
+ int32_t* energy) {
+ const int end_index = state->end_index;
+ int i;
+ energy += state->start_index;
+ fft_output += state->start_index;
+ for (i = state->start_index; i < end_index; ++i) {
+ const int32_t real = fft_output->real;
+ const int32_t imag = fft_output->imag;
+ fft_output++;
+ const uint32_t mag_squared = (real * real) + (imag * imag);
+ *energy++ = mag_squared;
+ }
+}
+
+void FilterbankAccumulateChannels(struct FilterbankState* state,
+ const int32_t* energy) {
+ uint64_t* work = state->work;
+ uint64_t weight_accumulator = 0;
+ uint64_t unweight_accumulator = 0;
+
+ const int16_t* channel_frequency_starts = state->channel_frequency_starts;
+ const int16_t* channel_weight_starts = state->channel_weight_starts;
+ const int16_t* channel_widths = state->channel_widths;
+
+ int num_channels_plus_1 = state->num_channels + 1;
+ int i;
+ for (i = 0; i < num_channels_plus_1; ++i) {
+ const int32_t* magnitudes = energy + *channel_frequency_starts++;
+ const int16_t* weights = state->weights + *channel_weight_starts;
+ const int16_t* unweights = state->unweights + *channel_weight_starts++;
+ const int width = *channel_widths++;
+ int j;
+ for (j = 0; j < width; ++j) {
+ weight_accumulator += *weights++ * ((uint64_t)*magnitudes);
+ unweight_accumulator += *unweights++ * ((uint64_t)*magnitudes);
+ ++magnitudes;
+ }
+ *work++ = weight_accumulator;
+ weight_accumulator = unweight_accumulator;
+ unweight_accumulator = 0;
+ }
+}
+
+static uint16_t Sqrt32(uint32_t num) {
+ if (num == 0) {
+ return 0;
+ }
+ uint32_t res = 0;
+ int max_bit_number = 32 - MostSignificantBit32(num);
+ max_bit_number |= 1;
+ uint32_t bit = 1U << (31 - max_bit_number);
+ int iterations = (31 - max_bit_number) / 2 + 1;
+ while (iterations--) {
+ if (num >= res + bit) {
+ num -= res + bit;
+ res = (res >> 1U) + bit;
+ } else {
+ res >>= 1U;
+ }
+ bit >>= 2U;
+ }
+ // Do rounding - if we have the bits.
+ if (num > res && res != 0xFFFF) {
+ ++res;
+ }
+ return res;
+}
+
+static uint32_t Sqrt64(uint64_t num) {
+ // Take a shortcut and just use 32 bit operations if the upper word is all
+ // clear. This will cause a slight off by one issue for numbers close to 2^32,
+ // but it probably isn't going to matter (and gives us a big performance win).
+ if ((num >> 32) == 0) {
+ return Sqrt32((uint32_t)num);
+ }
+ uint64_t res = 0;
+ int max_bit_number = 64 - MostSignificantBit64(num);
+ max_bit_number |= 1;
+ uint64_t bit = 1ULL << (63 - max_bit_number);
+ int iterations = (63 - max_bit_number) / 2 + 1;
+ while (iterations--) {
+ if (num >= res + bit) {
+ num -= res + bit;
+ res = (res >> 1U) + bit;
+ } else {
+ res >>= 1U;
+ }
+ bit >>= 2U;
+ }
+ // Do rounding - if we have the bits.
+ if (num > res && res != 0xFFFFFFFFLL) {
+ ++res;
+ }
+ return res;
+}
+
+uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift) {
+ const int num_channels = state->num_channels;
+ const uint64_t* work = state->work + 1;
+ // Reuse the work buffer since we're fine clobbering it at this point to hold
+ // the output.
+ uint32_t* output = (uint32_t*)state->work;
+ int i;
+ for (i = 0; i < num_channels; ++i) {
+ *output++ = Sqrt64(*work++) >> scale_down_shift;
+ }
+ return (uint32_t*)state->work;
+}
+
+void FilterbankReset(struct FilterbankState* state) {
+ memset(state->work, 0, (state->num_channels + 1) * sizeof(*state->work));
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank.h
new file mode 100644
index 0000000..1e6d388
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank.h
@@ -0,0 +1,63 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+
+#define kFilterbankBits 12
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FilterbankState {
+ int num_channels;
+ int start_index;
+ int end_index;
+ int16_t* channel_frequency_starts;
+ int16_t* channel_weight_starts;
+ int16_t* channel_widths;
+ int16_t* weights;
+ int16_t* unweights;
+ uint64_t* work;
+};
+
+// Converts the relevant complex values of an FFT output into energy (the
+// square magnitude).
+void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
+ struct complex_int16_t* fft_output,
+ int32_t* energy);
+
+// Computes the mel-scale filterbank on the given energy array. Output is cached
+// internally - to fetch it, you need to call FilterbankSqrt.
+void FilterbankAccumulateChannels(struct FilterbankState* state,
+ const int32_t* energy);
+
+// Applies an integer square root to the 64 bit intermediate values of the
+// filterbank, and returns a pointer to them. Memory will be invalidated the
+// next time FilterbankAccumulateChannels is called.
+uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift);
+
+void FilterbankReset(struct FilterbankState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
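The three filterbank calls are meant to be chained per frame. A sketch of that flow, assuming `state` was already populated (see `FilterbankPopulateState` in filterbank_util.h) and that `fft_output` and `energy` point at buffers of at least `state->end_index` elements; the `FrameToMelChannels` wrapper is hypothetical:

```cpp
#include <stdint.h>

#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"

uint32_t* FrameToMelChannels(struct FilterbankState* state,
                             struct complex_int16_t* fft_output,
                             int32_t* energy, int scale_down_shift) {
  // 1. Squared magnitudes for the FFT bins the filterbank actually uses.
  FilterbankConvertFftComplexToEnergy(state, fft_output, energy);
  // 2. Weighted accumulation into the mel channels (cached in state->work).
  FilterbankAccumulateChannels(state, energy);
  // 3. Integer square root back to 32-bit magnitudes, one per channel.
  return FilterbankSqrt(state, scale_down_shift);
}
```

Note the returned pointer aliases `state->work`, so it is only valid until the next `FilterbankAccumulateChannels` call.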
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c
new file mode 100644
index 0000000..f18ebf5
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c
@@ -0,0 +1,220 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define kFilterbankIndexAlignment 4
+#define kFilterbankChannelBlockSize 4
+
+void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config) {
+ config->num_channels = 32;
+ config->lower_band_limit = 125.0f;
+ config->upper_band_limit = 7500.0f;
+ config->output_scale_shift = 7;
+}
+
+static float FreqToMel(float freq) { return 1127.0 * log1p(freq / 700.0); }
+
+static void CalculateCenterFrequencies(const int num_channels,
+ const float lower_frequency_limit,
+ const float upper_frequency_limit,
+ float* center_frequencies) {
+ assert(lower_frequency_limit >= 0.0f);
+ assert(upper_frequency_limit > lower_frequency_limit);
+
+ const float mel_low = FreqToMel(lower_frequency_limit);
+ const float mel_hi = FreqToMel(upper_frequency_limit);
+ const float mel_span = mel_hi - mel_low;
+ const float mel_spacing = mel_span / ((float)num_channels);
+ int i;
+ for (i = 0; i < num_channels; ++i) {
+ center_frequencies[i] = mel_low + (mel_spacing * (i + 1));
+ }
+}
+
+static void QuantizeFilterbankWeights(const float float_weight, int16_t* weight,
+ int16_t* unweight) {
+ *weight = floor(float_weight * (1 << kFilterbankBits) + 0.5);
+ *unweight = floor((1.0 - float_weight) * (1 << kFilterbankBits) + 0.5);
+}
+
+int FilterbankPopulateState(const struct FilterbankConfig* config,
+ struct FilterbankState* state, int sample_rate,
+ int spectrum_size) {
+ state->num_channels = config->num_channels;
+ const int num_channels_plus_1 = config->num_channels + 1;
+
+ // How should we align things to index counts given the byte alignment?
+ const int index_alignment =
+ (kFilterbankIndexAlignment < sizeof(int16_t)
+ ? 1
+ : kFilterbankIndexAlignment / sizeof(int16_t));
+
+ state->channel_frequency_starts =
+ malloc(num_channels_plus_1 * sizeof(*state->channel_frequency_starts));
+ state->channel_weight_starts =
+ malloc(num_channels_plus_1 * sizeof(*state->channel_weight_starts));
+ state->channel_widths =
+ malloc(num_channels_plus_1 * sizeof(*state->channel_widths));
+ state->work = malloc(num_channels_plus_1 * sizeof(*state->work));
+
+ float* center_mel_freqs =
+ malloc(num_channels_plus_1 * sizeof(*center_mel_freqs));
+ int16_t* actual_channel_starts =
+ malloc(num_channels_plus_1 * sizeof(*actual_channel_starts));
+ int16_t* actual_channel_widths =
+ malloc(num_channels_plus_1 * sizeof(*actual_channel_widths));
+
+ if (state->channel_frequency_starts == NULL ||
+ state->channel_weight_starts == NULL || state->channel_widths == NULL ||
+ center_mel_freqs == NULL || actual_channel_starts == NULL ||
+ actual_channel_widths == NULL) {
+ free(center_mel_freqs);
+ free(actual_channel_starts);
+ free(actual_channel_widths);
+ fprintf(stderr, "Failed to allocate channel buffers\n");
+ return 0;
+ }
+
+ CalculateCenterFrequencies(num_channels_plus_1, config->lower_band_limit,
+ config->upper_band_limit, center_mel_freqs);
+
+ // Always exclude DC.
+ const float hz_per_sbin = 0.5 * sample_rate / ((float)spectrum_size - 1);
+ state->start_index = 1.5 + config->lower_band_limit / hz_per_sbin;
+ state->end_index = 0; // Initialized to zero here, but actually set below.
+
+ // For each channel, we need to figure out what frequencies belong to it, and
+ // how much padding we need to add so that we can efficiently multiply the
+ // weights and unweights for accumulation. To simplify the multiplication
+ // logic, all channels will have some multiplication to do (even if there are
+ // no frequencies that accumulate to that channel) - they will be directed to
+ // a set of zero weights.
+ int chan_freq_index_start = state->start_index;
+ int weight_index_start = 0;
+ int needs_zeros = 0;
+
+ int chan;
+ for (chan = 0; chan < num_channels_plus_1; ++chan) {
+ // Keep jumping frequencies until we overshoot the bound on this channel.
+ int freq_index = chan_freq_index_start;
+ while (FreqToMel((freq_index)*hz_per_sbin) <= center_mel_freqs[chan]) {
+ ++freq_index;
+ }
+
+ const int width = freq_index - chan_freq_index_start;
+ actual_channel_starts[chan] = chan_freq_index_start;
+ actual_channel_widths[chan] = width;
+
+ if (width == 0) {
+ // This channel doesn't actually get anything from the frequencies, it's
+ // always zero. We need then to insert some 'zero' weights into the
+ // output, and just redirect this channel to do a single multiplication at
+ // this point. For simplicity, the zeros are placed at the beginning of
+ // the weights arrays, so we have to go and update all the other
+ // weight_starts to reflect this shift (but only once).
+ state->channel_frequency_starts[chan] = 0;
+ state->channel_weight_starts[chan] = 0;
+ state->channel_widths[chan] = kFilterbankChannelBlockSize;
+ if (!needs_zeros) {
+ needs_zeros = 1;
+ int j;
+ for (j = 0; j < chan; ++j) {
+ state->channel_weight_starts[j] += kFilterbankChannelBlockSize;
+ }
+ weight_index_start += kFilterbankChannelBlockSize;
+ }
+ } else {
+ // How far back do we need to go to ensure that we have the proper
+ // alignment?
+ const int aligned_start =
+ (chan_freq_index_start / index_alignment) * index_alignment;
+ const int aligned_width = (chan_freq_index_start - aligned_start + width);
+ const int padded_width =
+ (((aligned_width - 1) / kFilterbankChannelBlockSize) + 1) *
+ kFilterbankChannelBlockSize;
+
+ state->channel_frequency_starts[chan] = aligned_start;
+ state->channel_weight_starts[chan] = weight_index_start;
+ state->channel_widths[chan] = padded_width;
+ weight_index_start += padded_width;
+ }
+ chan_freq_index_start = freq_index;
+ }
+
+ // Allocate the two arrays to store the weights - weight_index_start contains
+ // the index of what would be the next set of weights that we would need to
+ // add, so that's how many weights we need to allocate.
+ state->weights = calloc(weight_index_start, sizeof(*state->weights));
+ state->unweights = calloc(weight_index_start, sizeof(*state->unweights));
+
+ // If the alloc failed, we also need to nuke the arrays.
+ if (state->weights == NULL || state->unweights == NULL) {
+ free(center_mel_freqs);
+ free(actual_channel_starts);
+ free(actual_channel_widths);
+ fprintf(stderr, "Failed to allocate weights or unweights\n");
+ return 0;
+ }
+
+ // Next pass, compute all the weights. Since everything has been memset to
+ // zero, we only need to fill in the weights that correspond to some frequency
+ // for a channel.
+ const float mel_low = FreqToMel(config->lower_band_limit);
+ for (chan = 0; chan < num_channels_plus_1; ++chan) {
+ int frequency = actual_channel_starts[chan];
+ const int num_frequencies = actual_channel_widths[chan];
+ const int frequency_offset =
+ frequency - state->channel_frequency_starts[chan];
+ const int weight_start = state->channel_weight_starts[chan];
+ const float denom_val = (chan == 0) ? mel_low : center_mel_freqs[chan - 1];
+
+ int j;
+ for (j = 0; j < num_frequencies; ++j, ++frequency) {
+ const float weight =
+ (center_mel_freqs[chan] - FreqToMel(frequency * hz_per_sbin)) /
+ (center_mel_freqs[chan] - denom_val);
+
+ // Make the float into an integer for the weights (and unweights).
+ const int weight_index = weight_start + frequency_offset + j;
+ QuantizeFilterbankWeights(weight, state->weights + weight_index,
+ state->unweights + weight_index);
+ }
+ if (frequency > state->end_index) {
+ state->end_index = frequency;
+ }
+ }
+
+ free(center_mel_freqs);
+ free(actual_channel_starts);
+ free(actual_channel_widths);
+ if (state->end_index >= spectrum_size) {
+ fprintf(stderr, "Filterbank end_index is above spectrum size.\n");
+ return 0;
+ }
+ return 1;
+}
+
+void FilterbankFreeStateContents(struct FilterbankState* state) {
+ free(state->channel_frequency_starts);
+ free(state->channel_weight_starts);
+ free(state->channel_widths);
+ free(state->weights);
+ free(state->unweights);
+ free(state->work);
+}
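Two details above are worth pinning down: `FreqToMel` implements the standard 1127·ln(1 + f/700) mel mapping, and `QuantizeFilterbankWeights` rounds each weight/unweight pair into `kFilterbankBits` of fixed point so the pair always sums to one. A minimal standalone sketch (assuming `kFilterbankBits` is 12, as defined in the accompanying `filterbank.h`):

```c
// Standalone sketch mirroring FreqToMel and QuantizeFilterbankWeights above.
// FILTERBANK_BITS is assumed to match kFilterbankBits in filterbank.h (12).
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define FILTERBANK_BITS 12

static float FreqToMel(float freq) { return 1127.0f * log1pf(freq / 700.0f); }

int main(void) {
  // A 1 kHz tone sits near 1000 mel by construction of the scale.
  printf("mel(1000 Hz) = %.1f\n", FreqToMel(1000.0f));  // ~1000.0

  // Weights are stored in Q12: weight + unweight reconstructs 1.0 exactly.
  const float w = 0.3f;
  const int16_t weight = (int16_t)floorf(w * (1 << FILTERBANK_BITS) + 0.5f);
  const int16_t unweight =
      (int16_t)floorf((1.0f - w) * (1 << FILTERBANK_BITS) + 0.5f);
  printf("weight=%d unweight=%d sum=%d (expect %d)\n", weight, unweight,
         weight + unweight, 1 << FILTERBANK_BITS);
  return 0;
}
```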
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h
new file mode 100644
index 0000000..781d102
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h
@@ -0,0 +1,50 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FilterbankConfig {
+ // number of frequency channel buckets for filterbank
+ int num_channels;
+ // maximum frequency to include
+ float upper_band_limit;
+ // minimum frequency to include
+ float lower_band_limit;
+ // unused
+ int output_scale_shift;
+};
+
+// Fills the FilterbankConfig with "sane" defaults.
+void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config);
+
+// Allocates any buffers.
+int FilterbankPopulateState(const struct FilterbankConfig* config,
+ struct FilterbankState* state, int sample_rate,
+ int spectrum_size);
+
+// Frees any allocated buffers.
+void FilterbankFreeStateContents(struct FilterbankState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend.c
new file mode 100644
index 0000000..9de2a87
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend.c
@@ -0,0 +1,72 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
+ const int16_t* samples,
+ size_t num_samples,
+ size_t* num_samples_read) {
+ struct FrontendOutput output;
+ output.values = NULL;
+ output.size = 0;
+
+ // Try to apply the window - if it fails, return and wait for more data.
+ if (!WindowProcessSamples(&state->window, samples, num_samples,
+ num_samples_read)) {
+ return output;
+ }
+
+ // Apply the FFT to the window's output (and scale it so that the fixed point
+ // FFT can have as much resolution as possible).
+ int input_shift =
+ 15 - MostSignificantBit32(state->window.max_abs_output_value);
+ FftCompute(&state->fft, state->window.output, input_shift);
+
+  // We can re-use the fft's output buffer to hold the energy.
+ int32_t* energy = (int32_t*)state->fft.output;
+
+ FilterbankConvertFftComplexToEnergy(&state->filterbank, state->fft.output,
+ energy);
+
+ FilterbankAccumulateChannels(&state->filterbank, energy);
+ uint32_t* scaled_filterbank = FilterbankSqrt(&state->filterbank, input_shift);
+
+ // Apply noise reduction.
+ NoiseReductionApply(&state->noise_reduction, scaled_filterbank);
+
+ if (state->pcan_gain_control.enable_pcan) {
+ PcanGainControlApply(&state->pcan_gain_control, scaled_filterbank);
+ }
+
+ // Apply the log and scale.
+ int correction_bits =
+ MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
+ uint16_t* logged_filterbank =
+ LogScaleApply(&state->log_scale, scaled_filterbank,
+ state->filterbank.num_channels, correction_bits);
+
+ output.size = state->filterbank.num_channels;
+ output.values = logged_filterbank;
+ return output;
+}
+
+void FrontendReset(struct FrontendState* state) {
+ WindowReset(&state->window);
+ FftReset(&state->fft);
+ FilterbankReset(&state->filterbank);
+ NoiseReductionReset(&state->noise_reduction);
+}
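Before the FFT runs, `input_shift` is chosen from the window's peak amplitude so the fixed-point FFT input uses as much of its 15-bit headroom as possible. A sketch of that normalization, with a local stand-in for `MostSignificantBit32` from `bits.h` (which returns the 1-based index of the highest set bit):

```c
// Sketch of the pre-FFT scaling in FrontendProcessSamples. The real
// MostSignificantBit32 lives in bits.h; this local version is assumed
// equivalent: 1-based index of the highest set bit, 0 for x == 0.
#include <stdint.h>
#include <stdio.h>

static int MostSignificantBit32(uint32_t x) {
  int msb = 0;
  while (x) {
    ++msb;
    x >>= 1;
  }
  return msb;
}

int main(void) {
  const uint32_t max_abs = 1000;  // stands in for window.max_abs_output_value
  const int input_shift = 15 - MostSignificantBit32(max_abs);
  // 1000 occupies 10 bits, so a left shift of 5 brings it just under 2^15.
  printf("shift=%d scaled=%u\n", input_shift, max_abs << input_shift);
  return 0;
}
```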
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend.h
new file mode 100644
index 0000000..883df5f
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend.h
@@ -0,0 +1,64 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FrontendState {
+ struct WindowState window;
+ struct FftState fft;
+ struct FilterbankState filterbank;
+ struct NoiseReductionState noise_reduction;
+ struct PcanGainControlState pcan_gain_control;
+ struct LogScaleState log_scale;
+};
+
+struct FrontendOutput {
+ const uint16_t* values;
+ size_t size;
+};
+
+// Main entry point to processing frontend samples. Updates num_samples_read to
+// contain the number of samples that have been consumed from the input array.
+// Returns a struct containing the generated output. If not enough samples were
+// added to generate a feature vector, the returned size will be 0 and the
+// values pointer will be NULL. Note that the output pointer will be invalidated
+// as soon as FrontendProcessSamples is called again, so copy the contents
+// elsewhere if you need to use them later.
+struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
+ const int16_t* samples,
+ size_t num_samples,
+ size_t* num_samples_read);
+
+void FrontendReset(struct FrontendState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c
new file mode 100644
index 0000000..27224f6
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c
@@ -0,0 +1,85 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+void FrontendFillConfigWithDefaults(struct FrontendConfig* config) {
+ WindowFillConfigWithDefaults(&config->window);
+ FilterbankFillConfigWithDefaults(&config->filterbank);
+ NoiseReductionFillConfigWithDefaults(&config->noise_reduction);
+ PcanGainControlFillConfigWithDefaults(&config->pcan_gain_control);
+ LogScaleFillConfigWithDefaults(&config->log_scale);
+}
+
+int FrontendPopulateState(const struct FrontendConfig* config,
+ struct FrontendState* state, int sample_rate) {
+ memset(state, 0, sizeof(*state));
+
+ if (!WindowPopulateState(&config->window, &state->window, sample_rate)) {
+ fprintf(stderr, "Failed to populate window state\n");
+ return 0;
+ }
+
+ if (!FftPopulateState(&state->fft, state->window.size)) {
+ fprintf(stderr, "Failed to populate fft state\n");
+ return 0;
+ }
+ FftInit(&state->fft);
+
+ if (!FilterbankPopulateState(&config->filterbank, &state->filterbank,
+ sample_rate, state->fft.fft_size / 2 + 1)) {
+ fprintf(stderr, "Failed to populate filterbank state\n");
+ return 0;
+ }
+
+ if (!NoiseReductionPopulateState(&config->noise_reduction,
+ &state->noise_reduction,
+ state->filterbank.num_channels)) {
+ fprintf(stderr, "Failed to populate noise reduction state\n");
+ return 0;
+ }
+
+ int input_correction_bits =
+ MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
+ if (!PcanGainControlPopulateState(
+ &config->pcan_gain_control, &state->pcan_gain_control,
+ state->noise_reduction.estimate, state->filterbank.num_channels,
+ state->noise_reduction.smoothing_bits, input_correction_bits)) {
+ fprintf(stderr, "Failed to populate pcan gain control state\n");
+ return 0;
+ }
+
+ if (!LogScalePopulateState(&config->log_scale, &state->log_scale)) {
+ fprintf(stderr, "Failed to populate log scale state\n");
+ return 0;
+ }
+
+ FrontendReset(state);
+
+ // All good, return a true value.
+ return 1;
+}
+
+void FrontendFreeStateContents(struct FrontendState* state) {
+ WindowFreeStateContents(&state->window);
+ FftFreeStateContents(&state->fft);
+ FilterbankFreeStateContents(&state->filterbank);
+ NoiseReductionFreeStateContents(&state->noise_reduction);
+ PcanGainControlFreeStateContents(&state->pcan_gain_control);
+}
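Putting the pieces together, the frontend follows a fill-defaults / populate / process / free lifecycle. A hedged usage sketch of the API declared in `frontend.h` and `frontend_util.h` (the 16 kHz sample rate and the way the caller consumes features are assumptions, not requirements):

```c
// Usage sketch for the microfrontend API. Returns 0 on setup failure.
#include <stdio.h>

#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"

int ExtractFeatures(const int16_t* audio, size_t num_samples) {
  struct FrontendConfig config;
  struct FrontendState state;
  FrontendFillConfigWithDefaults(&config);
  if (!FrontendPopulateState(&config, &state, 16000 /* sample_rate */)) {
    return 0;
  }

  while (num_samples > 0) {
    size_t num_read = 0;
    struct FrontendOutput output =
        FrontendProcessSamples(&state, audio, num_samples, &num_read);
    audio += num_read;
    num_samples -= num_read;

    // output.values is invalidated by the next call, so consume (or copy)
    // it immediately; size == 0 means more samples are needed.
    for (size_t i = 0; i < output.size; ++i) {
      printf("%u ", (unsigned)output.values[i]);
    }
    if (output.size > 0) printf("\n");
  }

  FrontendFreeStateContents(&state);
  return 1;
}
```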
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h
new file mode 100644
index 0000000..895ce6c
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h
@@ -0,0 +1,52 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FrontendConfig {
+ struct WindowConfig window;
+ struct FilterbankConfig filterbank;
+ struct NoiseReductionConfig noise_reduction;
+ struct PcanGainControlConfig pcan_gain_control;
+ struct LogScaleConfig log_scale;
+};
+
+// Fills the FrontendConfig with "sane" defaults.
+void FrontendFillConfigWithDefaults(struct FrontendConfig* config);
+
+// Allocates any buffers.
+int FrontendPopulateState(const struct FrontendConfig* config,
+ struct FrontendState* state, int sample_rate);
+
+// Frees any allocated buffers.
+void FrontendFreeStateContents(struct FrontendState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_lut.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_lut.c
new file mode 100644
index 0000000..f59618e
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_lut.c
@@ -0,0 +1,30 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
+const uint16_t kLogLut[]
+#ifndef _MSC_VER
+ __attribute__((aligned(4)))
+#endif // _MSC_VER
+ = {0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163,
+ 2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848,
+ 3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934,
+ 5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507,
+ 5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633,
+ 5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370,
+ 5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762,
+ 4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848,
+ 3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659,
+ 2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224,
+ 1094, 963, 830, 695, 559, 421, 282, 142, 0, 0};
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_lut.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_lut.h
new file mode 100644
index 0000000..b2448a3
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_lut.h
@@ -0,0 +1,40 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Number of segments in the log lookup table. The table will be kLogSegments+1
+// in length (with some padding).
+#define kLogSegments 128
+#define kLogSegmentsLog2 7
+
+// Scale used by lookup table.
+#define kLogScale 65536
+#define kLogScaleLog2 16
+#define kLogCoeff 45426
+
+extern const uint16_t kLogLut[];
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
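`kLogCoeff` is the Q16 representation of ln(2): multiplying a Q16 base-2 logarithm by it converts to a natural logarithm, since ln(x) = log2(x)·ln(2). A one-line check:

```c
// Verifies kLogCoeff == round(ln(2) * kLogScale).
#include <math.h>
#include <stdio.h>

int main(void) {
  printf("%.0f\n", log(2.0) * 65536.0);  // prints 45426
  return 0;
}
```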
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.c
new file mode 100644
index 0000000..c27a50a
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.c
@@ -0,0 +1,83 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
+
+#define kuint16max 0x0000FFFF
+
+// The following functions implement integer logarithms of various sizes. The
+// approximation is calculated according to method described in
+// www.inti.gob.ar/electronicaeinformatica/instrumentacion/utic/
+// publicaciones/SPL2007/Log10-spl07.pdf
+// It first calculates log2 of the input and then converts it to natural
+// logarithm.
+
+static uint32_t Log2FractionPart(const uint32_t x, const uint32_t log2x) {
+ // Part 1
+ int32_t frac = x - (1LL << log2x);
+ if (log2x < kLogScaleLog2) {
+ frac <<= kLogScaleLog2 - log2x;
+ } else {
+ frac >>= log2x - kLogScaleLog2;
+ }
+ // Part 2
+ const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2);
+ const uint32_t seg_unit =
+ (((uint32_t)1) << kLogScaleLog2) >> kLogSegmentsLog2;
+
+ const int32_t c0 = kLogLut[base_seg];
+ const int32_t c1 = kLogLut[base_seg + 1];
+ const int32_t seg_base = seg_unit * base_seg;
+ const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2;
+ return frac + c0 + rel_pos;
+}
+
+static uint32_t Log(const uint32_t x, const uint32_t scale_shift) {
+ const uint32_t integer = MostSignificantBit32(x) - 1;
+ const uint32_t fraction = Log2FractionPart(x, integer);
+ const uint32_t log2 = (integer << kLogScaleLog2) + fraction;
+ const uint32_t round = kLogScale / 2;
+ const uint32_t loge = (((uint64_t)kLogCoeff) * log2 + round) >> kLogScaleLog2;
+ // Finally scale to our output scale
+ const uint32_t loge_scaled = ((loge << scale_shift) + round) >> kLogScaleLog2;
+ return loge_scaled;
+}
+
+uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
+ int signal_size, int correction_bits) {
+ const int scale_shift = state->scale_shift;
+ uint16_t* output = (uint16_t*)signal;
+ uint16_t* ret = output;
+ int i;
+ for (i = 0; i < signal_size; ++i) {
+ uint32_t value = *signal++;
+ if (state->enable_log) {
+ if (correction_bits < 0) {
+ value >>= -correction_bits;
+ } else {
+ value <<= correction_bits;
+ }
+ if (value > 1) {
+ value = Log(value, scale_shift);
+ } else {
+ value = 0;
+ }
+ }
+ *output++ = (value < kuint16max) ? value : kuint16max;
+ }
+ return ret;
+}
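In float terms, each output element of `LogScaleApply` is approximately ln(value · 2^correction_bits) · 2^scale_shift, clamped to 16 bits; the LUT path above is a piecewise-linear approximation of this. A reference sketch under that assumption:

```c
// Float reference for one element of LogScaleApply; the fixed-point path
// above should land within a small error of this. ldexpf applies the
// correction_bits shift in either direction.
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t LogScaleReference(uint32_t value, int correction_bits,
                                  int scale_shift) {
  const float shifted = ldexpf((float)value, correction_bits);
  if (shifted <= 1.0f) return 0;  // matches the value <= 1 early-out above
  const float scaled = logf(shifted) * (float)(1 << scale_shift);
  return (uint16_t)(scaled < 65535.0f ? scaled : 65535.0f);
}

int main(void) {
  // With scale_shift = 6 (the default LogScaleFillConfigWithDefaults installs)
  // and no correction, ln(1000) * 64 is about 442.
  printf("%u\n", LogScaleReference(1000, 0, 6));
  return 0;
}
```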
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.h
new file mode 100644
index 0000000..a383f32
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale.h
@@ -0,0 +1,39 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct LogScaleState {
+ int enable_log;
+ int scale_shift;
+};
+
+// Applies a fixed point logarithm to the signal and converts it to 16 bit. Note
+// that the signal array will be modified.
+uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
+ int signal_size, int correction_bits);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c
new file mode 100644
index 0000000..0e3dd1d
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c
@@ -0,0 +1,27 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
+
+void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) {
+ config->enable_log = 1;
+ config->scale_shift = 6;
+}
+
+int LogScalePopulateState(const struct LogScaleConfig* config,
+ struct LogScaleState* state) {
+ state->enable_log = config->enable_log;
+ state->scale_shift = config->scale_shift;
+ return 1;
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h
new file mode 100644
index 0000000..11f7d9e
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h
@@ -0,0 +1,45 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct LogScaleConfig {
+ // set to false (0) to disable this module
+ int enable_log;
+ // scale results by 2^(scale_shift)
+ int scale_shift;
+};
+
+// Populates the LogScaleConfig with "sane" default values.
+void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config);
+
+// Allocates any buffers.
+int LogScalePopulateState(const struct LogScaleConfig* config,
+ struct LogScaleState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c
new file mode 100644
index 0000000..16b30e6
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c
@@ -0,0 +1,51 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
+
+#include <string.h>
+
+void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal) {
+ int i;
+ for (i = 0; i < state->num_channels; ++i) {
+ const uint32_t smoothing =
+ ((i & 1) == 0) ? state->even_smoothing : state->odd_smoothing;
+ const uint32_t one_minus_smoothing = (1 << kNoiseReductionBits) - smoothing;
+
+ // Update the estimate of the noise.
+ const uint32_t signal_scaled_up = signal[i] << state->smoothing_bits;
+ uint32_t estimate =
+ (((uint64_t)signal_scaled_up * smoothing) +
+ ((uint64_t)state->estimate[i] * one_minus_smoothing)) >>
+ kNoiseReductionBits;
+ state->estimate[i] = estimate;
+
+ // Make sure that we can't get a negative value for the signal - estimate.
+ if (estimate > signal_scaled_up) {
+ estimate = signal_scaled_up;
+ }
+
+ const uint32_t floor =
+ ((uint64_t)signal[i] * state->min_signal_remaining) >>
+ kNoiseReductionBits;
+ const uint32_t subtracted =
+ (signal_scaled_up - estimate) >> state->smoothing_bits;
+ const uint32_t output = subtracted > floor ? subtracted : floor;
+ signal[i] = output;
+ }
+}
+
+void NoiseReductionReset(struct NoiseReductionState* state) {
+ memset(state->estimate, 0, sizeof(*state->estimate) * state->num_channels);
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h
new file mode 100644
index 0000000..46d3f52
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h
@@ -0,0 +1,46 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
+
+#define kNoiseReductionBits 14
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct NoiseReductionState {
+ int smoothing_bits;
+ uint16_t even_smoothing;
+ uint16_t odd_smoothing;
+ uint16_t min_signal_remaining;
+ int num_channels;
+ uint32_t* estimate;
+};
+
+// Removes stationary noise from each channel of the signal using a low pass
+// filter.
+void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal);
+
+void NoiseReductionReset(struct NoiseReductionState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c
new file mode 100644
index 0000000..a6c9234
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c
@@ -0,0 +1,45 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config) {
+ config->smoothing_bits = 10;
+ config->even_smoothing = 0.025;
+ config->odd_smoothing = 0.06;
+ config->min_signal_remaining = 0.05;
+}
+
+int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
+ struct NoiseReductionState* state,
+ int num_channels) {
+ state->smoothing_bits = config->smoothing_bits;
+ state->odd_smoothing = config->odd_smoothing * (1 << kNoiseReductionBits);
+ state->even_smoothing = config->even_smoothing * (1 << kNoiseReductionBits);
+ state->min_signal_remaining =
+ config->min_signal_remaining * (1 << kNoiseReductionBits);
+ state->num_channels = num_channels;
+ state->estimate = calloc(state->num_channels, sizeof(*state->estimate));
+ if (state->estimate == NULL) {
+ fprintf(stderr, "Failed to alloc estimate buffer\n");
+ return 0;
+ }
+ return 1;
+}
+
+void NoiseReductionFreeStateContents(struct NoiseReductionState* state) {
+ free(state->estimate);
+}
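The fixed-point update in `NoiseReductionApply` is, in float terms, a per-channel exponential moving average of the signal energy (the noise estimate) followed by subtraction with a floor; the defaults above are stored as ⌊x·2^kNoiseReductionBits⌋, so `even_smoothing = 0.025` becomes 409 in Q14. A float-domain sketch of one channel:

```c
// Float-domain equivalent of NoiseReductionApply for a single channel,
// ignoring the Q14 scaling. smoothing is even_smoothing or odd_smoothing
// depending on the channel index.
#include <stdio.h>

static float NoiseReduceChannel(float signal, float* estimate, float smoothing,
                                float min_signal_remaining) {
  // Exponential moving average tracks the stationary noise level.
  *estimate = smoothing * signal + (1.0f - smoothing) * *estimate;

  // Subtract the estimate, but never return less than the floor.
  const float floor_value = signal * min_signal_remaining;
  const float subtracted = signal - *estimate;
  return subtracted > floor_value ? subtracted : floor_value;
}

int main(void) {
  float estimate = 0.0f;
  // A constant input is treated as noise: the output decays toward the
  // 5% floor as the estimate converges.
  for (int i = 0; i < 5; ++i) {
    printf("%.3f\n", NoiseReduceChannel(100.0f, &estimate, 0.025f, 0.05f));
  }
  return 0;
}
```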
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h
new file mode 100644
index 0000000..fa55539
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h
@@ -0,0 +1,50 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct NoiseReductionConfig {
+ // scale the signal up by 2^(smoothing_bits) before reduction
+ int smoothing_bits;
+ // smoothing coefficient for even-numbered channels
+ float even_smoothing;
+ // smoothing coefficient for odd-numbered channels
+ float odd_smoothing;
+ // fraction of signal to preserve (1.0 disables this module)
+ float min_signal_remaining;
+};
+
+// Populates the NoiseReductionConfig with "sane" default values.
+void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config);
+
+// Allocates any buffers.
+int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
+ struct NoiseReductionState* state,
+ int num_channels);
+
+// Frees any allocated buffers.
+void NoiseReductionFreeStateContents(struct NoiseReductionState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c
new file mode 100644
index 0000000..22d5876
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c
@@ -0,0 +1,56 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) {
+ if (x <= 2) {
+ return lut[x];
+ }
+
+ const int16_t interval = MostSignificantBit32(x);
+ lut += 4 * interval - 6;
+
+ const int16_t frac =
+ ((interval < 11) ? (x << (11 - interval)) : (x >> (interval - 11))) &
+ 0x3FF;
+
+ int32_t result = ((int32_t)lut[2] * frac) >> 5;
+ result += (int32_t)((uint32_t)lut[1] << 5);
+ result *= frac;
+ result = (result + (1 << 14)) >> 15;
+ result += lut[0];
+ return (int16_t)result;
+}
+
+uint32_t PcanShrink(const uint32_t x) {
+ if (x < (2 << kPcanSnrBits)) {
+ return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits);
+ } else {
+ return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits);
+ }
+}
+
+void PcanGainControlApply(struct PcanGainControlState* state,
+ uint32_t* signal) {
+ int i;
+ for (i = 0; i < state->num_channels; ++i) {
+ const uint32_t gain =
+ WideDynamicFunction(state->noise_estimate[i], state->gain_lut);
+ const uint32_t snr = ((uint64_t)signal[i] * gain) >> state->snr_shift;
+ signal[i] = PcanShrink(snr);
+ }
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h
new file mode 100644
index 0000000..3f6222b
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define kPcanSnrBits 12
+#define kPcanOutputBits 6
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Details at https://research.google/pubs/pub45911.pdf
+struct PcanGainControlState {
+ int enable_pcan;
+ uint32_t* noise_estimate;
+ int num_channels;
+ int16_t* gain_lut;
+ int32_t snr_shift;
+};
+
+int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut);
+
+uint32_t PcanShrink(const uint32_t x);
+
+void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c
new file mode 100644
index 0000000..e850d43
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c
@@ -0,0 +1,92 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define kint16max 0x00007FFF
+
+void PcanGainControlFillConfigWithDefaults(
+ struct PcanGainControlConfig* config) {
+ config->enable_pcan = 0;
+ config->strength = 0.95;
+ config->offset = 80.0;
+ config->gain_bits = 21;
+}
+
+int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
+ int32_t input_bits, uint32_t x) {
+ const float x_as_float = ((float)x) / ((uint32_t)1 << input_bits);
+ const float gain_as_float =
+ ((uint32_t)1 << config->gain_bits) *
+ powf(x_as_float + config->offset, -config->strength);
+
+ if (gain_as_float > kint16max) {
+ return kint16max;
+ }
+ return (int16_t)(gain_as_float + 0.5f);
+}
+
+int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
+ struct PcanGainControlState* state,
+ uint32_t* noise_estimate,
+ const int num_channels,
+ const uint16_t smoothing_bits,
+ const int32_t input_correction_bits) {
+ state->enable_pcan = config->enable_pcan;
+ if (!state->enable_pcan) {
+ return 1;
+ }
+ state->noise_estimate = noise_estimate;
+ state->num_channels = num_channels;
+ state->gain_lut = malloc(kWideDynamicFunctionLUTSize * sizeof(int16_t));
+ if (state->gain_lut == NULL) {
+ fprintf(stderr, "Failed to allocate gain LUT\n");
+ return 0;
+ }
+ state->snr_shift = config->gain_bits - input_correction_bits - kPcanSnrBits;
+
+ const int32_t input_bits = smoothing_bits - input_correction_bits;
+ state->gain_lut[0] = PcanGainLookupFunction(config, input_bits, 0);
+ state->gain_lut[1] = PcanGainLookupFunction(config, input_bits, 1);
+ state->gain_lut -= 6;
+ int interval;
+ for (interval = 2; interval <= kWideDynamicFunctionBits; ++interval) {
+ const uint32_t x0 = (uint32_t)1 << (interval - 1);
+ const uint32_t x1 = x0 + (x0 >> 1);
+ const uint32_t x2 =
+ (interval == kWideDynamicFunctionBits) ? x0 + (x0 - 1) : 2 * x0;
+
+ const int16_t y0 = PcanGainLookupFunction(config, input_bits, x0);
+ const int16_t y1 = PcanGainLookupFunction(config, input_bits, x1);
+ const int16_t y2 = PcanGainLookupFunction(config, input_bits, x2);
+
+ const int32_t diff1 = (int32_t)y1 - y0;
+ const int32_t diff2 = (int32_t)y2 - y0;
+ const int32_t a1 = 4 * diff1 - diff2;
+ const int32_t a2 = diff2 - a1;
+
+ state->gain_lut[4 * interval] = y0;
+ state->gain_lut[4 * interval + 1] = (int16_t)a1;
+ state->gain_lut[4 * interval + 2] = (int16_t)a2;
+ }
+ state->gain_lut += 6;
+ return 1;
+}
+
+void PcanGainControlFreeStateContents(struct PcanGainControlState* state) {
+ free(state->gain_lut);
+}
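The LUT built above interpolates a simple rule: each channel's gain is (noise_estimate + offset)^(-strength), scaled into `gain_bits` of fixed point by `PcanGainLookupFunction`; `PcanGainControlApply` then multiplies the signal by that gain and compresses the result with `PcanShrink`. A float-domain sketch of the gain curve:

```c
// Float sketch of the PCAN gain at the interpolation knots, following
// PcanGainLookupFunction above (minus the 2^gain_bits scaling).
#include <math.h>
#include <stdio.h>

static float PcanGainReference(float noise_estimate, float offset,
                               float strength) {
  // Louder estimated noise -> smaller gain; strength 0 disables the effect.
  return powf(noise_estimate + offset, -strength);
}

int main(void) {
  // Defaults from PcanGainControlFillConfigWithDefaults: strength 0.95,
  // offset 80. The estimates 20 and 920 are illustrative values.
  printf("quiet: %.5f\n", PcanGainReference(20.0f, 80.0f, 0.95f));
  printf("loud:  %.5f\n", PcanGainReference(920.0f, 80.0f, 0.95f));
  return 0;
}
```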
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h
new file mode 100644
index 0000000..d4bfaa2
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h
@@ -0,0 +1,57 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
+
+#define kWideDynamicFunctionBits 32
+#define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct PcanGainControlConfig {
+ // set to false (0) to disable this module
+ int enable_pcan;
+ // gain normalization exponent (0.0 disables, 1.0 full strength)
+ float strength;
+ // positive value added in the normalization denominator
+ float offset;
+ // number of fractional bits in the gain
+ int gain_bits;
+};
+
+void PcanGainControlFillConfigWithDefaults(
+ struct PcanGainControlConfig* config);
+
+int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
+ int32_t input_bits, uint32_t x);
+
+int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
+ struct PcanGainControlState* state,
+ uint32_t* noise_estimate,
+ const int num_channels,
+ const uint16_t smoothing_bits,
+ const int32_t input_correction_bits);
+
+void PcanGainControlFreeStateContents(struct PcanGainControlState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window.c
new file mode 100644
index 0000000..10da676
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window.c
@@ -0,0 +1,70 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
+
+#include <string.h>
+
+int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
+ size_t num_samples, size_t* num_samples_read) {
+ const int size = state->size;
+
+ // Copy samples from the samples buffer over to our local input.
+ size_t max_samples_to_copy = state->size - state->input_used;
+ if (max_samples_to_copy > num_samples) {
+ max_samples_to_copy = num_samples;
+ }
+ memcpy(state->input + state->input_used, samples,
+ max_samples_to_copy * sizeof(*samples));
+ *num_samples_read = max_samples_to_copy;
+ state->input_used += max_samples_to_copy;
+
+ if (state->input_used < state->size) {
+ // We don't have enough samples to compute a window.
+ return 0;
+ }
+
+ // Apply the window to the input.
+ const int16_t* coefficients = state->coefficients;
+ const int16_t* input = state->input;
+ int16_t* output = state->output;
+ int i;
+ int16_t max_abs_output_value = 0;
+ for (i = 0; i < size; ++i) {
+ int16_t new_value =
+ (((int32_t)*input++) * *coefficients++) >> kFrontendWindowBits;
+ *output++ = new_value;
+ if (new_value < 0) {
+ new_value = -new_value;
+ }
+ if (new_value > max_abs_output_value) {
+ max_abs_output_value = new_value;
+ }
+ }
+ // Shuffle the input down by the step size, and update how much we have used.
+ memmove(state->input, state->input + state->step,
+ sizeof(*state->input) * (state->size - state->step));
+ state->input_used -= state->step;
+ state->max_abs_output_value = max_abs_output_value;
+
+ // Indicate that the output buffer is valid for the next stage.
+ return 1;
+}
+
+void WindowReset(struct WindowState* state) {
+ memset(state->input, 0, state->size * sizeof(*state->input));
+ memset(state->output, 0, state->size * sizeof(*state->output));
+ state->input_used = 0;
+ state->max_abs_output_value = 0;
+}
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window.h
new file mode 100644
index 0000000..bad8151
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window.h
@@ -0,0 +1,49 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define kFrontendWindowBits 12
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct WindowState {
+ size_t size;
+ int16_t* coefficients;
+ size_t step;
+
+ int16_t* input;
+ size_t input_used;
+ int16_t* output;
+ int16_t max_abs_output_value;
+};
+
+// Applies a window to the samples coming in, stepping forward at the given
+// rate.
+int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
+ size_t num_samples, size_t* num_samples_read);
+
+void WindowReset(struct WindowState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window_util.c b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window_util.c
new file mode 100644
index 0000000..eee6e7b
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window_util.c
@@ -0,0 +1,73 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Some platforms don't have M_PI
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+void WindowFillConfigWithDefaults(struct WindowConfig* config) {
+ config->size_ms = 25;
+ config->step_size_ms = 10;
+}
+
+int WindowPopulateState(const struct WindowConfig* config,
+ struct WindowState* state, int sample_rate) {
+ state->size = config->size_ms * sample_rate / 1000;
+ state->step = config->step_size_ms * sample_rate / 1000;
+
+ state->coefficients = malloc(state->size * sizeof(*state->coefficients));
+ if (state->coefficients == NULL) {
+ fprintf(stderr, "Failed to allocate window coefficients\n");
+ return 0;
+ }
+
+ // Populate the window values.
+ const float arg = M_PI * 2.0 / ((float)state->size);
+ int i;
+ for (i = 0; i < state->size; ++i) {
+ float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5)));
+ // Scale it to fixed point and round it.
+ state->coefficients[i] =
+ floor(float_value * (1 << kFrontendWindowBits) + 0.5);
+ }
+
+ state->input_used = 0;
+ state->input = malloc(state->size * sizeof(*state->input));
+ if (state->input == NULL) {
+ fprintf(stderr, "Failed to allocate window input\n");
+ return 0;
+ }
+
+ state->output = malloc(state->size * sizeof(*state->output));
+ if (state->output == NULL) {
+ fprintf(stderr, "Failed to allocate window output\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+void WindowFreeStateContents(struct WindowState* state) {
+ free(state->coefficients);
+ free(state->input);
+ free(state->output);
+}
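+
+/* Worked example (illustrative, assuming a 16 kHz sample rate): with the
+ * defaults above, WindowPopulateState() computes
+ *   state->size == 25 * 16000 / 1000 == 400 samples,
+ *   state->step == 10 * 16000 / 1000 == 160 samples,
+ * and the Hann coefficients 0.5 - 0.5 * cos(arg * (i + 0.5)) peak near 1.0
+ * at the frame center, i.e. close to 1 << kFrontendWindowBits == 4096. */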
diff --git a/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window_util.h b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window_util.h
new file mode 100644
index 0000000..68e4de9
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/experimental/microfrontend/lib/window_util.h
@@ -0,0 +1,45 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct WindowConfig {
+ // length of window frame in milliseconds
+ size_t size_ms;
+ // length of step for next frame in milliseconds
+ size_t step_size_ms;
+};
+
+// Populates the WindowConfig with "sane" default values.
+void WindowFillConfigWithDefaults(struct WindowConfig* config);
+
+// Allocates any buffers.
+int WindowPopulateState(const struct WindowConfig* config,
+ struct WindowState* state, int sample_rate);
+
+// Frees any allocated buffers.
+void WindowFreeStateContents(struct WindowState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
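+
+/* Minimal usage sketch (illustrative only: the 16 kHz rate and the buffers
+ * are hypothetical, and the loop condition assumes WindowProcessSamples()
+ * returns nonzero once a full frame is ready, which window.h only implies):
+ *
+ *   struct WindowConfig config;
+ *   struct WindowState state;
+ *   WindowFillConfigWithDefaults(&config);   // 25 ms frames, 10 ms step
+ *   if (!WindowPopulateState(&config, &state, 16000)) { return 1; }
+ *   size_t num_read = 0;
+ *   while (WindowProcessSamples(&state, samples, num_samples, &num_read)) {
+ *     // state.output now holds one windowed frame of state.size values.
+ *     samples += num_read;
+ *     num_samples -= num_read;
+ *   }
+ *   WindowFreeStateContents(&state);
+ */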
diff --git a/TensorflowLiteMicro/tensorflow/lite/kernels/SConscript b/TensorflowLiteMicro/tensorflow/lite/kernels/SConscript
new file mode 100644
index 0000000..2763426
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/kernels/SConscript
@@ -0,0 +1,29 @@
+from building import *
+import os
+
+cwd = GetCurrentDir()
+src = Glob('*.c') + Glob('*.cc') + Glob('internal/*.cc')
+
+#.
+root = str(Dir('#'))
+packages = os.path.join(root, 'Middlewares')
+file_list = os.listdir(packages)
+for f in file_list:
+ if(f.split('-')[0] == 'TF'):
+ tflm_pkg = os.path.join(packages, f)
+ break
+#./third_party/flatbuffers/include
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+#./third_party/gemmlowp
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+#./third_party/kissfft
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+#./third_party/ruy
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite/kernels', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
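+
+# Note (illustrative): the loop above assumes exactly one folder under
+# 'Middlewares' whose name starts with 'TF'; if none matches, 'tflm_pkg' is
+# never bound and the os.path.join() calls above raise a NameError. A
+# defensive variant might read:
+#
+#   tflm_pkg = None
+#   for f in os.listdir(packages):
+#       if f.split('-')[0] == 'TF':
+#           tflm_pkg = os.path.join(packages, f)
+#           break
+#   if tflm_pkg is None:
+#       raise FileNotFoundError('no TF-* package found under Middlewares')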
diff --git a/TensorflowLiteMicro/tensorflow/lite/kernels/internal/common.h b/TensorflowLiteMicro/tensorflow/lite/kernels/internal/common.h
new file mode 100644
index 0000000..66a2d97
--- /dev/null
+++ b/TensorflowLiteMicro/tensorflow/lite/kernels/internal/common.h
@@ -0,0 +1,956 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
+
+#ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
+#ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
+#define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
+#endif
+#endif
+
+#include <functional>
+
+#include "fixedpoint/fixedpoint.h"
+#include "tensorflow/lite/kernels/internal/cppmath.h"
+#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+
+namespace tflite {
+
+constexpr int kReverseShift = -1;
+
+inline void GetActivationMinMax(FusedActivationFunctionType ac,
+                                float* output_activation_min,
+                                float* output_activation_max) {
+  switch (ac) {
+    case FusedActivationFunctionType::kNone:
+      *output_activation_min = std::numeric_limits<float>::lowest();
+      *output_activation_max = std::numeric_limits<float>::max();
+      break;
+    case FusedActivationFunctionType::kRelu:
+      *output_activation_min = 0.f;
+      *output_activation_max = std::numeric_limits<float>::max();
+ break;
+ case FusedActivationFunctionType::kRelu1:
+ *output_activation_min = -1.f;
+ *output_activation_max = 1.f;
+ break;
+ case FusedActivationFunctionType::kRelu6:
+ *output_activation_min = 0.f;
+ *output_activation_max = 6.f;
+ break;
+ }
+}
+
+template <typename T>
+inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
+ T output_activation_max) {
+ using std::max;
+ using std::min;
+ return min(max(x, output_activation_min), output_activation_max);
+}
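+
+// Example (illustrative): with the kRelu6 bounds from GetActivationMinMax(),
+// ActivationFunctionWithMinMax(7.5f, 0.f, 6.f) == 6.f and
+// ActivationFunctionWithMinMax(-1.f, 0.f, 6.f) == 0.f.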
+
+// Legacy function, left for compatibility only.
+template <FusedActivationFunctionType Ac>
+float ActivationFunction(float x) {
+ float output_activation_min, output_activation_max;
+ GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
+ return ActivationFunctionWithMinMax(x, output_activation_min,
+ output_activation_max);
+}
+
+inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
+ const float* bias_data, int array_size,
+ float* array_data) {
+ // Note: see b/132215220: in May 2019 we thought it would be OK to replace
+ // this with the Eigen one-liner:
+ // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
+ // This turned out to severely regress performance: +4ms (i.e. 8%) on
+ // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
+ TFLITE_DCHECK_EQ((array_size % bias_size), 0);
+#ifdef USE_NEON
+ float* array_ptr = array_data;
+ float* array_end_ptr = array_ptr + array_size;
+ const auto clamp_min_vec = vdupq_n_f32(clamp_min);
+ const auto clamp_max_vec = vdupq_n_f32(clamp_max);
+ for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
+ int i = 0;
+ for (; i <= bias_size - 16; i += 16) {
+ auto b0 = vld1q_f32(bias_data + i);
+ auto b1 = vld1q_f32(bias_data + i + 4);
+ auto b2 = vld1q_f32(bias_data + i + 8);
+ auto b3 = vld1q_f32(bias_data + i + 12);
+ auto a0 = vld1q_f32(array_ptr + i);
+ auto a1 = vld1q_f32(array_ptr + i + 4);
+ auto a2 = vld1q_f32(array_ptr + i + 8);
+ auto a3 = vld1q_f32(array_ptr + i + 12);
+ auto x0 = vaddq_f32(a0, b0);
+ auto x1 = vaddq_f32(a1, b1);
+ auto x2 = vaddq_f32(a2, b2);
+ auto x3 = vaddq_f32(a3, b3);
+ x0 = vmaxq_f32(clamp_min_vec, x0);
+ x1 = vmaxq_f32(clamp_min_vec, x1);
+ x2 = vmaxq_f32(clamp_min_vec, x2);
+ x3 = vmaxq_f32(clamp_min_vec, x3);
+ x0 = vminq_f32(clamp_max_vec, x0);
+ x1 = vminq_f32(clamp_max_vec, x1);
+ x2 = vminq_f32(clamp_max_vec, x2);
+ x3 = vminq_f32(clamp_max_vec, x3);
+ vst1q_f32(array_ptr + i, x0);
+ vst1q_f32(array_ptr + i + 4, x1);
+ vst1q_f32(array_ptr + i + 8, x2);
+ vst1q_f32(array_ptr + i + 12, x3);
+ }
+ for (; i <= bias_size - 4; i += 4) {
+ auto b = vld1q_f32(bias_data + i);
+ auto a = vld1q_f32(array_ptr + i);
+ auto x = vaddq_f32(a, b);
+ x = vmaxq_f32(clamp_min_vec, x);
+ x = vminq_f32(clamp_max_vec, x);
+ vst1q_f32(array_ptr + i, x);
+ }
+ for (; i < bias_size; i++) {
+ array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
+ clamp_min, clamp_max);
+ }
+ }
+#else // not NEON
+ for (int array_offset = 0; array_offset < array_size;
+ array_offset += bias_size) {
+ for (int i = 0; i < bias_size; i++) {
+ array_data[array_offset + i] = ActivationFunctionWithMinMax(
+ array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
+ }
+ }
+#endif
+}
+
+inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
+ int32_t x, int32_t quantized_multiplier, int left_shift) {
+ using gemmlowp::RoundingDivideByPOT;
+ using gemmlowp::SaturatingRoundingDoublingHighMul;
+ return RoundingDivideByPOT(
+ SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift);
+}
+
+inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
+ int32_t x, int32_t quantized_multiplier, int left_shift) {
+ using gemmlowp::SaturatingRoundingDoublingHighMul;
+ return SaturatingRoundingDoublingHighMul(x * (1 << left_shift),
+ quantized_multiplier);
+}
+
+inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
+ int32_t quantized_multiplier,
+ int shift) {
+ using gemmlowp::RoundingDivideByPOT;
+ using gemmlowp::SaturatingRoundingDoublingHighMul;
+ int left_shift = shift > 0 ? shift : 0;
+ int right_shift = shift > 0 ? 0 : -shift;
+ return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
+ x * (1 << left_shift), quantized_multiplier),
+ right_shift);
+}
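+
+// Worked example (illustrative): quantized_multiplier == 1 << 30 encodes the
+// real multiplier 0.5 in Q31, so with shift == 0,
+// MultiplyByQuantizedMultiplier(100, 1 << 30, 0) == 50.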
+
+inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
+ int32_t quantized_multiplier,
+ int shift) {
+ // Inputs:
+ // - quantized_multiplier has fixed point at bit 31
+ // - shift is -31 to +7 (negative for right shift)
+ //
+ // Assumptions: The following input ranges are assumed
+ // - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1)
+ // - scaling is chosen so final scaled result fits in int32_t
+ // - input x is in the range -(1<<47) <= x < (1<<47)
+ assert(quantized_multiplier >= 0);
+ assert(shift >= -31 && shift < 8);
+
+ int32_t reduced_multiplier = (quantized_multiplier + (1 << 15)) >> 16;
+ int total_shift = 15 - shift;
+ x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1));
+ int32_t result = x >> total_shift;
+ return result;
+}
+
+template <typename T>
+int CountLeadingZeros(T integer_input) {
+  static_assert(std::is_unsigned<T>::value,
+                "Only unsigned integer types handled.");
+#if defined(__GNUC__)
+  return integer_input ? __builtin_clz(integer_input)
+                       : std::numeric_limits<T>::digits;
+#else
+  if (integer_input == 0) {
+    return std::numeric_limits<T>::digits;
+  }
+
+  const T one_in_leading_positive = static_cast<T>(1)
+                                    << (std::numeric_limits<T>::digits - 1);
+ int leading_zeros = 0;
+ while (integer_input < one_in_leading_positive) {
+ integer_input <<= 1;
+ ++leading_zeros;
+ }
+ return leading_zeros;
+#endif
+}
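+
+// Example (illustrative): CountLeadingZeros(uint32_t{1}) == 31, while a zero
+// input returns std::numeric_limits<uint32_t>::digits == 32 by definition.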
+
+template <typename T>
+inline int CountLeadingSignBits(T integer_input) {
+  static_assert(std::is_signed<T>::value, "Only signed integer types handled.");
+#if defined(__GNUC__) && !defined(__clang__)
+  return integer_input ? __builtin_clrsb(integer_input)
+                       : std::numeric_limits<T>::digits;
+#else
+  using U = typename std::make_unsigned<T>::type;
+  return integer_input >= 0
+             ? CountLeadingZeros(static_cast<U>(integer_input)) - 1
+         : integer_input != std::numeric_limits<T>::min()
+             ? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
+             : 0;
+#endif
+}
+
+// Use "count leading zeros" helper functions to do a fast Floor(log_2(x)).
+template <typename Integer>
+inline Integer FloorLog2(Integer n) {
+  static_assert(std::is_integral<Integer>::value, "");
+  static_assert(std::is_signed<Integer>::value, "");
+  static_assert(sizeof(Integer) == 4 || sizeof(Integer) == 8, "");
+ TFLITE_CHECK_GT(n, 0);
+ if (sizeof(Integer) == 4) {
+ return 30 - CountLeadingSignBits(n);
+ } else {
+ return 62 - CountLeadingSignBits(n);
+ }
+}
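+
+// Example (illustrative): FloorLog2(8) == 3 and FloorLog2(9) == 3, i.e. the
+// largest exponent e such that (1 << e) <= n.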
+
+// Generates an int16_t LUT for a function; e.g., the exp(x) and 1/(1+x)
+// tables used in softmax.
+inline void gen_lut(const std::function<double(double)>& func, double min,
+ double max, int16_t* table, const int num) {
+  // The table is expected to hold num + 1 entries;
+  // the last element is used only for slope calculation.
+ double step = (max - min) / (num - 1);
+ double half_step = step / 2.0;
+ for (int i = 0; i < num - 1; i++) {
+ double sample_val = TfLiteRound(func(min + i * step) * 32768.0);
+ double midpoint_interp_val =
+ TfLiteRound((func(min + (i + 1) * step) * 32768.0 +
+ TfLiteRound(func(min + i * step) * 32768.0)) /
+ 2.0);
+ double midpoint_val =
+ TfLiteRound(func(min + i * step + half_step) * 32768.0);
+ double midpoint_err = midpoint_interp_val - midpoint_val;
+ double bias = TfLiteRound(midpoint_err / 2.0);
+ table[i] = std::min(std::max(sample_val - bias, -32768.0), 32767.0);
+ }
+ table[num - 1] =
+ std::min(std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0);
+}
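+
+// Usage sketch (hypothetical values): building a 513-entry exp() table for an
+// int16_t softmax-style lookup, where the final entry exists only to provide
+// the slope of the last segment:
+//   int16_t exp_lut[513];
+//   gen_lut([](double x) { return std::exp(x); }, -10.0, 0.0, exp_lut, 513);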
+
+// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
+inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) {
+  // 512 base values; the final entry is used only for slope calculation.
+  uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
+  assert(index < 512 && "LUT index out of range.");
+  int16_t offset = value & 0x7f;
+
+  // base and slope are Q0.15
+  int16_t base = lut[index];
+  int16_t slope = lut[index + 1] - lut[index];
+
+  // Q0.15 * Q0.7 = Q0.22
+  // Round and convert from Q0.22 to Q0.15
+  int32_t delta = (static_cast<int32_t>(slope) * offset + 64) >> 7;
+
+ // Q0.15 + Q0.15
+ return base + delta;
+}
+
+// Table of sigmoid(i/24) at 0.16 format - 256 elements.
+
+// We use combined sigmoid and tanh look-up table, since
+// tanh(x) = 2*sigmoid(2*x) -1.
+// Both functions are symmetric, so the LUT table is only needed
+// for the absolute value of the input.
+static const uint16_t sigmoid_table_uint16[256] = {
+ 32768, 33451, 34133, 34813, 35493, 36169, 36843, 37513, 38180, 38841, 39498,
+ 40149, 40794, 41432, 42064, 42688, 43304, 43912, 44511, 45102, 45683, 46255,
+ 46817, 47369, 47911, 48443, 48964, 49475, 49975, 50464, 50942, 51409, 51865,
+ 52311, 52745, 53169, 53581, 53983, 54374, 54755, 55125, 55485, 55834, 56174,
+ 56503, 56823, 57133, 57433, 57724, 58007, 58280, 58544, 58800, 59048, 59288,
+ 59519, 59743, 59959, 60168, 60370, 60565, 60753, 60935, 61110, 61279, 61441,
+ 61599, 61750, 61896, 62036, 62172, 62302, 62428, 62549, 62666, 62778, 62886,
+ 62990, 63090, 63186, 63279, 63368, 63454, 63536, 63615, 63691, 63765, 63835,
+ 63903, 63968, 64030, 64090, 64148, 64204, 64257, 64308, 64357, 64405, 64450,
+ 64494, 64536, 64576, 64614, 64652, 64687, 64721, 64754, 64786, 64816, 64845,
+ 64873, 64900, 64926, 64950, 64974, 64997, 65019, 65039, 65060, 65079, 65097,
+ 65115, 65132, 65149, 65164, 65179, 65194, 65208, 65221, 65234, 65246, 65258,
+ 65269, 65280, 65291, 65301, 65310, 65319, 65328, 65337, 65345, 65352, 65360,
+ 65367, 65374, 65381, 65387, 65393, 65399, 65404, 65410, 65415, 65420, 65425,
+ 65429, 65433, 65438, 65442, 65445, 65449, 65453, 65456, 65459, 65462, 65465,
+ 65468, 65471, 65474, 65476, 65479, 65481, 65483, 65485, 65488, 65489, 65491,
+ 65493, 65495, 65497, 65498, 65500, 65501, 65503, 65504, 65505, 65507, 65508,
+ 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65517, 65518,
+ 65519, 65520, 65520, 65521, 65522, 65522, 65523, 65523, 65524, 65524, 65525,
+ 65525, 65526, 65526, 65526, 65527, 65527, 65528, 65528, 65528, 65529, 65529,
+ 65529, 65529, 65530, 65530, 65530, 65530, 65531, 65531, 65531, 65531, 65531,
+ 65532, 65532, 65532, 65532, 65532, 65532, 65533, 65533, 65533, 65533, 65533,
+ 65533, 65533, 65533, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534,
+ 65534, 65534, 65535};
+
+// TODO(b/77858996): Add these to gemmlowp.
+template <typename IntegerType>
+IntegerType SaturatingAddNonGemmlowp(IntegerType a, IntegerType b) {
+  static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
+ return a;
+}
+
+template <>
+inline std::int32_t SaturatingAddNonGemmlowp(std::int32_t a, std::int32_t b) {
+  std::int64_t a64 = a;
+  std::int64_t b64 = b;
+  std::int64_t sum = a64 + b64;
+  return static_cast<std::int32_t>(std::min(
+      static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
+      std::max(
+          static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
+          sum)));
+}
+
+template <typename tRawType, int tIntegerBits>
+gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingAddNonGemmlowp(
+    gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
+    gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
+  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
+      SaturatingAddNonGemmlowp(a.raw(), b.raw()));
+}
+
+template <typename IntegerType>
+IntegerType SaturatingSub(IntegerType a, IntegerType b) {
+  static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
+ return a;
+}
+
+template <>
+inline std::int16_t SaturatingSub(std::int16_t a, std::int16_t b) {
+  std::int32_t a32 = a;
+  std::int32_t b32 = b;
+  std::int32_t diff = a32 - b32;
+  return static_cast<std::int16_t>(
+      std::min(static_cast<std::int32_t>(32767),
+               std::max(static_cast<std::int32_t>(-32768), diff)));
+}
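+
+// Example (illustrative): SaturatingSub(std::int16_t{-30000},
+// std::int16_t{10000}) == -32768, clamped rather than wrapped.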
+
+template <>
+inline std::int32_t SaturatingSub(std::int32_t a, std::int32_t b) {
+  std::int64_t a64 = a;
+  std::int64_t b64 = b;
+  std::int64_t diff = a64 - b64;
+  return static_cast<std::int32_t>(std::min(
+      static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
+      std::max(
+          static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
+          diff)));
+}
+
+template <typename tRawType, int tIntegerBits>
+gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingSub(
+    gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
+    gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
+  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
+      SaturatingSub(a.raw(), b.raw()));
+}
+// End section to be moved to gemmlowp.
+
+template <typename IntegerType>
+IntegerType SaturatingRoundingMultiplyByPOTParam(IntegerType x, int exponent) {
+  if (exponent == 0) {
+    return x;
+  }
+  using ScalarIntegerType =
+      typename gemmlowp::FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
+  const IntegerType min =
+      gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::min());
+  const IntegerType max =
+      gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::max());
+  const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType);
+
+  const std::int32_t threshold =
+      ((1 << (ScalarIntegerTypeBits - 1 - exponent)) - 1);
+  const IntegerType positive_mask =
+      gemmlowp::MaskIfGreaterThan(x, gemmlowp::Dup<IntegerType>(threshold));
+  const IntegerType negative_mask =
+      gemmlowp::MaskIfLessThan(x, gemmlowp::Dup<IntegerType>(-threshold));
+
+ IntegerType result = gemmlowp::ShiftLeft(x, exponent);
+ result = gemmlowp::SelectUsingMask(positive_mask, max, result);
+ result = gemmlowp::SelectUsingMask(negative_mask, min, result);
+ return result;
+}
+
+// If we want to leave IntegerBits fixed, then multiplication
+// by a power of two has to be saturating/rounding, not exact anymore.
+template <typename tRawType, int tIntegerBits>
+gemmlowp::FixedPoint<tRawType, tIntegerBits>
+SaturatingRoundingMultiplyByPOTParam(
+    gemmlowp::FixedPoint<tRawType, tIntegerBits> a, int exponent) {
+  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
+      SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
+}
+
+// Convert int32_t multiplier to int16_t with rounding.
+inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t,
+                                            int16_t* multiplier_int16_t) {
+  TFLITE_DCHECK_GE(multiplier_int32_t, 0);
+  static constexpr int32_t kRoundingOffset = 1 << 15;
+  if (multiplier_int32_t >=
+      std::numeric_limits<int32_t>::max() - kRoundingOffset) {
+    *multiplier_int16_t = std::numeric_limits<int16_t>::max();
+ return;
+ }
+ const int32_t result = (multiplier_int32_t + kRoundingOffset) >> 16;
+ TFLITE_DCHECK_LE(result << 16, multiplier_int32_t + kRoundingOffset);
+ TFLITE_DCHECK_GT(result << 16, multiplier_int32_t - kRoundingOffset);
+ *multiplier_int16_t = result;
+ TFLITE_DCHECK_EQ(*multiplier_int16_t, result);
+}
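+
+// Worked example (illustrative): DownScaleInt32ToInt16Multiplier(1 << 30, &m)
+// sets m to 1 << 14, carrying the real multiplier 0.5 from Q31 to Q15.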
+
+// Minimum output bits to accommodate log of maximum input range. It actually
+// does not matter if one considers, say, [-64,64] or [-64,64).
+//
+// For example, run this through Octave:
+// [0:127; ...
+// ceil(log(abs( log(2.^(0:127))+1 ))/log(2)); ...
+// ceil(log(abs( log(2.^(0:127))+1 ))/log(2))]
+constexpr int min_log_x_output_bits(int input_bits) {
+ return input_bits > 90 ? 7
+ : input_bits > 44 ? 6
+ : input_bits > 21 ? 5
+ : input_bits > 10 ? 4
+ : input_bits > 4 ? 3
+ : input_bits > 1 ? 2
+ : 1;
+}
+
+// Although currently the name of this function says that it cannot handle
+// values less than 1, in practice it can handle as low as 1/x_max, where
+// x_max is the largest representable input. In other words, the output range
+// is symmetric.
+template <int OutputIntegerBits, int InputIntegerBits>
+inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
+log_x_for_x_greater_than_or_equal_to_1_impl(
+    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
+  // assert(__builtin_clz(0u) >= std::numeric_limits<uint32_t>::digits - 1);
+  // assert(__builtin_clz(0u) <= std::numeric_limits<uint32_t>::digits);
+  using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
+  // The reason for accumulating the result with an extra bit of headroom is
+  // that z_pow_2_adj * log_2 might be saturated, and adding num_scaled *
+  // recip_denom will otherwise introduce an error.
+  static constexpr int kAccumIntegerBits = OutputIntegerBits + 1;
+  using FixedPointAccum = gemmlowp::FixedPoint<int32_t, kAccumIntegerBits>;
+
+ const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1488522236, std::log(2.0));
+ const FixedPoint0 sqrt_sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1805811301, std::sqrt(std::sqrt(0.5)));
+ const FixedPoint0 sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1518500250, std::sqrt(0.5));
+ const FixedPoint0 one_quarter =
+ GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPoint0, 536870912, 1.0 / 4.0);
+
+ const FixedPoint0 alpha_n = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 117049297, 11.0 / 240.0 * std::sqrt(std::sqrt(2.0)));
+ const FixedPoint0 alpha_d = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 127690142, 1.0 / 20.0 * std::sqrt(std::sqrt(2.0)));
+ const FixedPoint0 alpha_i = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1057819769,
+ 2.0 / std::sqrt(std::sqrt(2.0)) - std::sqrt(std::sqrt(2.0)));
+ const FixedPoint0 alpha_f = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 638450708, 1.0 / 4.0 * std::sqrt(std::sqrt(2.0)));
+
+  const FixedPointAccum shifted_quarter =
+      gemmlowp::Rescale<kAccumIntegerBits>(one_quarter);
+
+ // Reinterpret the input value as Q0.31, because we will figure out the
+ // required shift "ourselves" instead of using, say, Rescale.
+ FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw());
+ // z_a_pow_2 = input_integer_bits - z_a_headroom;
+  int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32_t>(z_a.raw()));
+ FixedPoint0 r_a_tmp =
+ SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1));
+ const int32_t r_a_raw =
+ SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1);
+ // z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25);
+ // z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25,
+ // InputIntegerBits - z_b_headroom - 0.25);
+ const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp(
+ FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
+ InputIntegerBits - z_a_headroom_plus_1, 31 - kAccumIntegerBits)),
+ shifted_quarter);
+
+ // z_b is treated like z_a, but premultiplying by sqrt(0.5).
+ FixedPoint0 z_b = z_a * sqrt_half;
+  int z_b_headroom = CountLeadingZeros(static_cast<uint32_t>(z_b.raw())) - 1;
+ const int32_t r_b_raw =
+ SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
+ const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
+ FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
+ InputIntegerBits - z_b_headroom, 31 - kAccumIntegerBits)),
+ shifted_quarter);
+
+ const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw));
+ const FixedPointAccum z_pow_2_adj = FixedPointAccum::FromRaw(
+ std::max(z_a_pow_2_adj.raw(), z_b_pow_2_adj.raw()));
+
+ const FixedPoint0 p = gemmlowp::RoundingHalfSum(r, sqrt_sqrt_half);
+ FixedPoint0 q = r - sqrt_sqrt_half;
+ q = q + q;
+
+ const FixedPoint0 common_sq = q * q;
+ const FixedPoint0 num = q * r + q * common_sq * alpha_n;
+ const FixedPoint0 denom_minus_one_0 =
+ p * (alpha_i + q + alpha_d * common_sq) + alpha_f * q;
+ const FixedPoint0 recip_denom =
+ one_over_one_plus_x_for_x_in_0_1(denom_minus_one_0);
+
+  const FixedPointAccum num_scaled = gemmlowp::Rescale<kAccumIntegerBits>(num);
+  return gemmlowp::Rescale<OutputIntegerBits>(z_pow_2_adj * log_2 +
+                                              num_scaled * recip_denom);
+}
+
+template <int OutputIntegerBits, int InputIntegerBits>
+inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
+log_x_for_x_greater_than_or_equal_to_1(
+    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
+  static_assert(
+      OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits),
+      "Output integer bits must be sufficient to accommodate logs of inputs.");
+  return log_x_for_x_greater_than_or_equal_to_1_impl<OutputIntegerBits,
+                                                     InputIntegerBits>(input_val);
+}
+
+inline int32_t GetReciprocal(int32_t x, int x_integer_digits,
+ int* num_bits_over_unit) {
+  int headroom_plus_one = CountLeadingZeros(static_cast<uint32_t>(x));
+ // This is the number of bits to the left of the binary point above 1.0.
+ // Consider x=1.25. In that case shifted_scale=0.8 and
+ // no later adjustment will be needed.
+ *num_bits_over_unit = x_integer_digits - headroom_plus_one;
+  const int32_t shifted_sum_minus_one =
+      static_cast<int32_t>((static_cast<uint32_t>(x) << headroom_plus_one) -
+                           (static_cast<uint32_t>(1) << 31));
+
+  gemmlowp::FixedPoint<int32_t, 0> shifted_scale =
+      gemmlowp::one_over_one_plus_x_for_x_in_0_1(
+          gemmlowp::FixedPoint<int32_t, 0>::FromRaw(shifted_sum_minus_one));
+ return shifted_scale.raw();
+}
+
+inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift,
+ int32_t* output_inv_sqrt,
+ int* output_shift) {
+ TFLITE_DCHECK_GE(input, 0);
+ if (input <= 1) {
+ // Handle the input value 1 separately to avoid overflow in that case
+ // in the general computation below (b/143972021). Also handle 0 as if it
+ // were a 1. 0 is an invalid input here (divide by zero) and 1 is a valid
+ // but rare/unrealistic input value. We can expect both to occur in some
+ // incompletely trained models, but probably not in fully trained models.
+    *output_inv_sqrt = std::numeric_limits<std::int32_t>::max();
+ *output_shift = 0;
+ return;
+ }
+ TFLITE_DCHECK_GT(input, 1);
+ *output_shift = 11;
+ while (input >= (1 << 29)) {
+ input /= 4;
+ ++*output_shift;
+ }
+ const unsigned max_left_shift_bits =
+      CountLeadingZeros(static_cast<uint32_t>(input)) - 1;
+ const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2;
+ const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1;
+ *output_shift -= left_shift_bit_pairs;
+ input <<= 2 * left_shift_bit_pairs;
+ TFLITE_DCHECK_GE(input, (1 << 27));
+ TFLITE_DCHECK_LT(input, (1 << 29));
+ using gemmlowp::FixedPoint;
+ using gemmlowp::Rescale;
+ using gemmlowp::SaturatingRoundingMultiplyByPOT;
+ // Using 3 integer bits gives us enough room for the internal arithmetic in
+ // this Newton-Raphson iteration.
+  using F3 = FixedPoint<int32_t, 3>;
+  using F0 = FixedPoint<int32_t, 0>;
+ const F3 fixedpoint_input = F3::FromRaw(input >> 1);
+ const F3 fixedpoint_half_input =
+ SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input);
+ const F3 fixedpoint_half_three =
+ GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F3, (1 << 28) + (1 << 27), 1.5);
+ // Newton-Raphson iteration
+ // Naive unoptimized starting guess: x = 1
+ F3 x = F3::One();
+ // Naive unoptimized number of iterations: 5
+ for (int i = 0; i < 5; i++) {
+ const F3 x3 = Rescale<3>(x * x * x);
+ x = Rescale<3>(fixedpoint_half_three * x - fixedpoint_half_input * x3);
+ }
+ const F0 fixedpoint_half_sqrt_2 =
+ GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F0, 1518500250, std::sqrt(2.) / 2.);
+ x = x * fixedpoint_half_sqrt_2;
+ *output_inv_sqrt = x.raw();
+ if (*output_shift < 0) {
+ *output_inv_sqrt <<= -*output_shift;
+ *output_shift = 0;
+ }
+ // Convert right shift (right is positive) to left shift.
+ *output_shift *= reverse_shift;
+}
+
+// DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
+// BROADCASTING.
+//
+// NdArrayDesc<N> describes the shape and memory layout of an N-dimensional
+// rectangular array of numbers.
+//
+// NdArrayDesc<N> is basically identical to Dims<N> defined in types.h.
+// However, as Dims