From 16e101e8fbab3e321ff05f048cf6d6dff3d8f1f1 Mon Sep 17 00:00:00 2001
From: Ryan Kuester
Date: Thu, 14 Nov 2024 22:48:16 -0600
Subject: [PATCH] feat: add compression metadata flatbuffer schema and tests

Add a flatbuffer schema for describing compressed models. Flatbuffers
with this schema are to be used as the value in a .tflite model
flatbuffer metadata field, and contain the extra information necessary
to describe a compressed model. Include tests to ensure basic
functionality and demonstrate integration with C++, Python, and Bazel.

BUG=#2636
---
 tensorflow/lite/micro/compression/BUILD       |  97 +++++
 .../lite/micro/compression/metadata.fbs       |  56 +++
 .../lite/micro/compression/metadata_saved.h   | 408 ++++++++++++++++++
 .../micro/compression/metadata_saved_test.sh  |  39 ++
 .../compression/metadata_saved_update.sh      |  31 ++
 .../lite/micro/compression/metadata_test.cc   |  81 ++++
 .../lite/micro/compression/metadata_test.py   |  68 +++
 .../micro/tools/ci_build/test_code_style.sh   |   2 +
 8 files changed, 782 insertions(+)
 create mode 100644 tensorflow/lite/micro/compression/BUILD
 create mode 100644 tensorflow/lite/micro/compression/metadata.fbs
 create mode 100644 tensorflow/lite/micro/compression/metadata_saved.h
 create mode 100755 tensorflow/lite/micro/compression/metadata_saved_test.sh
 create mode 100755 tensorflow/lite/micro/compression/metadata_saved_update.sh
 create mode 100644 tensorflow/lite/micro/compression/metadata_test.cc
 create mode 100644 tensorflow/lite/micro/compression/metadata_test.py

diff --git a/tensorflow/lite/micro/compression/BUILD b/tensorflow/lite/micro/compression/BUILD
new file mode 100644
index 00000000000..1e4746fefc3
--- /dev/null
+++ b/tensorflow/lite/micro/compression/BUILD
@@ -0,0 +1,97 @@
+load(
+    "//tensorflow/lite/micro:build_def.bzl",
+    "tflm_cc_library",
+    "tflm_cc_test",
+)
+load(
+    "@flatbuffers//:build_defs.bzl",
+    "flatbuffer_cc_library",
+    "flatbuffer_py_library",
+)
+load("@rules_python//python:defs.bzl", "py_test")
+load("@tflm_pip_deps//:requirements.bzl", "requirement")
+
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+flatbuffer_cc_library(
+    # Generates the header-only library "metadata_generated.h", used to read
+    # the metadata flatbuffer.
+    name = "metadata_cc",
+    srcs = ["metadata.fbs"],
+)
+
+tflm_cc_library(
+    # The header-only library generated by flatc in ":metadata_cc" is saved to
+    # the source tree and committed to git as "metadata_saved.h", which is used
+    # by code built via the Make build system, which has no means of
+    # generating the header on the fly. Code which builds via both bazel and
+    # Make should #include the saved header and use this target in its bazel
+    # BUILD deps. Code built exclusively via bazel would typically depend
+    # directly on ":metadata_cc", which would generate a header from the schema
+    # on the fly, during the build.
+    #
+    # When the schema definition "metadata.fbs" is changed, this saved header
+    # should be updated by running the script "./metadata_saved_update.sh",
+    # outside of bazel (because bazel cannot modify the source tree). The
+    # script regenerates the header from the schema and copies it to the source
+    # tree as "metadata_saved.h".
+    #
+    # Committing the generated file risks inconsistency between the schema and
+    # the saved header, so consistency is ensured by the unit test
+    # ":metadata_saved_test".
+    #
+    name = "metadata_saved",
+    hdrs = ["metadata_saved.h"],
+)
+
+sh_test(
+    # Ensures consistency between the schema and the saved generated header.
+    # Fails if they mismatch, in which case ./metadata_saved_update.sh should
+    # be run. See :metadata_saved above.
+    name = "metadata_saved_test",
+    size = "small",
+    srcs = ["metadata_saved_test.sh"],
+    args = [
+        "$(location metadata_saved.h)",
+        "$(location :metadata_cc_srcs)",
+    ],
+    data = [
+        "metadata_saved.h",
+        ":metadata_cc_srcs",
+    ],
+)
+
+tflm_cc_test(
+    name = "metadata_test_cc",
+    size = "small",
+    srcs = ["metadata_test.cc"],
+    deps = [
+        ":metadata_saved",
+        "//tensorflow/lite/micro:hexdump",
+        "//tensorflow/lite/micro/testing:micro_test",
+        "@flatbuffers//:runtime_cc",
+    ],
+)
+
+flatbuffer_py_library(
+    # Generates the Python module "metadata_py_generated", used to read the
+    # metadata flatbuffer.
+    name = "metadata_py",
+    srcs = ["metadata.fbs"],
+)
+
+py_test(
+    name = "metadata_test_py",
+    size = "small",
+    srcs = ["metadata_test.py"],
+    main = "metadata_test.py",
+    deps = [
+        "metadata_py",
+        "@flatbuffers//:runtime_py",
+        requirement("hexdump"),
+    ],
+)
diff --git a/tensorflow/lite/micro/compression/metadata.fbs b/tensorflow/lite/micro/compression/metadata.fbs
new file mode 100644
index 00000000000..6795f598ad9
--- /dev/null
+++ b/tensorflow/lite/micro/compression/metadata.fbs
@@ -0,0 +1,56 @@
+// Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+namespace tflite.micro.compression;
+
+table Metadata {
+  // Compression data root, to be used in a tflite.Model.metadata field with
+  // the key "COMPRESSION_METADATA".
+
+  schema_version:int = 1;
+  // ^ Incremented whenever there are backward-incompatible changes. Code
+  // should accept models with versions less than or equal to the version
+  // for which the code is built. I.e., code should accept older models,
+  // but not necessarily newer ones.
+
+  subgraphs:[Subgraph];
+  // ^ Compression data indexed by subgraph index.
+}
+
+table Subgraph {
+  // Per-subgraph compression metadata.
+
+  lut_tensors:[LutTensor];
+  // ^ A list of tensors which are compressed using the
+  // (L)ook-(U)p-(T)able method. The indices of this vector are not
+  // significant.
+}
+
+table LutTensor {
+  // Look-Up-Table Tensor: a tensor representation where elements are
+  // compressed into indices into a table of values. The indices are unsigned
+  // integers, index_bitwidth-wide, in big-endian bit order, packed into the
+  // buffer identified by the corresponding tflite.Tensor's buffer field. The
+  // values are located in a newly-created buffer, encoded according to the
+  // tflite.Tensor.type. Tensors with multiple channels have distinct value
+  // tables for each channel, typically along their quantization axis,
+  // concatenated one after another. An element's index must be looked up in
+  // the value table corresponding to its channel.
+ + tensor:int; // index of the corresponding tflite.Tensor + value_buffer:uint; // index of the buffer containing LUT values + index_bitwidth:uint8; // bit-width of LUT indexes +} + +root_type Metadata; diff --git a/tensorflow/lite/micro/compression/metadata_saved.h b/tensorflow/lite/micro/compression/metadata_saved.h new file mode 100644 index 00000000000..dae00547c28 --- /dev/null +++ b/tensorflow/lite/micro/compression/metadata_saved.h @@ -0,0 +1,408 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_METADATA_TFLITE_MICRO_COMPRESSION_H_ +#define FLATBUFFERS_GENERATED_METADATA_TFLITE_MICRO_COMPRESSION_H_ + +#include "flatbuffers/flatbuffers.h" + +// Ensure the included flatbuffers.h is the same version as when this file was +// generated, otherwise it may not be compatible. +static_assert(FLATBUFFERS_VERSION_MAJOR == 23 && + FLATBUFFERS_VERSION_MINOR == 5 && + FLATBUFFERS_VERSION_REVISION == 26, + "Non-compatible flatbuffers version included"); + +namespace tflite { +namespace micro { +namespace compression { + +struct Metadata; +struct MetadataBuilder; +struct MetadataT; + +struct Subgraph; +struct SubgraphBuilder; +struct SubgraphT; + +struct LutTensor; +struct LutTensorBuilder; +struct LutTensorT; + +struct MetadataT : public ::flatbuffers::NativeTable { + typedef Metadata TableType; + int32_t schema_version = 1; + std::vector> subgraphs{}; + MetadataT() = default; + MetadataT(const MetadataT &o); + MetadataT(MetadataT&&) FLATBUFFERS_NOEXCEPT = default; + MetadataT &operator=(MetadataT o) FLATBUFFERS_NOEXCEPT; +}; + +struct Metadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef MetadataT NativeTableType; + typedef MetadataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SCHEMA_VERSION = 4, + VT_SUBGRAPHS = 6 + }; + int32_t schema_version() const { + return GetField(VT_SCHEMA_VERSION, 1); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *subgraphs() const { + return GetPointer> *>(VT_SUBGRAPHS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SCHEMA_VERSION, 4) && + VerifyOffset(verifier, VT_SUBGRAPHS) && + verifier.VerifyVector(subgraphs()) && + verifier.VerifyVectorOfTables(subgraphs()) && + verifier.EndTable(); + } + MetadataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MetadataBuilder { + typedef Metadata Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_schema_version(int32_t schema_version) { + fbb_.AddElement(Metadata::VT_SCHEMA_VERSION, schema_version, 1); + } + void add_subgraphs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> subgraphs) { + fbb_.AddOffset(Metadata::VT_SUBGRAPHS, subgraphs); + } + explicit MetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateMetadata( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t schema_version = 1, + 
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> subgraphs = 0) { + MetadataBuilder builder_(_fbb); + builder_.add_subgraphs(subgraphs); + builder_.add_schema_version(schema_version); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateMetadataDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t schema_version = 1, + const std::vector<::flatbuffers::Offset> *subgraphs = nullptr) { + auto subgraphs__ = subgraphs ? _fbb.CreateVector<::flatbuffers::Offset>(*subgraphs) : 0; + return tflite::micro::compression::CreateMetadata( + _fbb, + schema_version, + subgraphs__); +} + +::flatbuffers::Offset CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SubgraphT : public ::flatbuffers::NativeTable { + typedef Subgraph TableType; + std::vector> lut_tensors{}; + SubgraphT() = default; + SubgraphT(const SubgraphT &o); + SubgraphT(SubgraphT&&) FLATBUFFERS_NOEXCEPT = default; + SubgraphT &operator=(SubgraphT o) FLATBUFFERS_NOEXCEPT; +}; + +struct Subgraph FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SubgraphT NativeTableType; + typedef SubgraphBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_LUT_TENSORS = 4 + }; + const ::flatbuffers::Vector<::flatbuffers::Offset> *lut_tensors() const { + return GetPointer> *>(VT_LUT_TENSORS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_LUT_TENSORS) && + verifier.VerifyVector(lut_tensors()) && + verifier.VerifyVectorOfTables(lut_tensors()) && + verifier.EndTable(); + } + SubgraphT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SubgraphT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SubgraphBuilder { + typedef Subgraph Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_lut_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> lut_tensors) { + fbb_.AddOffset(Subgraph::VT_LUT_TENSORS, lut_tensors); + } + explicit SubgraphBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSubgraph( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> lut_tensors = 0) { + SubgraphBuilder builder_(_fbb); + builder_.add_lut_tensors(lut_tensors); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateSubgraphDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<::flatbuffers::Offset> *lut_tensors = nullptr) { + auto lut_tensors__ = lut_tensors ? 
_fbb.CreateVector<::flatbuffers::Offset>(*lut_tensors) : 0; + return tflite::micro::compression::CreateSubgraph( + _fbb, + lut_tensors__); +} + +::flatbuffers::Offset CreateSubgraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LutTensorT : public ::flatbuffers::NativeTable { + typedef LutTensor TableType; + int32_t tensor = 0; + uint32_t value_buffer = 0; + uint8_t index_bitwidth = 0; +}; + +struct LutTensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LutTensorT NativeTableType; + typedef LutTensorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TENSOR = 4, + VT_VALUE_BUFFER = 6, + VT_INDEX_BITWIDTH = 8 + }; + int32_t tensor() const { + return GetField(VT_TENSOR, 0); + } + uint32_t value_buffer() const { + return GetField(VT_VALUE_BUFFER, 0); + } + uint8_t index_bitwidth() const { + return GetField(VT_INDEX_BITWIDTH, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TENSOR, 4) && + VerifyField(verifier, VT_VALUE_BUFFER, 4) && + VerifyField(verifier, VT_INDEX_BITWIDTH, 1) && + verifier.EndTable(); + } + LutTensorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LutTensorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LutTensorBuilder { + typedef LutTensor Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_tensor(int32_t tensor) { + fbb_.AddElement(LutTensor::VT_TENSOR, tensor, 0); + } + void add_value_buffer(uint32_t value_buffer) { + fbb_.AddElement(LutTensor::VT_VALUE_BUFFER, value_buffer, 0); + } + void add_index_bitwidth(uint8_t index_bitwidth) { + fbb_.AddElement(LutTensor::VT_INDEX_BITWIDTH, index_bitwidth, 0); + } + explicit LutTensorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLutTensor( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t tensor = 0, + uint32_t value_buffer = 0, + uint8_t index_bitwidth = 0) { + LutTensorBuilder builder_(_fbb); + builder_.add_value_buffer(value_buffer); + builder_.add_tensor(tensor); + builder_.add_index_bitwidth(index_bitwidth); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLutTensor(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline MetadataT::MetadataT(const MetadataT &o) + : schema_version(o.schema_version) { + subgraphs.reserve(o.subgraphs.size()); + for (const auto &subgraphs_ : o.subgraphs) { subgraphs.emplace_back((subgraphs_) ? 
new tflite::micro::compression::SubgraphT(*subgraphs_) : nullptr); } +} + +inline MetadataT &MetadataT::operator=(MetadataT o) FLATBUFFERS_NOEXCEPT { + std::swap(schema_version, o.schema_version); + std::swap(subgraphs, o.subgraphs); + return *this; +} + +inline MetadataT *Metadata::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MetadataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Metadata::UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = schema_version(); _o->schema_version = _e; } + { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraphs.resize(0); } } +} + +inline ::flatbuffers::Offset Metadata::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateMetadata(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _schema_version = _o->schema_version; + auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubgraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::micro::compression::CreateMetadata( + _fbb, + _schema_version, + _subgraphs); +} + +inline SubgraphT::SubgraphT(const SubgraphT &o) { + lut_tensors.reserve(o.lut_tensors.size()); + for (const auto &lut_tensors_ : o.lut_tensors) { lut_tensors.emplace_back((lut_tensors_) ? 
new tflite::micro::compression::LutTensorT(*lut_tensors_) : nullptr); } +} + +inline SubgraphT &SubgraphT::operator=(SubgraphT o) FLATBUFFERS_NOEXCEPT { + std::swap(lut_tensors, o.lut_tensors); + return *this; +} + +inline SubgraphT *Subgraph::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SubgraphT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Subgraph::UnPackTo(SubgraphT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = lut_tensors(); if (_e) { _o->lut_tensors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->lut_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->lut_tensors[_i].get(), _resolver); } else { _o->lut_tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->lut_tensors.resize(0); } } +} + +inline ::flatbuffers::Offset Subgraph::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSubgraph(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateSubgraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubgraphT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _lut_tensors = _o->lut_tensors.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->lut_tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateLutTensor(*__va->__fbb, __va->__o->lut_tensors[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::micro::compression::CreateSubgraph( + _fbb, + _lut_tensors); +} + +inline LutTensorT *LutTensor::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LutTensorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LutTensor::UnPackTo(LutTensorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = tensor(); _o->tensor = _e; } + { auto _e = value_buffer(); _o->value_buffer = _e; } + { auto _e = index_bitwidth(); _o->index_bitwidth = _e; } +} + +inline ::flatbuffers::Offset LutTensor::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLutTensor(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateLutTensor(::flatbuffers::FlatBufferBuilder &_fbb, const LutTensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LutTensorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _tensor = _o->tensor; + auto _value_buffer = _o->value_buffer; + auto _index_bitwidth = _o->index_bitwidth; + return tflite::micro::compression::CreateLutTensor( + _fbb, + _tensor, + _value_buffer, + _index_bitwidth); +} + +inline const tflite::micro::compression::Metadata *GetMetadata(const void *buf) { + return ::flatbuffers::GetRoot(buf); +} + +inline const tflite::micro::compression::Metadata *GetSizePrefixedMetadata(const void *buf) { + return ::flatbuffers::GetSizePrefixedRoot(buf); +} + +inline bool VerifyMetadataBuffer( + ::flatbuffers::Verifier &verifier) { + return 
verifier.VerifyBuffer(nullptr); +} + +inline bool VerifySizePrefixedMetadataBuffer( + ::flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(nullptr); +} + +inline void FinishMetadataBuffer( + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.Finish(root); +} + +inline void FinishSizePrefixedMetadataBuffer( + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root); +} + +inline std::unique_ptr UnPackMetadata( + const void *buf, + const ::flatbuffers::resolver_function_t *res = nullptr) { + return std::unique_ptr(GetMetadata(buf)->UnPack(res)); +} + +inline std::unique_ptr UnPackSizePrefixedMetadata( + const void *buf, + const ::flatbuffers::resolver_function_t *res = nullptr) { + return std::unique_ptr(GetSizePrefixedMetadata(buf)->UnPack(res)); +} + +} // namespace compression +} // namespace micro +} // namespace tflite + +#endif // FLATBUFFERS_GENERATED_METADATA_TFLITE_MICRO_COMPRESSION_H_ diff --git a/tensorflow/lite/micro/compression/metadata_saved_test.sh b/tensorflow/lite/micro/compression/metadata_saved_test.sh new file mode 100755 index 00000000000..aa19503db15 --- /dev/null +++ b/tensorflow/lite/micro/compression/metadata_saved_test.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +# Copyright 2024 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Ensure the consistency between the metadata flatbuffer schema and the header +# library generated from it that has been saved to git. Regenerate the header +# and compare it to the saved version. Fail the test (return 1) if they are not +# identical. See the bazel target ":metadata_saved". + +set -e + +saved=$1 +generated=$2 + +if diff -q $saved $generated +then + exit 0 +else + cat < +#include + +#include "tensorflow/lite/micro/compression/metadata_saved.h" +#include "tensorflow/lite/micro/hexdump.h" +#include "tensorflow/lite/micro/testing/micro_test.h" + +using tflite::micro::compression::LutTensor; +using tflite::micro::compression::LutTensorT; +using tflite::micro::compression::Metadata; +using tflite::micro::compression::MetadataT; +using tflite::micro::compression::Subgraph; +using tflite::micro::compression::SubgraphT; + +namespace { + +bool operator==(const LutTensorT& a, const LutTensor& b) { + return a.tensor == b.tensor() && a.value_buffer == b.value_buffer() && + a.index_bitwidth == b.index_bitwidth(); +} + +} // end anonymous namespace + +TF_LITE_MICRO_TESTS_BEGIN + +TF_LITE_MICRO_TEST(ReadTest) { + // Create these objects on the stack and copy them into the subgraph's vector, + // so they can be compared later to what is read from the flatbuffer. 
+ LutTensorT lut_tensor0; + lut_tensor0.tensor = 63; + lut_tensor0.value_buffer = 128; + lut_tensor0.index_bitwidth = 2; + + LutTensorT lut_tensor1; + lut_tensor1.tensor = 64; + lut_tensor1.value_buffer = 129; + lut_tensor1.index_bitwidth = 4; + + auto subgraph0 = std::make_unique(); + subgraph0->lut_tensors.push_back(std::make_unique(lut_tensor0)); + subgraph0->lut_tensors.push_back(std::make_unique(lut_tensor1)); + + auto metadata = std::make_unique(); + metadata->subgraphs.push_back(std::move(subgraph0)); + + flatbuffers::FlatBufferBuilder builder; + auto root = Metadata::Pack(builder, metadata.get()); + builder.Finish(root); + const uint8_t* buffer = builder.GetBufferPointer(); + const size_t buffer_size = builder.GetSize(); + + tflite::hexdump({reinterpret_cast(buffer), buffer_size}); + std::cout << "length: " << buffer_size << "\n"; + + const Metadata* read_metadata = + tflite::micro::compression::GetMetadata(buffer); + const Subgraph* read_subgraph0 = read_metadata->subgraphs()->Get(0); + const LutTensor* read_lut_tensor0 = read_subgraph0->lut_tensors()->Get(0); + const LutTensor* read_lut_tensor1 = read_subgraph0->lut_tensors()->Get(1); + TF_LITE_MICRO_EXPECT(lut_tensor0 == *read_lut_tensor0); + TF_LITE_MICRO_EXPECT(lut_tensor1 == *read_lut_tensor1); +} + +TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/compression/metadata_test.py b/tensorflow/lite/micro/compression/metadata_test.py new file mode 100644 index 00000000000..a38ea55a831 --- /dev/null +++ b/tensorflow/lite/micro/compression/metadata_test.py @@ -0,0 +1,68 @@ +# Copyright 2024 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Test validity of the flatbuffer schema and illustrate use of the flatbuffer +# machinery with Python + +import sys +import hexdump +import flatbuffers + +# `.*_generated` is the name of the module created by the Bazel rule +# `flatbuffer_py_library' based on the schema. +from tensorflow.lite.micro.compression import metadata_py_generated as schema + + +def main(): + # The classes with a `T` suffix provide an object-oriented representation of + # the object tree in the flatbuffer using native data structures. + lut_tensor0 = schema.LutTensorT() + lut_tensor0.tensor = 63 + lut_tensor0.valueBuffer = 128 + lut_tensor0.indexBitwidth = 2 + + lut_tensor1 = schema.LutTensorT() + lut_tensor1.tensor = 64 + lut_tensor1.valueBuffer = 129 + lut_tensor1.indexBitwidth = 4 + + subgraph0 = schema.SubgraphT() + subgraph0.lutTensors = [lut_tensor0, lut_tensor1] + + metadata = schema.MetadataT() + metadata.subgraphs = [subgraph0] + + # Build the flatbuffer itself using the flatbuffers runtime module. 
+ builder = flatbuffers.Builder(32) + root = metadata.Pack(builder) + builder.Finish(root) + buffer: bytearray = builder.Output() + + print(hexdump.hexdump(buffer, result='return')) + print(f"length: {len(buffer)}") + + read_metadata = schema.MetadataT.InitFromPackedBuf(buffer, 0) + read_subgraph0 = read_metadata.subgraphs[0] + + def attrs_equal(a, b): + return all(vars(a)[key] == vars(b)[key] for key in vars(a)) + + assert attrs_equal(read_subgraph0.lutTensors[0], lut_tensor0) + assert attrs_equal(read_subgraph0.lutTensors[1], lut_tensor1) + + sys.exit() + + +if __name__ == "__main__": + main() diff --git a/tensorflow/lite/micro/tools/ci_build/test_code_style.sh b/tensorflow/lite/micro/tools/ci_build/test_code_style.sh index d49f42938e5..a54708cfd67 100755 --- a/tensorflow/lite/micro/tools/ci_build/test_code_style.sh +++ b/tensorflow/lite/micro/tools/ci_build/test_code_style.sh @@ -46,6 +46,7 @@ tensorflow/lite/micro/tools/make/downloads/pigweed/pw_presubmit/py/pw_presubmit/ -e kernels/internal/reference/reference_ops.h \ -e python/schema_py_generated.py \ -e python_requirements.in \ + -e tensorflow/lite/micro/compression/metadata_saved.h \ -e tools/make/downloads \ -e tools/make/targets/ecm3531 \ -e BUILD\ @@ -97,6 +98,7 @@ tensorflow/lite/micro/tools/make/downloads/pigweed/pw_presubmit/py/pw_presubmit/ -e experimental \ -e schema/schema_generated.h \ -e schema/schema_utils.h \ + -e tensorflow/lite/micro/compression/metadata_saved.h \ -e tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h \ -e "\.inc" \ -e "\.md" \
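
As an illustration of the LutTensor encoding described in metadata.fbs above, here is a minimal Python sketch of a decoder. It is not part of the patch: the helper names (unpack_indices, decode_lut_tensor) are hypothetical, and it assumes that each channel's value table occupies 2**index_bitwidth entries of the value buffer and that tensor elements are grouped contiguously by channel, which the schema comment implies but does not state explicitly.

# Illustrative sketch only -- not part of this patch. Assumes each channel's
# value table holds 2**bitwidth entries and that elements are grouped
# contiguously by channel.

def unpack_indices(data: bytes, bitwidth: int, count: int) -> list:
  # Read `count` unsigned integers of `bitwidth` bits each, in big-endian
  # bit order (most significant bit of each byte first).
  indices = []
  bitpos = 0
  for _ in range(count):
    value = 0
    for _ in range(bitwidth):
      byte = data[bitpos // 8]
      value = (value << 1) | ((byte >> (7 - (bitpos % 8))) & 1)
      bitpos += 1
    indices.append(value)
  return indices


def decode_lut_tensor(index_data: bytes, values: list, bitwidth: int,
                      num_elements: int, num_channels: int = 1) -> list:
  # Expand a LUT-compressed tensor: each packed index selects an entry from
  # its channel's slice of the concatenated value tables.
  indices = unpack_indices(index_data, bitwidth, num_elements)
  table_size = 2 ** bitwidth
  per_channel = num_elements // num_channels
  decoded = []
  for i, index in enumerate(indices):
    channel = i // per_channel
    decoded.append(values[channel * table_size + index])
  return decoded

For example, with index_bitwidth=2 a single packed byte 0b11100100 decodes to the indices [3, 2, 1, 0].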
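
The Metadata table comment says the serialized flatbuffer is to be carried in a tflite.Model.metadata entry under the key "COMPRESSION_METADATA". A hedged sketch of what that attachment might look like with the object API generated from the main .tflite schema follows; tflite_schema stands in for that generated module (its import path depends on the build setup), and attach_compression_metadata is a hypothetical helper, not something defined by this patch.

# Illustrative sketch only -- not part of this patch. `tflite_schema` stands
# in for the Python object API generated from the main .tflite schema
# (ModelT, MetadataT, BufferT).

def attach_compression_metadata(model, metadata_bytes):
  # Append the serialized compression metadata as a new model buffer and
  # reference it from a Model.metadata entry with the well-known key.
  buffer = tflite_schema.BufferT()
  buffer.data = list(metadata_bytes)
  model.buffers.append(buffer)

  entry = tflite_schema.MetadataT()
  entry.name = "COMPRESSION_METADATA"
  entry.buffer = len(model.buffers) - 1

  if model.metadata is None:
    model.metadata = []
  model.metadata.append(entry)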