
Commit

Pretty print TFRT and RunHandlerWorkQueue options. Otherwise, the options are shown as binary data byte-by-byte.

PiperOrigin-RevId: 481494878
lilao authored and tensorflower-gardener committed Oct 16, 2022
1 parent 4eaf9e3 commit 80f0668
Showing 8 changed files with 96 additions and 2 deletions.
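For context, the operator<< overloads added below make these option structs readable when streamed into a log, instead of being dumped as binary data byte-by-byte. A minimal usage sketch, assuming only the TfrtCompileOptions fields visible in the diffs; the printed output is abridged and illustrative, not verbatim:

#include <iostream>

#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"

int main() {
  tensorflow::TfrtCompileOptions options;
  options.enable_grappler = true;

  // Streaming the struct now prints a readable "key = value" listing.
  std::cout << options << std::endl;
  // Possible (abridged) output:
  // {variable_device = ..., enable_grappler = 1, ..., compile_to_sync_tfrt_dialect = 0}
  return 0;
}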
1 change: 1 addition & 0 deletions tensorflow/compiler/mlir/tfrt/BUILD
@@ -584,6 +584,7 @@ cc_library(
name = "tfrt_compile_options",
srcs = ["translate/tfrt_compile_options.cc"],
hdrs = ["translate/tfrt_compile_options.h"],
deps = ["@com_google_absl//absl/strings"],
)

cc_library(
36 changes: 36 additions & 0 deletions tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.cc
@@ -16,6 +16,10 @@ limitations under the License.
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"

#include <ostream>
#include <string>
#include <vector>

#include "absl/strings/str_join.h"

namespace tensorflow {

@@ -32,4 +36,36 @@ std::ostream& operator<<(std::ostream& os, TfrtTpuInfraTarget tpu_target) {
}
}

std::ostream& operator<<(std::ostream& os, const TfrtCompileOptions& options) {
return os << "{"
<< "variable_device = " << options.variable_device
<< ", default_device = " << options.default_device
<< ", enable_optimizer = " << options.enable_optimizer
<< ", enable_native_ops = " << options.enable_native_ops
<< ", enable_grappler = " << options.enable_grappler
<< ", force_data_format = " << options.force_data_format
<< ", tpu_target = " << options.tpu_target
<< ", tpu_fuse_ops = " << options.tpu_fuse_ops
<< ", tpu_move_resource_gather_to_host = "
<< options.tpu_move_resource_gather_to_host
<< ", tpu_gather_table_width_threshold_bytes = "
<< options.tpu_gather_table_width_threshold_bytes
<< ", use_tpu_host_allocator_for_inputs = "
<< options.use_tpu_host_allocator_for_inputs
<< ", hoist_invariant_ops = " << options.hoist_invariant_ops
<< ", enable_while_parallel_iterations = "
<< options.enable_while_parallel_iterations
<< ", auto_fusion_oplist = ["
<< absl::StrJoin(options.auto_fusion_oplist, ",") << "]"
<< ", auto_fusion_min_cluster_size = "
<< options.auto_fusion_min_cluster_size
<< ", cost_threshold = " << options.cost_threshold
<< ", upper_cost_threshold = " << options.upper_cost_threshold
<< ", merge_inter_dependent_streams = "
<< options.merge_inter_dependent_streams
<< ", decompose_resource_ops = " << options.decompose_resource_ops
<< ", compile_to_sync_tfrt_dialect = "
<< options.compile_to_sync_tfrt_dialect << "}";
}

} // namespace tensorflow
tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h
@@ -17,6 +17,7 @@ limitations under the License.
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSLATE_TFRT_COMPILE_OPTIONS_H_

#include <iosfwd>
#include <ostream>
#include <string>
#include <vector>

@@ -131,6 +132,8 @@ struct TfrtCompileOptions {
bool compile_to_sync_tfrt_dialect = false;
};

std::ostream& operator<<(std::ostream& os, const TfrtCompileOptions& options);

} // namespace tensorflow

#endif // TENSORFLOW_COMPILER_MLIR_TFRT_TRANSLATE_TFRT_COMPILE_OPTIONS_H_
15 changes: 15 additions & 0 deletions tensorflow/core/tfrt/graph_executor/graph_execution_options.cc
@@ -14,6 +14,8 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"

#include <ostream>

#include "tensorflow/core/protobuf/rewriter_config.pb.h"
// TODO(b/200579737): using FunctionRegistry is simpler than the OSS trick.
#include "tensorflow/core/tfrt/utils/bridge_graph_analysis.h"
@@ -76,5 +78,18 @@ void UpdateTpuTargetByBridgeCompatibility(
LOG(INFO) << "TFRT uses TPU target " << options.compile_options.tpu_target;
}

std::ostream& operator<<(std::ostream& os,
const GraphExecutionOptions& options) {
return os << "{"
<< "run_placer_grappler_on_functions = "
<< options.run_placer_grappler_on_functions
<< ", enable_grappler_function_optimizer = "
<< options.enable_grappler_function_optimizer
<< ", enable_tfrt_gpu = " << options.enable_tfrt_gpu
<< ", runtime = " << options.runtime
<< ", model_metadata = " << options.model_metadata.DebugString()
<< ", compile_options = " << options.compile_options << "}";
}

} // namespace tfrt_stub
} // namespace tensorflow
5 changes: 5 additions & 0 deletions tensorflow/core/tfrt/graph_executor/graph_execution_options.h
@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_CORE_TFRT_GRAPH_EXECUTOR_GRAPH_EXECUTION_OPTIONS_H_
#define TENSORFLOW_CORE_TFRT_GRAPH_EXECUTOR_GRAPH_EXECUTION_OPTIONS_H_

#include <ostream>

#include "absl/types/optional.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "tensorflow/core/protobuf/config.pb.h"
@@ -52,6 +54,9 @@ struct GraphExecutionOptions {
tensorflow::TfrtCompileOptions compile_options;
};

std::ostream& operator<<(std::ostream& os,
const GraphExecutionOptions& options);

// Per-request options for graph execution.
struct GraphExecutionRunOptions {
absl::optional<std::chrono::system_clock::time_point> deadline;
1 change: 1 addition & 0 deletions tensorflow/core/tfrt/run_handler_thread_pool/BUILD
@@ -98,6 +98,7 @@ cc_library(
":run_handler",
"//tensorflow/core/platform:strcat",
"//tensorflow/core/tfrt/runtime:work_queue_interface",
"@com_google_absl//absl/strings",
"@llvm-project//llvm:Support",
"@tf_runtime//:hostcontext",
"@tf_runtime//:support",
tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.cc
@@ -15,12 +15,14 @@ limitations under the License.
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"

#include <memory>
#include <ostream>
#include <utility>

#include "absl/strings/str_join.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
#include "tfrt/host_context/async_dispatch.h" // from @tf_runtime
#include "tfrt/host_context/async_value.h" // from @tf_runtime
#include "tfrt/host_context/execution_context.h" // from @tf_runtime
#include "tfrt/support/error_util.h" // from @tf_runtime

namespace tfrt {
namespace tf {
@@ -106,5 +108,31 @@ bool RunHandlerThreadWorkQueue::IsInWorkerThread() const {
return true;
}

std::ostream& operator<<(std::ostream& strm,
const RunHandlerThreadWorkQueue::Options& options) {
return strm << "{"
<< "num_main_threads = " << options.num_main_threads
<< ", num_complementary_threads = "
<< options.num_complementary_threads
<< ", init_timeout_ms = " << options.init_timeout_ms
<< ", max_concurrent_handler = " << options.max_concurrent_handler
<< ", num_sub_thread_pool = " << options.num_sub_thread_pool
<< ", num_threads_in_sub_thread_pool = ["
<< absl::StrJoin(options.num_threads_in_sub_thread_pool, ",")
<< "]"
<< ", sub_thread_request_percentage = ["
<< absl::StrJoin(options.sub_thread_request_percentage, ",")
<< "]"
<< ", non_blocking_threads_sleep_time_micro_sec = "
<< options.non_blocking_threads_sleep_time_micro_sec
<< ", blocking_threads_max_sleep_time_micro_sec = "
<< options.blocking_threads_max_sleep_time_micro_sec
<< ", use_adaptive_waiting_time = "
<< options.use_adaptive_waiting_time
<< ", wait_if_no_active_request = "
<< options.wait_if_no_active_request
<< ", enable_wake_up = " << options.enable_wake_up << "}";
}

} // namespace tf
} // namespace tfrt
tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h
@@ -16,6 +16,9 @@ limitations under the License.
#define TENSORFLOW_CORE_TFRT_RUN_HANDLER_THREAD_POOL_RUN_HANDLER_CONCURRENT_WORK_QUEUE_H_

#include <memory>
#include <ostream>
#include <string>
#include <vector>

#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
@@ -77,7 +80,7 @@ class RunHandlerThreadWorkQueue
};

explicit RunHandlerThreadWorkQueue(const Options& options);
~RunHandlerThreadWorkQueue() override {}
~RunHandlerThreadWorkQueue() override = default;

std::string name() const override {
return tensorflow::strings::StrCat(
@@ -130,6 +133,8 @@ class RunHandlerThreadWorkQueue
blocking_work_queue_;
};

std::ostream& operator<<(std::ostream& strm,
const RunHandlerThreadWorkQueue::Options& options);
} // namespace tf
} // namespace tfrt
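A similarly hedged sketch for the RunHandlerThreadWorkQueue side: the new operator<< can be used to capture the work queue options as a string, e.g. for an error or status message. The OptionsDebugString helper below is illustrative only and not part of this commit:

#include <sstream>
#include <string>

#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"

// Render the options via the operator<< added in this commit so they can be
// attached to a log line or error message.
std::string OptionsDebugString(
    const tfrt::tf::RunHandlerThreadWorkQueue::Options& options) {
  std::ostringstream oss;
  oss << options;
  return oss.str();
}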

