
Commit

Temporary change for debugging
Signed-off-by: Gigon Bae <[email protected]>
gigony committed Sep 25, 2024
1 parent 3846d87 commit 0f31796
Showing 9 changed files with 215 additions and 87 deletions.
4 changes: 4 additions & 0 deletions .vscode/launch.json
@@ -801,6 +801,10 @@
{
"name": "LD_LIBRARY_PATH",
"value": "${env:LD_LIBRARY_PATH}:/usr/lib/aarch64-linux-gnu/tegra/"
},
{
"name": "HOLOSCAN_LOG_LEVEL",
"value": "DEBUG"
}
],
"stopAtEntry": false,
@@ -23,6 +23,9 @@
#include <lstm_tensor_rt_inference.hpp>
#include <tool_tracking_postprocessor.hpp>

#include <holoscan/operators/format_converter/format_converter.hpp>
#include <holoscan/operators/holoviz/holoviz.hpp>

#include "grpc_ops.hpp"
#include "resource_queue.hpp"

@@ -36,6 +39,70 @@ class AppCloud : public AppBase {
HOLOSCAN_LOG_INFO("===============AppCloud===============");
using namespace holoscan;

uint32_t width = 854;
uint32_t height = 480;
int64_t source_block_size = width * height * 3 * 4;
int64_t source_num_blocks = 2;

// auto bitstream_reader = make_operator<VideoReadBitstreamOp>(
// "bitstream_reader",
// from_config("bitstream_reader"),
// Arg("input_file_path", datapath_ + "/surgical_video.264"),
// make_condition<CountCondition>(750),
// make_condition<PeriodicCondition>("periodic-condition",
// Arg("recess_period") = std::string("25hz")),
// Arg("pool") =
// make_resource<BlockMemoryPool>(
// "pool", 0, source_block_size, source_num_blocks));

// auto response_condition =
// make_condition<AsynchronousCondition>("response_condition");
// auto video_decoder_context = make_resource<VideoDecoderContext>(
// "decoder-context", Arg("async_scheduling_term") = response_condition);

// auto request_condition =
// make_condition<AsynchronousCondition>("request_condition");
// auto video_decoder_request = make_operator<VideoDecoderRequestOp>(
// "video_decoder_request",
// from_config("video_decoder_request"),
// // make_condition<CountCondition>(30),
// Arg("async_scheduling_term") = request_condition,
// Arg("videodecoder_context") = video_decoder_context);

// auto video_decoder_response = make_operator<VideoDecoderResponseOp>(
// "video_decoder_response",
// from_config("video_decoder_response"),
// // make_condition<CountCondition>(30),
// Arg("pool") =
// make_resource<BlockMemoryPool>(
// "pool", 1, source_block_size, source_num_blocks),
// Arg("videodecoder_context") = video_decoder_context);

// auto decoder_output_format_converter =
// make_operator<ops::FormatConverterOp>("decoder_output_format_converter",
// from_config("decoder_output_format_converter"),
// Arg("pool") = make_resource<BlockMemoryPool>(
// "pool", 1, source_block_size, source_num_blocks));

std::shared_ptr<BlockMemoryPool> visualizer_allocator =
make_resource<BlockMemoryPool>(
"allocator", 1, source_block_size, source_num_blocks);
auto visualizer = make_operator<ops::HolovizOp>("holoviz",
from_config("holoviz"),
Arg("width") = width,
Arg("height") = height,
Arg("enable_render_buffer_input") = false,
Arg("enable_render_buffer_output") = false,
Arg("allocator") = visualizer_allocator);

// add_flow(bitstream_reader, video_decoder_request,
// {{"output_transmitter", "input_frame"}});
// add_flow(video_decoder_response, decoder_output_format_converter,
// {{"output_transmitter", "source_video"}});
// add_flow(decoder_output_format_converter, visualizer,
// {{"tensor", "receivers"}});


auto request_available_condition =
make_condition<AsynchronousCondition>("request_available_condition");
request_queue_ =
@@ -46,6 +113,8 @@ class AppCloud : public AppBase {
auto grpc_request_op = make_operator<GrpcServerRequestOp>(
"grpc_request_op",
Arg("server_address") = std::string("0.0.0.0:50051"),
// make_condition<PeriodicCondition>("periodic-condition",
// Arg("recess_period") = std::string("60hz")),
Arg("request_queue") = request_queue_,
Arg("response_queue") = response_queue_,
Arg("condition") = request_available_condition,
@@ -95,20 +164,38 @@
Arg("device_allocator") = make_resource<UnboundedAllocator>("device_allocator"),
Arg("host_allocator") = make_resource<UnboundedAllocator>("host_allocator"));

auto grpc_response = make_operator<GrpcServerResponseOp>(
"grpc_response", Arg("response_queue") = response_queue_);
// auto grpc_response = make_operator<GrpcServerResponseOp>(
// "grpc_response", Arg("response_queue") = response_queue_);

add_flow(grpc_request_op, video_decoder_request, {{"output", "input_frame"}});
// add_flow(bitstream_reader, video_decoder_request,
// {{"output_transmitter", "input_frame"}});


add_flow(video_decoder_response,
decoder_output_format_converter,
{{"output_transmitter", "source_video"}});
add_flow(
decoder_output_format_converter, rgb_float_format_converter, {{"tensor", "source_video"}});
add_flow(rgb_float_format_converter, lstm_inferer);
add_flow(lstm_inferer, tool_tracking_postprocessor, {{"tensor", "in"}});
// add_flow(tool_tracking_postprocessor,
// grpc_response,
// {{"out_coords", "input"}, {"out_mask", "input"}});
// Arg("pool") = make_resource<UnboundedAllocator>("pool"));
// add_flow(tool_tracking_postprocessor,
// grpc_response,
// {{"out_coords", "input"}, {"out_mask", "input"}});

add_flow(tool_tracking_postprocessor,
         grpc_response,
         {{"out_coords", "input"}, {"out_mask", "input"}});
add_flow(tool_tracking_postprocessor,
         visualizer,
         {{"out_coords", "receivers"}, {"out_mask", "receivers"}});
add_flow(decoder_output_format_converter, visualizer,
         {{"tensor", "receivers"}});

// visualizer,
// // {{"tensor", "receivers"}});

}

private:
@@ -40,12 +40,14 @@ class AppEdge : public AppBase {

auto video_in = make_fragment<VideoInputFragment>(
"video_in", datapath_);
auto viz = make_fragment<VizFragment>("viz", width, height);
// auto viz = make_fragment<VizFragment>("viz", width, height);

add_flow(video_in,
viz,
{{"decoder_output_format_converter.tensor", "visualizer_op.receivers"},
{"incoming_responses.output", "visualizer_op.receivers"}});
add_fragment(video_in);

// add_flow(video_in,
// viz,
// {{"decoder_output_format_converter.tensor", "visualizer_op.receivers"},
// {"incoming_responses.output", "visualizer_op.receivers"}});

}

@@ -21,11 +21,18 @@ extensions:
- libgxf_videoencoderio.so
- ../../../../gxf_extensions/lstm_tensor_rt_inference/libgxf_lstm_tensor_rt_inference.so

# extensions:
# - libgxf_std.so
# - libgxf_cuda.so
# - libgxf_multimedia.so
# - libgxf_videodecoder.so
# - libgxf_videodecoderio.so

scheduler:
check_recession_period_ms: 0
worker_thread_number: 8
stop_on_deadlock: true
stop_on_deadlock_timeout: 500
stop_on_deadlock_timeout: 10000

replayer:
basename: "surgical_video"
@@ -125,29 +132,37 @@ holoviz:
- Irrigator
- Spec.Bag

holoviz_overlay:
headless: true
tensors:
- name: mask
type: color
opacity: 1.0
priority: 1
- name: scaled_coords
type: crosses
opacity: 1.0
line_width: 4
color: [1.0, 0.0, 0.0, 1.0]
priority: 2
- name: scaled_coords
type: text
opacity: 1.0
priority: 3
color: [1.0, 1.0, 1.0, 0.9]
text:
- Grasper
- Bipolar
- Hook
- Scissors
- Clipper
- Irrigator
- Spec.Bag
# holoviz_overlay:
# headless: true
# tensors:
# - name: mask
# type: color
# opacity: 1.0
# priority: 1
# - name: scaled_coords
# type: crosses
# opacity: 1.0
# line_width: 4
# color: [1.0, 0.0, 0.0, 1.0]
# priority: 2
# - name: scaled_coords
# type: text
# opacity: 1.0
# priority: 3
# color: [1.0, 1.0, 1.0, 0.9]
# text:
# - Grasper
# - Bipolar
# - Hook
# - Scissors
# - Clipper
# - Irrigator
# - Spec.Bag


# holoviz:
# tensors:
# - name: ""
# type: color
# opacity: 1.0
# priority: 0
@@ -98,7 +98,7 @@ class GrpcClientRequestOp : public holoscan::Operator {
request = request_queue_->pop();
request->set_service("endoscopy_tool_tracking");

HOLOSCAN_LOG_DEBUG("endoscopy_tool_tracking: Sending request");
HOLOSCAN_LOG_INFO("endoscopy_tool_tracking: Sending request");
return EntityClient::WRITE;
},
// Handle incoming responses
@@ -109,7 +109,7 @@
holoscan::ops::TensorProto::entity_response_to_tensor(
response, out_message.value(), gxf_allocator.value());
response_queue_->push(out_message.value());
HOLOSCAN_LOG_DEBUG("Response received and queued");
HOLOSCAN_LOG_INFO("Response received and queued");
},
// Complete the requests
[this](const grpc::Status& status) {
@@ -198,18 +198,23 @@ class GrpcServerRequestOp : public holoscan::Operator {

void compute(InputContext& op_input, OutputContext& op_output,
ExecutionContext& context) override {
HOLOSCAN_LOG_INFO("GrpcServerRequestOp::compute");
auto request = request_queue_->pop();
auto result = nvidia::gxf::Entity(std::move(request));
op_output.emit(result, "output");

if (request_queue_->empty()) { condition_->event_state(AsynchronousEventState::EVENT_WAITING); }
if (request_queue_->empty()) {
HOLOSCAN_LOG_INFO("GrpcServerRequestOp::compute: request_queue_ is empty");
condition_->event_state(AsynchronousEventState::EVENT_WAITING);
}
}

private:
void StartInternal() {
HoloscanEntityServiceImpl service(
// Handle incoming requests
[this](EntityRequest& request) {
HOLOSCAN_LOG_INFO("GrpcServerRequestOp::StartInternal: request received");
auto route = request.service();
if (route == "endoscopy_tool_tracking") {
auto gxf_allocator = nvidia::gxf::Handle<nvidia::gxf::Allocator>::Create(
@@ -57,43 +57,44 @@ class VideoInputFragment : public holoscan::Fragment {
Arg("response_queue") = response_queue_,
Arg("allocator") = make_resource<UnboundedAllocator>("pool"));

auto incoming_responses = make_operator<GrpcClientResponseOp>(
"incoming_responses",
Arg("response_queue") = response_queue_,
Arg("allocator") = make_resource<UnboundedAllocator>("pool"),
Arg("condition") = condition_);

auto response_condition = make_condition<AsynchronousCondition>("response_condition");
auto video_decoder_context = make_resource<VideoDecoderContext>(
"decoder-context", Arg("async_scheduling_term") = response_condition);

auto request_condition = make_condition<AsynchronousCondition>("request_condition");
auto video_decoder_request =
make_operator<VideoDecoderRequestOp>("video_decoder_request",
from_config("video_decoder_request"),
request_condition,
Arg("async_scheduling_term") = request_condition,
Arg("videodecoder_context") = video_decoder_context);

auto video_decoder_response = make_operator<VideoDecoderResponseOp>(
"video_decoder_response",
from_config("video_decoder_response"),
response_condition,
Arg("pool") = make_resource<UnboundedAllocator>("pool"),
Arg("videodecoder_context") = video_decoder_context);

auto decoder_output_format_converter = make_operator<ops::FormatConverterOp>(
"decoder_output_format_converter",
from_config("decoder_output_format_converter"),
Arg("pool") = make_resource<UnboundedAllocator>("pool"));
// auto incoming_responses = make_operator<GrpcClientResponseOp>(
// "incoming_responses",
// Arg("response_queue") = response_queue_,
// Arg("allocator") = make_resource<UnboundedAllocator>("pool"),
// Arg("condition") = condition_);

// auto response_condition = make_condition<AsynchronousCondition>("response_condition");
// auto video_decoder_context = make_resource<VideoDecoderContext>(
// "decoder-context", Arg("async_scheduling_term") = response_condition);

// auto request_condition = make_condition<AsynchronousCondition>("request_condition");
// auto video_decoder_request =
// make_operator<VideoDecoderRequestOp>("video_decoder_request",
// from_config("video_decoder_request"),
// request_condition,
// Arg("async_scheduling_term") = request_condition,
// Arg("videodecoder_context") = video_decoder_context);

// auto video_decoder_response = make_operator<VideoDecoderResponseOp>(
// "video_decoder_response",
// from_config("video_decoder_response"),
// response_condition,
// Arg("pool") = make_resource<UnboundedAllocator>("pool"),
// Arg("videodecoder_context") = video_decoder_context);

// auto decoder_output_format_converter = make_operator<ops::FormatConverterOp>(
// "decoder_output_format_converter",
// from_config("decoder_output_format_converter"),
// Arg("pool") = make_resource<UnboundedAllocator>("pool"));

add_flow(bitstream_reader, outgoing_requests, {{"output_transmitter", "input"}});
add_flow(bitstream_reader, video_decoder_request, {{"output_transmitter", "input_frame"}});
add_flow(video_decoder_response,
decoder_output_format_converter,
{{"output_transmitter", "source_video"}});

add_operator(incoming_responses);
// add_flow(bitstream_reader, video_decoder_request, {{"output_transmitter", "input_frame"}});
// add_flow(video_decoder_response,
// decoder_output_format_converter,
// {{"output_transmitter", "source_video"}});

// add_operator(incoming_responses);
}

private:
@@ -21,6 +21,12 @@ extensions:
- libgxf_videodecoder.so
- libgxf_videodecoderio.so

scheduler:
check_recession_period_ms: 0
worker_thread_number: 8
stop_on_deadlock: true
stop_on_deadlock_timeout: 10000

bitstream_reader:
outbuf_storage_type: 0
aud_nal_present: 0
Diffs for the remaining two changed files did not load.