clean up code
Signed-off-by: fishbell <[email protected]>
songbell committed Dec 23, 2024
1 parent 59a00b0 commit a827247
Showing 6 changed files with 6 additions and 49 deletions.
@@ -223,17 +223,12 @@ struct reorder : public primitive_base<reorder> {
     memory_type input_mem_type = memory_type::buffer;
     /// @brief Parameters required for reorder weights.
     std::shared_ptr<WeightsReorderParams> weights_reorder_params = {};
-    /// @brief Parameters required for source transpose.
-    std::vector<int> src_permutation;
 
     inline bool has_surface_input() const {
         return input.size() == 1 &&
                input_mem_type == memory_type::surface;
     }
 
-    void set_src_permutation(const std::vector<int> & src_perm) {
-        this->src_permutation = src_perm;
-    }
 
     /// @brief Convert truncation Mode
     bool truncate = false;
 

@@ -44,8 +44,6 @@ void compile_graph::run(program& p) {
                 std::string fail_reason = "";
                 try {
                     if (selected_impl_manager) {
-                        if (node->id() == "reorder:/detect/Reshape_14_reorder")
-                            std::cout << "break" << std::endl;
                         node->selected_impl = selected_impl_manager->create(*node, *params);
                     }
                 } catch (std::exception& e) {
@@ -64,8 +62,8 @@ void compile_graph::run(program& p) {
             });
         }
     }
-    for (auto& iter : tasks)
-        task_executor->run_and_wait({iter});
 
+    task_executor->run_and_wait(tasks);
     tasks.clear();
 
     if (exception) {
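
The second hunk replaces a loop that submitted and awaited each compilation task individually with one run_and_wait call over the whole vector, so independent node compilations can overlap instead of running strictly one after another. Below is a minimal, self-contained sketch of the difference, using std::async as a stand-in for the task executor; the real ov::threading executor is not reproduced here.

    // Sketch only: std::async stands in for the task executor used by compile_graph.
    #include <chrono>
    #include <cstdio>
    #include <functional>
    #include <future>
    #include <thread>
    #include <vector>

    using Task = std::function<void()>;

    // Old pattern: each single-task batch is awaited before the next one starts,
    // so the tasks run strictly in sequence.
    void run_one_by_one(const std::vector<Task>& tasks) {
        for (const auto& task : tasks) {
            auto fut = std::async(std::launch::async, task);
            fut.get();  // blocks here; the next task has not been launched yet
        }
    }

    // New pattern: every task is in flight before we wait on any of them,
    // letting independent compilations overlap.
    void run_batch(const std::vector<Task>& tasks) {
        std::vector<std::future<void>> futures;
        futures.reserve(tasks.size());
        for (const auto& task : tasks)
            futures.emplace_back(std::async(std::launch::async, task));
        for (auto& fut : futures)
            fut.get();
    }

    int main() {
        std::vector<Task> tasks(4, Task([] {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
        }));
        run_batch(tasks);        // roughly one 50 ms wait
        run_one_by_one(tasks);   // roughly four 50 ms waits back to back
        std::puts("done");
        return 0;
    }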

@@ -137,7 +137,6 @@ void prepare_primitive_fusing::fuse_reorders(program &p) {

         program_helpers::do_for_types<reorder>(*node, [&p](reorder_node& node) {
             auto& input = node.input();
-
             // Restrictions:
             // - inputs cannot be padded
             // - primitives input cannot be output

@@ -54,11 +54,7 @@ struct reorder_onednn : typed_primitive_onednn_impl<reorder, dnnl::reorder::prim
         auto input_layout = impl_params.get_input_layout(0);
         auto output_layout = impl_params.get_output_layout();
 
-        auto test_md = onednn::layout_to_memory_desc(input_layout);
-        auto input_md = test_md;
-        auto permute_order = impl_params.typed_desc<reorder>()->src_permutation;
-        if (permute_order.size())
-            input_md = test_md.permute_axes({0, 3, 1, 2});
+        auto input_md = onednn::layout_to_memory_desc(input_layout);
         auto output_md = onednn::layout_to_memory_desc(output_layout);
 
         OPENVINO_ASSERT(input_md.get_format_kind() != dnnl::memory::format_kind::any,
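
With the experiment gone, the input descriptor comes straight from the layout. The deleted branch instead rebuilt it with permute_axes, hard-coding {0, 3, 1, 2} regardless of the stored order. For reference, here is a standalone sketch of that oneDNN call, assuming the v3 C++ API; the 1x8x4x4 f32 descriptor is an arbitrary example, not taken from this commit.

    // Standalone oneDNN sketch; dims and data type are arbitrary examples.
    #include <iostream>
    #include <oneapi/dnnl/dnnl.hpp>

    int main() {
        using namespace dnnl;
        // Logical dims in NCHW order: N=1, C=8, H=4, W=4.
        memory::desc md({1, 8, 4, 4}, memory::data_type::f32, memory::format_tag::nchw);
        // oneDNN documents the permutation as new_desc.dims()[perm[i]] = dims()[i],
        // so {0, 3, 1, 2} turns the 1x8x4x4 descriptor into a 1x4x4x8 one.
        memory::desc permuted = md.permute_axes({0, 3, 1, 2});
        for (auto d : permuted.get_dims())
            std::cout << d << ' ';   // prints: 1 4 4 8
        std::cout << '\n';
        return 0;
    }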

src/plugins/intel_gpu/src/graph/program.cpp: 1 change (0 additions & 1 deletion)
@@ -598,7 +598,6 @@ void program::pre_optimize_graph(bool is_internal) {
         apply_opt_pass<prepare_buffer_fusing>();
     }
 
-    apply_opt_pass<handle_reshape>();
     // check if there exists some layout incompatibilities and add an reorder node if required
     apply_opt_pass<add_required_reorders>();
 
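
Each apply_opt_pass<T>() call constructs one graph pass and runs it against the program, so deleting the line removes handle_reshape from this point of the pre-optimization pipeline. A generic sketch of that pattern follows; the stub types are illustrative, not the real cldnn::program internals.

    // Generic sketch of an apply_opt_pass<T>()-style pipeline; the stub names
    // are illustrative, not the real cldnn::program internals.
    #include <iostream>
    #include <utility>

    struct program_stub {
        // Constructs the pass in place and runs it immediately, mirroring how
        // each apply_opt_pass<...>() line contributes one pass to the pipeline.
        template <typename Pass, typename... Args>
        void apply_opt_pass(Args&&... args) {
            Pass pass(std::forward<Args>(args)...);
            pass.run(*this);
        }
    };

    struct add_required_reorders {
        void run(program_stub&) { std::cout << "add_required_reorders\n"; }
    };

    int main() {
        program_stub p;
        // Removing this line would drop the pass, exactly as the diff above
        // drops handle_reshape from pre_optimize_graph.
        p.apply_opt_pass<add_required_reorders>();
        return 0;
    }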

src/plugins/intel_gpu/src/graph/reorder.cpp: 34 changes (2 additions & 32 deletions)
@@ -170,22 +170,7 @@ layout reorder_inst::calc_output_layout(reorder_node const& node, kernel_impl_pa
         // TODO Shouldn't transform be called every time ifmt != ofmt?
         return layout(odt, ofmt, input_layout.get_tensor().transform(ofmt, 1), op);
     } else {
-        // debug code, to fuse reorder with src permute
-        auto org_ps = input_layout.get_partial_shape();
-        auto output_shape = ov::PartialShape();
-        int64_t input_static_rank = org_ps.rank().get_length();
-        auto permute_order = desc->src_permutation;
-        if (permute_order.empty()) {
-            for (int64_t i = 0; i <= input_static_rank - 1; ++i) {
-                permute_order.emplace_back(i); // for compliance first
-            }
-        }
-
-        for (int64_t i = 0; i < input_static_rank; ++i) {
-            output_shape.push_back(org_ps[permute_order[i]]);
-        }
-        return { layout(odt, ofmt, ov::intel_gpu::tensor_from_dims(output_shape.to_shape()), desc->output_paddings[0]) };
-        //return layout(odt, ofmt, input_layout.get_tensor(), op);
+        return layout(odt, ofmt, input_layout.get_tensor(), op);
     }
 }

@@ -206,22 +191,7 @@ std::vector<layout> reorder_inst::calc_output_layouts(reorder_node const& /*node
 #endif // ENABLE_ONEDNN_FOR_GPU
         return { desc->weights_reorder_params->get_output_layout() };
     } else {
-        // debug code, to fuse reorder with src permute
-        auto org_ps = input_layout.get<ShapeType>();
-        ShapeType output_shape;
-        int64_t input_static_rank = org_ps.rank().get_length();
-        auto permute_order = desc->src_permutation;
-        if (permute_order.empty()) {
-            for (int64_t i = 0; i <= input_static_rank - 1; ++i) {
-                permute_order.emplace_back(i); // for compliance first
-            }
-        }
-
-        for (int64_t i = 0; i < input_static_rank; ++i) {
-            output_shape.push_back(org_ps[permute_order[i]]);
-        }
-        return { layout(output_shape, desc->output_data_types[0].value(), ofmt, desc->output_paddings[0]) };
-        //return { layout(input_layout.get<ShapeType>(), desc->output_data_types[0].value(), ofmt, desc->output_paddings[0]) };
+        return { layout(input_layout.get<ShapeType>(), desc->output_data_types[0].value(), ofmt, desc->output_paddings[0]) };
     }
 }

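
Both output-layout calculators now pass the input shape through unchanged. The deleted experiment instead gathered output dimensions through src_permutation, falling back to an identity order when none was set. Below is a standalone sketch of that gather, exercised with the {0, 3, 1, 2} NHWC-to-NCHW order the oneDNN path hard-coded; the helper name is illustrative.

    // Standalone sketch of the removed shape gather; names are illustrative.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    std::vector<int64_t> permute_shape(const std::vector<int64_t>& in,
                                       std::vector<int> order) {
        if (order.empty()) {                 // no order given: use identity
            order.resize(in.size());
            std::iota(order.begin(), order.end(), 0);
        }
        std::vector<int64_t> out;
        out.reserve(in.size());
        for (std::size_t i = 0; i < in.size(); ++i)
            out.push_back(in[order[i]]);     // gather: out[i] = in[order[i]]
        return out;
    }

    int main() {
        // NHWC {1, 4, 4, 8} gathered with {0, 3, 1, 2} gives NCHW {1, 8, 4, 4}.
        assert((permute_shape({1, 4, 4, 8}, {0, 3, 1, 2}) ==
                std::vector<int64_t>{1, 8, 4, 4}));
        return 0;
    }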
