
Commit b59e6fe
Merge branch 'master' into r0.14
avijit-nervana committed May 21, 2019
2 parents 0ef8393 + ae7191c
Showing 4 changed files with 58 additions and 3 deletions.
29 changes: 29 additions & 0 deletions src/ngraph_builder.cc
@@ -2232,6 +2232,34 @@ static Status TranslateL2LossOp(
return Status::OK();
}

static Status TranslateLogSoftmaxOp(
const Node* op, const std::vector<const Tensor*>& static_input_map,
Builder::OpMap& ng_op_map) {
shared_ptr<ng::Node> ng_inp;
TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, &ng_inp));
auto inp_shape = ng_inp->get_shape();
int rank = inp_shape.size();
auto ng_axis = ng::AxisSet{rank - 1};
// Batch i, class j
// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
// Actually implementing: logsoftmax[i, j] = logits[i, j] - max(logits[i]) -
// log(sum(exp(logits[i] - max(logits[i]))))
auto ng_max = ConstructNgNode<ng::op::Broadcast>(
op->name(), ConstructNgNode<ng::op::Max>(op->name(), ng_inp, ng_axis),
inp_shape, ng_axis);
auto ng_inp_minus_max =
ConstructNgNode<ng::op::Subtract>(op->name(), ng_inp, ng_max);
auto ng_exp = ConstructNgNode<ng::op::Exp>(op->name(), ng_inp_minus_max);
auto ng_log_sum = ConstructNgNode<ng::op::Log>(
op->name(), ConstructNgNode<ng::op::Sum>(op->name(), ng_exp, ng_axis));
auto ng_broadcast = ConstructNgNode<ng::op::Broadcast>(
op->name(), ng_log_sum, ng_inp->get_shape(), ng_axis);
auto ng_output = ConstructNgNode<ng::op::Subtract>(
op->name(), ng_inp_minus_max, ng_broadcast);
SaveNgOp(ng_op_map, op->name(), ng_output);
return Status::OK();
}

static Status TranslateMatMulOp(
const Node* op, const std::vector<const Tensor*>& static_input_map,
Builder::OpMap& ng_op_map) {
@@ -4525,6 +4553,7 @@ const static std::map<
{"HorovodAllreduce", TranslateAllreduceOp},
{"Identity", TranslateIdentityOp},
{"L2Loss", TranslateL2LossOp},
{"LogSoftmax", TranslateLogSoftmaxOp},
{"Less", TranslateBinaryOp<ngraph::op::Less>},
{"LessEqual", TranslateBinaryOp<ngraph::op::LessEq>},
{"Log", TranslateUnaryOp<ngraph::op::Log>},
2 changes: 2 additions & 0 deletions src/ngraph_mark_for_clustering.cc
@@ -281,6 +281,7 @@ Status MarkForClustering(Graph* graph,
#endif
confirmation_function_map["Identity"] = SimpleConfirmationFunction();
confirmation_function_map["L2Loss"] = SimpleConfirmationFunction();
confirmation_function_map["LogSoftmax"] = SimpleConfirmationFunction();
confirmation_function_map["Less"] = SimpleConfirmationFunction();
confirmation_function_map["LessEqual"] = SimpleConfirmationFunction();
confirmation_function_map["Log"] = SimpleConfirmationFunction();
@@ -449,6 +450,7 @@ Status MarkForClustering(Graph* graph,
#endif
type_constraint_map["Identity"]["T"] = NGraphDTypes();
type_constraint_map["L2Loss"]["T"] = NGraphNumericDTypes();
type_constraint_map["LogSoftmax"]["T"] = NGraphRealDTypes();
type_constraint_map["Less"]["T"] = NGraphDTypes();
type_constraint_map["LessEqual"]["T"] = NGraphDTypes();
type_constraint_map["Log"]["T"] = NGraphNumericDTypes();
6 changes: 3 additions & 3 deletions test/python/tensorflow/python_tests_list_gpu.txt
@@ -407,10 +407,10 @@ slice_op_test.SliceTest.testSliceOfSlice
 #softmax_op_test.SoftmaxTest.test1DTensorAsInputNoReshape
 #softmax_op_test.SoftmaxTest.test3DTensorAsInput
 #softmax_op_test.SoftmaxTest.test3DTensorAsInputNoReshape
-softmax_op_test.SoftmaxTest.testAlongFirstDimension
-softmax_op_test.SoftmaxTest.testAlongSecondDimension
+#softmax_op_test.SoftmaxTest.testAlongFirstDimension
+#softmax_op_test.SoftmaxTest.testAlongSecondDimension
 softmax_op_test.SoftmaxTest.testDimTooLarge
-softmax_op_test.SoftmaxTest.testDouble
+#softmax_op_test.SoftmaxTest.testDouble
 softmax_op_test.SoftmaxTest.testEmptyInput
 softmax_op_test.SoftmaxTest.testFloat
 #softmax_op_test.SoftmaxTest.testFloatGPU
24 changes: 24 additions & 0 deletions test/test_nn_ops.cpp
@@ -1116,6 +1116,30 @@ TEST(NNOps, L2Loss) {
}
}

// Test Op :"LogSoftmax"
TEST(NNOps, LogSoftmax) {
std::vector<std::vector<int64>> input_sizes = {
{3}, {3, 2}, {5, 6}, {3, 4, 5}, {2, 3, 4, 5}};

vector<int> static_input_indexes = {};

for (auto const& input_size : input_sizes) {
Scope root = Scope::NewRootScope();

Tensor input_data(DT_FLOAT, TensorShape(input_size));
AssignInputValuesRandom<float>(input_data, -2, 2);

auto R = ops::LogSoftmax(root, input_data);
vector<DataType> output_datatypes = {DT_FLOAT};
std::vector<Output> sess_run_fetchoutputs = {R};

OpExecuter opexecuter(root, "LogSoftmax", static_input_indexes,
output_datatypes, sess_run_fetchoutputs);

opexecuter.RunTest();
}
}
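
Outputs fetched by this test can be sanity-checked against a standalone reference of the same max-shifted formula. This is only a sketch, independent of the OpExecuter harness; LogSoftmaxRow is a hypothetical helper:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Numerically stable log-softmax over one non-empty row of logits,
// using the same max-shifted formula as TranslateLogSoftmaxOp.
std::vector<float> LogSoftmaxRow(const std::vector<float>& logits) {
  const float m = *std::max_element(logits.begin(), logits.end());
  float sum = 0.0f;
  for (float x : logits) sum += std::exp(x - m);
  const float log_sum = std::log(sum);
  std::vector<float> out(logits.size());
  for (std::size_t j = 0; j < logits.size(); ++j)
    out[j] = logits[j] - m - log_sum;
  return out;
}

int main() {
  // exp of the outputs sums to 1; prints -2.40761 -1.40761 -0.40761.
  for (float v : LogSoftmaxRow({1.0f, 2.0f, 3.0f})) std::cout << v << " ";
  std::cout << std::endl;
  return 0;
}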

// Test Op :"MaxPool3D"
TEST(NNOps, MaxPool3DNDHWCSame) {
std::vector<std::vector<int64>> input_sizes;
