VLOG(1) << "MklInputConversionOp: Shape is same, but format is "
"different, "
<< "need to convert to same format";
-
- // Convert input0, and keep input1 unchanged
- // Create MklDnnShape for output mkl tensor based on input0
+ // TODO: For now, input0 is converted and input1 is unchanged;
+ // we should choose the optimal MKL format to convert to.
Tensor* tensor_out;
MklDnnShape mkl_output_mkl_shape;
mkl_output_mkl_shape.SetMklTensor(true);
// with MKL tensors)
VLOG(1) << "MklInputConversionOp: Broadcast needed, "
<< "converted MKL inputs to TF format";
-
+ // TODO: Clean up op_data_type and has_avx512f_ after these two parameters
+ // are removed from ConvertMklToTf.
MklToTfOp<Device, T>::ConvertMklToTf(this, context, data_format_str,
op_data_type, has_avx512f_, kInputIndex_0);
MklToTfOp<Device, T>::ConvertMklToTf(this, context, data_format_str,
}
// Broadcast is needed if the shapes are not the same
- bool broadcast_needed;
-
- size_t in0_size = 1;
- for (size_t i = 0; i < mkl_shape->GetDimension(); ++i)
- in0_size *= mkl_shape->TfDimSize(i);
-
- size_t in1_size = 1;
- for (size_t i = 0; i < tf_tensor->shape().dims(); ++i)
- in1_size *= tf_tensor->shape().dim_size(i);
-
- broadcast_needed = (in0_size != in1_size);
-
- if (!broadcast_needed) {
+ if (mkl_shape->GetTfShape().num_elements() == tf_tensor->shape().num_elements() ) {
// Both shapes are same, convert the TF input to MKL
VLOG(1) << "MklInputConversionOp: No broadcast needed.";
VLOG(1) << "MklInputConversionOp: Converting input " << tf_tensor_index
// Create reorder between tensorflow layout and Mkl layout if necessary
std::vector<primitive> net;
- tf_input.CheckReorderToOpMem(
+ bool reordered = tf_input.CheckReorderToOpMem(
memory::primitive_desc(output_mkl_md, cpu_engine),
tensor_out, &net);
- stream(stream::kind::eager).submit(net).wait();
+ if(!reordered) {
+ // This is the case where the TF tensor has the same shape and format as the
+ // mkl tensor. However, tf_tensor cannot simply be forwarded to the output
+ // tensor, since the mkl data tensor is always a one-dimensional tensor.
+ // Tensor::CopyFrom shares the buffer of the other tensor while setting its
+ // shape to that of the other tensor.
+ tensor_out->CopyFrom(*tf_tensor, tensor_out->shape());
+ }
+ else
+ stream(stream::kind::eager).submit(net).wait();
// -- The tensor in MKL format passes through --
ForwardMklTensorInToOut(context, mkl_tensor_index, mkl_tensor_index);