REGISTER_GRADIENT_OP("Softmax", SoftmaxGrad);
Status LogSoftmaxGrad(const Scope& scope, const Operation& op,
- const std::vector<Output>& grad_inputs,
- std::vector<Output>* grad_outputs) {
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
auto softmax = Exp(scope, op.output(0));
auto sum = Sum(scope, grad_inputs[0], {1}, Sum::KeepDims(true));
auto mul = Mul(scope, sum, softmax);
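
For reference, this hunk sits in the log-softmax gradient: with y = LogSoftmax(x), the backward pass is dL/dx = dL/dy - exp(y) * sum(dL/dy) over the class axis. `softmax` above recovers exp(y) = Softmax(x), `sum` reduces grad_inputs[0] over axis 1 with KeepDims so it broadcasts, and `mul` is the correction term that is then subtracted from grad_inputs[0] just past the end of this excerpt.
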
@@ ... @@ Status BiasAddGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string data_format;
- BiasAddGrad::Attrs input_attrs;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.output(0).node()->attrs(), "data_format", &data_format));
- input_attrs.DataFormat(data_format);
- auto dx_1 = BiasAddGrad(scope, grad_inputs[0], input_attrs);
+ auto dx_1 =
+ BiasAddGrad(scope, grad_inputs[0], BiasAddGrad::DataFormat(data_format));
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
grad_outputs->push_back(dx_1);
return scope.status();
@@ ... @@ Status Conv2DGrad(const Scope& scope, const Operation& op,
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "use_cudnn_on_gpu", &use_cudnn_on_gpu));
- Conv2DBackpropInput::Attrs input_attrs;
- input_attrs.DataFormat(data_format);
- input_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
- auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)),
- op.input(1), grad_inputs[0],
- strides, padding, input_attrs);
+ auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)), op.input(1),
+ grad_inputs[0], strides, padding,
+ Conv2DBackpropInput::DataFormat(data_format)
+ .UseCudnnOnGpu(use_cudnn_on_gpu));
grad_outputs->push_back(dx_1);
- Conv2DBackpropFilter::Attrs filter_attrs;
- filter_attrs.DataFormat(data_format);
- filter_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
- auto dx_2 = Conv2DBackpropFilter(scope, op.input(0),
- Shape(scope, op.input(1)), grad_inputs[0],
- strides, padding, filter_attrs);
+ auto dx_2 =
+ Conv2DBackpropFilter(scope, op.input(0), Shape(scope, op.input(1)),
+ grad_inputs[0], strides, padding,
+ Conv2DBackpropFilter::DataFormat(data_format)
+ .UseCudnnOnGpu(use_cudnn_on_gpu));
grad_outputs->push_back(dx_2);
return scope.status();
}
@@ ... @@ Status MaxPoolGradHelper(const Scope& scope, const Operation& op,
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
- internal::MaxPoolGrad::Attrs grad_attrs;
- grad_attrs.DataFormat(data_format);
- auto dx = internal::MaxPoolGrad(scope, op.input(0),
- op.output(0),
- grad_inputs[0],
- ksize, strides,
- padding, grad_attrs);
+ auto dx = internal::MaxPoolGrad(
+ scope, op.input(0), op.output(0), grad_inputs[0], ksize, strides, padding,
+ internal::MaxPoolGrad::DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
@@ ... @@ Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
auto attrs = op.output(0).node()->attrs();
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
- MaxPoolGradV2::Attrs grad_attrs;
- grad_attrs.DataFormat(data_format);
- auto dx = MaxPoolGradV2(scope, op.input(0),
- op.output(0),
- grad_inputs[0],
- op.input(1),
- op.input(2),
- padding,
- grad_attrs);
+ auto dx = MaxPoolGradV2(scope, op.input(0), op.output(0), grad_inputs[0],
+ op.input(1), op.input(2), padding,
+ MaxPoolGradV2::DataFormat(data_format));
grad_outputs->push_back(dx);
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
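
The two NoGradient() entries correspond to MaxPoolV2's ksize and strides inputs (passed above as op.input(1) and op.input(2)); they are non-differentiable shape parameters, so only the first output of this helper carries a gradient.
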
@@ ... @@
Status LRNGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
- std::vector<Output>* grad_outputs){
- internal::LRNGrad::Attrs grad_attrs;
-
- auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0),
- grad_attrs);
+ std::vector<Output>* grad_outputs) {
+ auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0));
grad_outputs->push_back(dx);
return scope.status();
}
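
Taken together, the hunks above all make the same substitution: instead of default-constructing an `Op::Attrs` struct and calling a setter on it, the attribute is supplied through the op's static setter (BiasAddGrad::DataFormat(...), Conv2DBackpropInput::DataFormat(...).UseCudnnOnGpu(...)), which returns an Attrs value that can be chained and passed straight to the op constructor. The generated setters return a modified copy of the struct rather than mutating it in place, so the removed statement-style calls such as `input_attrs.DataFormat(data_format);` discarded the result and left the struct at its defaults. Below is a minimal standalone sketch of the two styles using BiasAddGrad from the public C++ ops API; the Placeholder input and the main() harness are illustrative additions, not part of the patch:

#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"

using namespace tensorflow;       // NOLINT
using namespace tensorflow::ops;  // NOLINT

int main() {
  Scope scope = Scope::NewRootScope();
  auto grad = Placeholder(scope, DT_FLOAT);

  // Old style (the '-' lines above): DataFormat() returns a new Attrs and
  // leaves `attrs` untouched, so BiasAddGrad still sees the default "NHWC".
  BiasAddGrad::Attrs attrs;
  attrs.DataFormat("NCHW");  // result discarded; the compiler may warn here
  auto dx_old = BiasAddGrad(scope, grad, attrs);

  // New style (the '+' lines above): build the Attrs inline via the static
  // setter; further setters chain, e.g.
  // Conv2DBackpropInput::DataFormat(...).UseCudnnOnGpu(...).
  auto dx_new = BiasAddGrad(scope, grad, BiasAddGrad::DataFormat("NCHW"));

  return scope.status().ok() ? 0 : 1;
}

The chained form is the one the patch uses for Conv2DBackpropInput and Conv2DBackpropFilter, where DataFormat and UseCudnnOnGpu are set in a single expression.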