From 4b5308ef4698ea47eec25cf93ae09ae0c49cff8b Mon Sep 17 00:00:00 2001
From: Yong Tang
Date: Thu, 10 May 2018 11:38:04 -0700
Subject: [PATCH] Use "```" (backtick) for code blocks in adding_an_op.md
 (#19187)

* Use "```" (backtick) for code blocks in adding_an_op.md

In adding_an_op.md, most of the code blocks use "```" (backticks) and
annotations are added automatically. However, there was one place where
the code block was built with manual HTML code. This is really
error-prone and hard to change if there is an update in the future.

This fix converts the block to "```c++" (backticks) so that it is easy
to maintain in the future.

Signed-off-by: Yong Tang

* Fix extra `\` at the beginning of the block

Signed-off-by: Yong Tang

* Update adding_an_op.md

Add new lines where the <br/> tags were.
---
 tensorflow/docs_src/extend/adding_an_op.md | 63 ++++++++++++++++--------------
 1 file changed, 34 insertions(+), 29 deletions(-)

diff --git a/tensorflow/docs_src/extend/adding_an_op.md b/tensorflow/docs_src/extend/adding_an_op.md
index c379549..1b028be 100644
--- a/tensorflow/docs_src/extend/adding_an_op.md
+++ b/tensorflow/docs_src/extend/adding_an_op.md
@@ -863,48 +863,53 @@ REGISTER_OP("ZeroOut")
 Instead of writing another `OpKernel` with redundant code as above, often you
 will be able to use a C++ template instead. You will still have one kernel
 registration (`REGISTER_KERNEL_BUILDER` call) per overload.
-<pre class="prettyprint"><code class="lang-cpp">
-template <typename T>
+```c++
+template <typename T>
 class ZeroOutOp : public OpKernel {
  public:
-  explicit ZeroOutOp(OpKernelConstruction\* context) : OpKernel(context) {}<br/>
-  void Compute(OpKernelContext\* context) override {
+  explicit ZeroOutOp(OpKernelConstruction* context) : OpKernel(context) {}
+
+  void Compute(OpKernelContext* context) override {
     // Grab the input tensor
-    const Tensor& input\_tensor = context->input(0);
-    auto input = input\_tensor.flat<T>();<br/>
+    const Tensor& input_tensor = context->input(0);
+    auto input = input_tensor.flat<T>();
+
     // Create an output tensor
     Tensor* output = NULL;
-    OP\_REQUIRES\_OK(context,
-                   context->allocate\_output(0, input_tensor.shape(), &output));
-    auto output\_flat = output->template flat<T>();<br/>
+    OP_REQUIRES_OK(context,
+                   context->allocate_output(0, input_tensor.shape(), &output));
+    auto output_flat = output->template flat<T>();
+
     // Set all the elements of the output tensor to 0
     const int N = input.size();
-    for (int i = 0; i < N; i++) {
-      output\_flat(i) = 0;
-    }<br/>
+    for (int i = 0; i < N; i++) {
+      output_flat(i) = 0;
+    }
+
     // Preserve the first input value
-    if (N > 0) output\_flat(0) = input(0);
+    if (N > 0) output_flat(0) = input(0);
   }
-};<br/>
-// Note that TypeConstraint<int32>("T") means that attr "T" (defined
+};
+
+// Note that TypeConstraint<int32>("T") means that attr "T" (defined
 // in the op registration above) must be "int32" to use this template
-// instantiation.
-REGISTER\_KERNEL\_BUILDER(
+// instantiation.
+REGISTER_KERNEL_BUILDER(
     Name("ZeroOut")
-    .Device(DEVICE\_CPU)
-    .TypeConstraint<int32>("T"),
-    ZeroOutOp<int32>);
-REGISTER\_KERNEL\_BUILDER(
+    .Device(DEVICE_CPU)
+    .TypeConstraint<int32>("T"),
+    ZeroOutOp<int32>);
+REGISTER_KERNEL_BUILDER(
     Name("ZeroOut")
-    .Device(DEVICE\_CPU)
-    .TypeConstraint<float>("T"),
-    ZeroOutOp<float>);
-REGISTER\_KERNEL\_BUILDER(
+    .Device(DEVICE_CPU)
+    .TypeConstraint<float>("T"),
+    ZeroOutOp<float>);
+REGISTER_KERNEL_BUILDER(
     Name("ZeroOut")
-    .Device(DEVICE\_CPU)
-    .TypeConstraint<double>("T"),
-    ZeroOutOp<double>);
-</code></pre>
+    .Device(DEVICE_CPU)
+    .TypeConstraint<double>("T"),
+    ZeroOutOp<double>);
+```

 If you have more than a couple overloads, you can put the registration in a
 macro.
--
2.7.4
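
The "put the registration in a macro" suggestion at the end of the hunk could look roughly like the sketch below. This is a minimal illustration, not part of the patch: it assumes the `ZeroOutOp<T>` template and CPU-only kernels from the block above, and the macro name `REGISTER_KERNEL` is only illustrative.

```c++
// Sketch only (not part of the patch): wrap the per-type registration in a
// macro so each additional type is a one-line change.
// Assumes ZeroOutOp<T> is defined as in the hunk above and that the usual
// kernel registration headers (e.g. tensorflow/core/framework/op_kernel.h)
// are already included.
#define REGISTER_KERNEL(type)                                       \
  REGISTER_KERNEL_BUILDER(                                          \
      Name("ZeroOut").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
      ZeroOutOp<type>)

REGISTER_KERNEL(int32);
REGISTER_KERNEL(float);
REGISTER_KERNEL(double);

#undef REGISTER_KERNEL
```

With this pattern, the three explicit `REGISTER_KERNEL_BUILDER` calls in the block above collapse into three macro invocations, and supporting another numeric type means adding a single line rather than repeating the builder chain.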