From 51645f15b3854447c887abf0e92d0465d79ea92c Mon Sep 17 00:00:00 2001
From: Akshay Agrawal
Date: Thu, 24 May 2018 13:30:15 -0700
Subject: [PATCH] Fix bugs with the code blocks in defun's docstring.

PiperOrigin-RevId: 197943921
---
 tensorflow/python/eager/function.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index 120b298..b46e061 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -777,7 +777,7 @@ def defun(func=None, compiled=False):
   def h():
     return f(x, y)
 
-  assert h().numpy() == f(x, y)
+  assert (h().numpy() == f(x, y).numpy()).all()
 
   # `defun` automatically lifts variables out of the graphs it creates,
   # allowing you to compile the `call` methods of `tf.keras.layers.Layer` and
@@ -785,6 +785,7 @@ def defun(func=None, compiled=False):
   class MyModel(tf.keras.Model):
 
     def __init__(self, keep_probability=0.2):
+      super(MyModel, self).__init__()
       self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
       self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
       self.keep_probability = keep_probability
@@ -804,7 +805,7 @@ def defun(func=None, compiled=False):
   # `defun`-compiled functions are differentiable.
   optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
   with tf.GradientTape() as tape:
-    outputs = model(inputs)
+    outputs = model(x)
   gradient = tape.gradient(outputs, model.trainable_variables)
   optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,
                             model.trainable_variables))
@@ -840,6 +841,8 @@ def defun(func=None, compiled=False):
   import tensorflow as tf
   import numpy as np
 
+  tf.enable_eager_execution()
+
   matrix = tf.eye(5)
   # `matrix` is assumed to be a Tensor
   def add_noise():
@@ -862,6 +865,8 @@ def defun(func=None, compiled=False):
   ```python
   import tensorflow as tf
 
+  tf.enable_eager_execution()
+
   @tf.contrib.eager.defun
   def lossy_matmul(W, x, training=True):
     outputs = tf.matmul(W, x)
@@ -869,6 +874,9 @@ def defun(func=None, compiled=False):
       outputs = tf.nn.dropout(outputs, keep_probability=0.2)
     return outputs
 
+  W = tf.random_normal((3, 5))
+  x = tf.random_normal((5, 1))
+
   # Executes a graph that applies dropout.
   lossy_outputs = lossy_matmul(W, x, training=True)
 
@@ -919,14 +927,14 @@ def defun(func=None, compiled=False):
 
   # `fn` is a Python function, so x is created, initialized, and destroyed upon
   # every invocation
-  assert(fn().numpy() == fn().numpy() == 1.0)
+  assert fn().numpy() == fn().numpy() == 1.0
 
   compiled = tf.contrib.eager.defun(fn)
 
   # Compiling `fn` with `defun` hoists all variables outside of the generated
   # graph, so initialization happens exactly once.
-  assert(compiled().numpy() == 1.0)
-  assert(compiled().numpy() == 2.0)
+  assert compiled().numpy() == 1.0
+  assert compiled().numpy() == 2.0
   ```
 
   Finally, because each input signature is bound to a unique graph, if your
-- 
2.7.4