from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
all_inputs = list(args) + list(kwargs.values())
# The variables that grad_fn needs to return gradients for are the set of
# variables used that are *not* part of the inputs.
- variables = list(set(tape.watched_variables()) - set(all_inputs))
+ # NOTE(review): the subtraction is restricted to ResourceVariable inputs
+ # because set(all_inputs) would fail on unhashable arguments such as numpy
+ # arrays (exercised by testWithNumpyInputs added in this same change) --
+ # confirm no other hashable-but-variable-like input kinds need excluding.
+ variable_inputs = [
+ arg for arg in all_inputs
+ if isinstance(arg, resource_variable_ops.ResourceVariable)
+ ]
+ variables = list(set(tape.watched_variables()) - set(variable_inputs))
flat_result = nest.flatten(result)
# TODO(apassos) consider removing the identity below.
flat_result = [gen_array_ops.identity(x) for x in flat_result]
self.assertEqual(6., math_ops.reduce_sum(dx).numpy())
self.assertEqual(8., math_ops.reduce_sum(dw).numpy())
+ def testWithNumpyInputs(self):
+ """Smoke test: a custom_gradient function accepts plain numpy inputs."""
+ with context.eager_mode():
+
+ @custom_gradient.custom_gradient
+ def F(x):
+ out = x
+
+ def Grad(_):
+ return (None, None)
+
+ return out, Grad
+
+ x = np.ones((3, 2), dtype=np.float32)
+ # Smoke test to ensure numpy inputs are accepted
+ F(x)
+
if __name__ == "__main__":
googletest.main()