math_ops.accumulate_n([a], tensor_dtype=np.int32)
class PolyvalTest(test.TestCase):
  """Checks math_ops.polyval against the numpy reference implementation."""

  def _runtest(self, dtype, degree):
    # Random 2x2 inputs; a degree-d polynomial needs degree + 1 coefficients.
    sample = np.random.rand(2, 2).astype(dtype)
    weights = []
    for _ in range(degree + 1):
      weights.append(np.random.rand(2, 2).astype(dtype))
    expected = np.polyval(weights, sample)
    with self.test_session():
      actual = math_ops.polyval(weights, sample)
      self.assertAllClose(expected, actual.eval())

  def testSimple(self):
    # Degrees 0..4 across every supported dtype.
    supported_dtypes = (np.int32, np.float32, np.float64, np.complex64,
                        np.complex128)
    for dtype in supported_dtypes:
      for degree in range(5):
        self._runtest(dtype, degree)

  def testBroadcast(self):
    # Every pairing of these shapes is broadcast-compatible, so the tf and
    # numpy results must agree shape-for-shape.
    candidate_shapes = ((1,), (2, 1), (1, 2), (2, 2))
    degree = 3
    for sample_shape in candidate_shapes:
      for weight_shape in candidate_shapes:
        sample = np.random.rand(*sample_shape).astype(np.float32)
        weights = [
            np.random.rand(*weight_shape).astype(np.float32)
            for _ in range(degree + 1)
        ]
        expected = np.polyval(weights, sample)
        with self.test_session():
          actual = math_ops.polyval(weights, sample)
          self.assertAllClose(expected, actual.eval())

  def testEmpty(self):
    # An empty coefficient list evaluates to all zeros, matching numpy.
    sample = np.random.rand(2, 2).astype(np.float32)
    weights = []
    expected = np.polyval(weights, sample)
    with self.test_session():
      actual = math_ops.polyval(weights, sample)
      self.assertAllClose(expected, actual.eval())
+
+
# Run every TestCase in this module when the file is executed as a script.
if __name__ == "__main__":
  test.main()
@@igammac
@@zeta
@@polygamma
+@@polyval
@@betainc
@@rint
@@diag
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
+from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
tf_export("arg_max")(arg_max)
tf_export("arg_min")(arg_min)
-
# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x._rank() is not None: # pylint: disable=protected-access
- return constant_op.constant(
- np.arange(x._rank()), dtype=dtypes.int32) # pylint: disable=protected-access
+ return constant_op.constant(np.arange(x._rank()), dtype=dtypes.int32) # pylint: disable=protected-access
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
+
def _input_error():
- return ValueError(
- "inputs must be a list of at least one Tensor with the "
- "same dtype and shape")
+ return ValueError("inputs must be a list of at least one Tensor with the "
+ "same dtype and shape")
+
if not inputs or not isinstance(inputs, (list, tuple)):
raise _input_error()
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
# tensor_dtype is for safety only; operator's output type computed in C++
if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
- raise TypeError("tensor_dtype is {}, but input is of type {}"
- .format(tensor_dtype, inputs[0].dtype))
+ raise TypeError("tensor_dtype is {}, but input is of type {}".format(
+ tensor_dtype, inputs[0].dtype))
if len(inputs) == 1 and name is None:
return inputs[0]
name=name)
else:
return gen_math_ops.sparse_segment_sum(
- data=data,
- indices=indices,
- segment_ids=segment_ids,
- name=name)
+ data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse_segment_mean")
-def sparse_segment_mean(data, indices, segment_ids, name=None,
+def sparse_segment_mean(data,
+ indices,
+ segment_ids,
+ name=None,
num_segments=None):
r"""Computes the mean along sparse segments of a tensor.
name=name)
else:
return gen_math_ops.sparse_segment_mean(
- data=data,
- indices=indices,
- segment_ids=segment_ids,
- name=name)
+ data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse_segment_sqrt_n")
-def sparse_segment_sqrt_n(data, indices, segment_ids, name=None,
+def sparse_segment_sqrt_n(data,
+ indices,
+ segment_ids,
+ name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
name=name)
else:
return gen_math_ops.sparse_segment_sqrt_n(
- data=data,
- indices=indices,
- segment_ids=segment_ids,
- name=name)
+ data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("tensordot", "linalg.tensordot")
return product
@tf_export("math.polyval")
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list of n + 1 tensors, this function
  returns the value of the n-th order polynomial

     p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n

  evaluated using Horner's method, i.e.

     p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] +
            x * coeffs[0]))

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `tensor` of the shape as the expression p(x) with usual broadcasting
    rules for element-wise addition and multiplication applied.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """

  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Match numpy.polyval: an empty coefficient list evaluates to zero
    # everywhere, with the same shape and dtype as `x`.
    if not coeffs:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    # Horner's method: fold coefficients in from highest to lowest degree so
    # only n multiplies and n adds are emitted.
    p = coeffs[0]
    for c in coeffs[1:]:
      p = c + p * x
    return p
+
# FFT ops were moved to tf.spectral. tf.fft symbols were part of the TensorFlow
# 1.0 API so we leave these here for backwards compatibility.
# Plain module-level alias: tf.fft resolves to the generated spectral op.
fft = gen_spectral_ops.fft