* Fix deprecation warning about keep_dims: rename keep_dims -> keepdims for tf.reduce_sum() and the other reduction ops.
* Fix test failure.
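For context, a minimal sketch of the rename (assuming a TF 1.x release where both spellings are still accepted and keep_dims only emits a deprecation warning):

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# Deprecated spelling -- triggers the warning this change removes:
#   s = tf.reduce_sum(x, 1, keep_dims=True)
# Preferred spelling, identical behavior (result shape [2, 1]):
s = tf.reduce_sum(x, 1, keepdims=True)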
# Calculate softmax probabilities for each class.
unnormalized_probs = math_ops.exp(logits)
- normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keep_dims=True)
+ normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keepdims=True)
softmax_predictions = math_ops.divide(unnormalized_probs,
math_ops.add(normalizers, eps))
update_op: An update operation to update the loss's internal state.
"""
unweighted_loss = math_ops.reduce_sum(
- math_ops.square(predictions - labels), 1, keep_dims=True)
+ math_ops.square(predictions - labels), 1, keepdims=True)
return unweighted_loss * weights, control_flow_ops.no_op()
* math_ops.log(self.temperature))
# compute the unnormalized density
log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)
- log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keep_dims=False)
+ log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keepdims=False)
# combine unnormalized density with normalization constant
log_prob = log_norm_const + log_unnorm_prob
# Reshapes log_prob to be consistent with shape of user-supplied logits
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
squared_distance = (
- math_ops.reduce_sum(math_ops.square(inp), 1, keep_dims=True) -
+ math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
- math_ops.square(clusters), 1, keep_dims=True)))
+ math_ops.square(clusters), 1, keepdims=True)))
output.append(squared_distance)
return output
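The broadcast expansion above can be sanity-checked on a tiny example. This sketch uses standalone tf.* ops instead of the internal math_ops/array_ops modules, with hypothetical values:

import tensorflow as tf

# ||a - b||^2 expanded as ||a||^2 - 2*a.b + ||b||^2; the first and third
# terms broadcast across rows and columns respectively.
inp = tf.constant([[1.0, 2.0]])
clusters = tf.constant([[3.0, 4.0]])
squared_distance = (
    tf.reduce_sum(tf.square(inp), 1, keepdims=True) -
    2 * tf.matmul(inp, clusters, transpose_b=True) +
    tf.transpose(tf.reduce_sum(tf.square(clusters), 1, keepdims=True)))
# Evaluates to [[8.0]], matching (1 - 3)^2 + (2 - 4)^2.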
else:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.weighted_moments(
- inputs, moments_axes, batch_weights, keep_dims=True)
+ inputs, moments_axes, batch_weights, keepdims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
softmax_attention = nn.softmax(features / temperature)
expected_x = math_ops.reduce_sum(
- pos_x * softmax_attention, [1], keep_dims=True)
+ pos_x * softmax_attention, [1], keepdims=True)
expected_y = math_ops.reduce_sum(
- pos_y * softmax_attention, [1], keep_dims=True)
+ pos_y * softmax_attention, [1], keepdims=True)
expected_xy = array_ops.concat([expected_x, expected_y], 1)
feature_keypoints = array_ops.reshape(expected_xy,
[-1, num_channels.value * 2])
"""
with ops.name_scope(name, 'poincare_normalize', [x]) as name:
x = ops.convert_to_tensor(x, name='x')
- square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keep_dims=True)
+ square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
x_inv_norm = math_ops.rsqrt(square_sum)
x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)
return math_ops.multiply(x, x_inv_norm, name=name)
math_ops.reduce_sum(
math_ops.square(feature),
axis=[1],
- keep_dims=True),
+ keepdims=True),
math_ops.reduce_sum(
math_ops.square(
array_ops.transpose(feature)),
axis=[0],
- keep_dims=True)) - 2.0 * math_ops.matmul(
+ keepdims=True)) - 2.0 * math_ops.matmul(
feature, array_ops.transpose(feature))
# Deal with numerical inaccuracies. Set small negatives to zero.
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
- axis_minimums = math_ops.reduce_min(data, dim, keep_dims=True)
+ axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(
- data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums
+ data - axis_minimums, mask), dim, keepdims=True) + axis_minimums
return masked_maximums
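A small numeric check of the shift-by-minimum trick above (hypothetical values, plain tf.* ops): subtracting the per-row minimum makes every entry non-negative, so the zeros produced by the mask can never exceed the maximum over the unmasked entries.

import tensorflow as tf

data = tf.constant([[1.0, 5.0, 3.0]])
mask = tf.constant([[1.0, 0.0, 1.0]])  # the 5.0 is masked out
axis_minimums = tf.reduce_min(data, 1, keepdims=True)  # [[1.0]]
masked_maximums = tf.reduce_max(
    (data - axis_minimums) * mask, 1, keepdims=True) + axis_minimums
# Evaluates to [[3.0]] -- the maximum over unmasked entries only.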
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
- axis_maximums = math_ops.reduce_max(data, dim, keep_dims=True)
+ axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(
- data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums
+ data - axis_maximums, mask), dim, keepdims=True) + axis_maximums
return masked_minimums
math_ops.greater(
math_ops.reduce_sum(
math_ops.cast(
- mask, dtype=dtypes.float32), 1, keep_dims=True),
+ mask, dtype=dtypes.float32), 1, keepdims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
labels_remapped = math_ops.to_float(
math_ops.equal(labels, array_ops.transpose(labels)))
- labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
+ labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
- labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
+ labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
# this is to take the max only among negatives.
- row_minimums = math_ops.reduce_min(diff, 1, keep_dims=True)
+ row_minimums = math_ops.reduce_min(diff, 1, keepdims=True)
row_negative_maximums = math_ops.reduce_max(
math_ops.multiply(
- diff - row_minimums, mask), 1, keep_dims=True) + row_minimums
+ diff - row_minimums, mask), 1, keepdims=True) + row_minimums
# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
math_ops.reduce_sum(math_ops.multiply(
math_ops.exp(
diff_tiled - max_elements_vect),
- mask_tiled), 1, keep_dims=True), [batch_size, batch_size])
+ mask_tiled), 1, keepdims=True), [batch_size, batch_size])
loss_mat = max_elements + math_ops.log(
loss_exp_left + array_ops.transpose(loss_exp_left))
array_ops.reshape(pairwise_distances_candidate, [1, -1])
], 0),
axis=0,
- keep_dims=True), [num_candidates, -1]),
+ keepdims=True), [num_candidates, -1]),
axis=1)
nmi_scores = array_ops.zeros([num_candidates])
overlaps = -(-frame_length // frame_step) # Ceiling division.
denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
denom = array_ops.reshape(denom, [overlaps, frame_step])
- denom = math_ops.reduce_sum(denom, 0, keep_dims=True)
+ denom = math_ops.reduce_sum(denom, 0, keepdims=True)
denom = array_ops.tile(denom, [overlaps, 1])
denom = array_ops.reshape(denom, [overlaps * frame_step])
" \n",
" # Compute the similarity between minibatch examples and all embeddings.\n",
" # We use the cosine distance:\n",
- " norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n",
+ " norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))\n",
" normalized_embeddings = embeddings / norm\n",
" valid_embeddings = tf.nn.embedding_lookup(\n",
" normalized_embeddings, valid_dataset)\n",
def Cell(v):
# If v is a vector [n, 1], x is a big square matrix.
x = math_ops.tanh(v + array_ops.transpose(v, [1, 0]))
- return math_ops.reduce_sum(x, 1, keep_dims=True)
+ return math_ops.reduce_sum(x, 1, keepdims=True)
@function.Defun(dtype)
def Forward(x):
class BaseReductionTest(test.TestCase):
- def _tf_reduce(self, x, reduction_axes, keep_dims):
+ def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
- def _np_reduce(self, x, reduction_axes, keep_dims):
+ def _np_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _makeIncremental(self, shape, dtype):
data -= 2j * data
return data
- def _compare(self, x, reduction_axes, keep_dims, feed_dict=None):
- np_ans = self._np_reduce(x, reduction_axes, keep_dims)
+ def _compare(self, x, reduction_axes, keepdims, feed_dict=None):
+ np_ans = self._np_reduce(x, reduction_axes, keepdims)
with self.test_session(use_gpu=True) as sess:
- tf_ans = self._tf_reduce(x, reduction_axes, keep_dims)
+ tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
- self._compare(x, reduction_axes, keep_dims=False, feed_dict=feed_dict)
- self._compare(x, reduction_axes, keep_dims=True, feed_dict=feed_dict)
+ self._compare(x, reduction_axes, keepdims=False, feed_dict=feed_dict)
+ self._compare(x, reduction_axes, keepdims=True, feed_dict=feed_dict)
def _compareAllAxes(self, x, feed_dict=None):
self._compareAll(x, None)
class SumReductionTest(BaseReductionTest):
- def _tf_reduce(self, x, reduction_axes, keep_dims):
- return math_ops.reduce_sum(x, reduction_axes, keep_dims)
+ def _tf_reduce(self, x, reduction_axes, keepdims):
+ return math_ops.reduce_sum(x, reduction_axes, keepdims)
- def _np_reduce(self, x, reduction_axes, keep_dims):
+ def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
- return np.sum(x, axis=reduction_axes, keepdims=keep_dims)
+ return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
s_known_rank = math_ops.reduce_sum(
- c_known_rank, reduction_axes, keep_dims=True)
+ c_known_rank, reduction_axes, keepdims=True)
self.assertEqual(3, s_known_rank.get_shape().ndims)
np_input = np.random.randn(3, 3, 3)
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
- c_unknown_indices, unknown_indices, keep_dims=False)
+ c_unknown_indices, unknown_indices, keepdims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
- c_unknown_indices, unknown_indices, keep_dims=True)
+ c_unknown_indices, unknown_indices, keepdims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
def testWrongShapeForReductionIndices(self):
class MeanReductionTest(BaseReductionTest):
- def _tf_reduce(self, x, reduction_axes, keep_dims):
- return math_ops.reduce_mean(x, reduction_axes, keep_dims)
+ def _tf_reduce(self, x, reduction_axes, keepdims):
+ return math_ops.reduce_mean(x, reduction_axes, keepdims)
- def _np_reduce(self, x, reduction_axes, keep_dims):
+ def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
# np.mean automatically converts integer inputs to float, while TensorFlow's
# reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
# using np.sum and truncating division.
- np_sum = np.sum(x, axis=reduction_axes, keepdims=keep_dims)
+ np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
if np.issubdtype(x.dtype, np.integer):
return np_sum // count
return np_sum / count
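A quick illustration of the integer-mean behavior the comment above describes (hypothetical values; TF 1.x graph mode assumed):

import numpy as np
import tensorflow as tf

x = np.array([1, 2], dtype=np.int32)
np.mean(x)  # 1.5 -- NumPy promotes integer input to float
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(x)))  # 1 -- TF keeps int32 and truncates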
class ProdReductionTest(BaseReductionTest):
- def _tf_reduce(self, x, reduction_axes, keep_dims):
- return math_ops.reduce_prod(x, reduction_axes, keep_dims)
+ def _tf_reduce(self, x, reduction_axes, keepdims):
+ return math_ops.reduce_prod(x, reduction_axes, keepdims)
- def _np_reduce(self, x, reduction_axes, keep_dims):
+ def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
- return np.prod(x, axis=reduction_axes, keepdims=keep_dims)
+ return np.prod(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
class MinReductionTest(test.TestCase):
- def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
- np_ans = np.amin(np_ans, keepdims=keep_dims)
+ np_ans = np.amin(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
- np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
+ np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = math_ops.reduce_min(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
class MaxReductionTest(test.TestCase):
- def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
- np_ans = np.amax(np_ans, keepdims=keep_dims)
+ np_ans = np.amax(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
- np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
+ np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = math_ops.reduce_max(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
class AllReductionTest(test.TestCase):
- def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
- np_ans = np.all(np_ans, keepdims=keep_dims)
+ np_ans = np.all(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
- np_ans = np.all(np_ans, axis=ra, keepdims=keep_dims)
+ np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = math_ops.reduce_all(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
class AnyReductionTest(test.TestCase):
- def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
- np_ans = np.any(np_ans, keepdims=keep_dims)
+ np_ans = np.any(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
- np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
+ np_ans = np.any(np_ans, axis=ra, keepdims=keepdims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = math_ops.reduce_any(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_any(x, reduction_axes, keepdims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compare(self,
x,
reduction_axes,
- keep_dims,
+ keepdims,
use_gpu=False,
feed_dict=None):
np_ans = (x != 0).astype(np.int32)
if reduction_axes is None:
- np_ans = np.sum(np_ans, keepdims=keep_dims)
+ np_ans = np.sum(np_ans, keepdims=keepdims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
- np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
+ np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)
with self.test_session(use_gpu=use_gpu) as sess:
- tf_ans = math_ops.count_nonzero(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
class BaseReductionTest(test.TestCase):
- def _tf_reduce(self, x, reduction_axes, keep_dims):
+ def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
class BigReductionTest(BaseReductionTest):
"""Test reductions for sum and boolean all over a wide range of shapes."""
- def _tf_reduce_max(self, x, reduction_axes, keep_dims):
- return math_ops.reduce_max(x, reduction_axes, keep_dims)
+ def _tf_reduce_max(self, x, reduction_axes, keepdims):
+ return math_ops.reduce_max(x, reduction_axes, keepdims)
- def _tf_reduce_all(self, x, reduction_axes, keep_dims):
- return math_ops.reduce_all(x, reduction_axes, keep_dims)
+ def _tf_reduce_all(self, x, reduction_axes, keepdims):
+ return math_ops.reduce_all(x, reduction_axes, keepdims)
- def _tf_reduce_mean(self, x, reduction_axes, keep_dims):
- return math_ops.reduce_mean(x, reduction_axes, keep_dims)
+ def _tf_reduce_mean(self, x, reduction_axes, keepdims):
+ return math_ops.reduce_mean(x, reduction_axes, keepdims)
- def _tf_reduce_sum(self, x, reduction_axes, keep_dims):
- return math_ops.reduce_sum(x, reduction_axes, keep_dims)
+ def _tf_reduce_sum(self, x, reduction_axes, keepdims):
+ return math_ops.reduce_sum(x, reduction_axes, keepdims)
def testFloat32Sum(self):
# make sure we test all possible kernel invocations
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
- l2norm = math_ops.sqrt(math_ops.reduce_sum(t * t, axes, keep_dims=True))
+ l2norm = math_ops.sqrt(math_ops.reduce_sum(t * t, axes, keepdims=True))
intermediate = t * clip_norm
# Assert that the shape is compatible with the initial shape,
# to prevent unintentional broadcasting.
if per_batch:
return math_ops.reduce_sum(
present, axis=math_ops.range(1, array_ops.rank(present)),
- keep_dims=True, name=scope)
+ keepdims=True, name=scope)
return math_ops.reduce_sum(present, name=scope)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
- losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keep_dims=True)
+ losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs), reduction_indices=reduction_indices,
- keep_dims=True)
+ keepdims=True)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
num_present_per_batch - 1)
sum_diff = math_ops.reduce_sum(
- diffs, reduction_indices=reduction_indices, keep_dims=True)
+ diffs, reduction_indices=reduction_indices, keepdims=True)
term2 = 2.0 * _safe_div(
math_ops.square(sum_diff),
math_ops.multiply(num_present_per_batch, num_present_per_batch - 1))
grad_y = math_ops.cast(grad_y, dtypes.float32)
if is_training:
if data_format == b"NHWC":
- keep_dims = False
+ keepdims = False
reduce_axis = [0, 1, 2]
else:
- keep_dims = True
+ keepdims = True
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(scale), 1, 1]
scale = array_ops.reshape(scale, shape)
- mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keep_dims=keep_dims)
- mean_x = math_ops.reduce_mean(x, reduce_axis, keep_dims=keep_dims)
+ mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)
+ mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)
var_x = math_ops.reduce_mean(
math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
reduce_axis,
- keep_dims=keep_dims)
+ keepdims=keepdims)
grad_y_offset = grad_y - mean_grad_y
x_offset = x - mean_x
mean = math_ops.reduce_mean(
- grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
+ grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
- grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
+ grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
if data_format == b"NCHW":
grad_scale = array_ops.squeeze(grad_scale)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)