y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
+ def testNameScopeWorksCorrectly(self):
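+ """Checks that `name` arguments yield unique op and distribution names."""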
+ x = tfd.Normal(loc=0., scale=1., name="x")
+ x_duplicate = tfd.Normal(loc=0., scale=1., name="x")
+ with ops.name_scope("y") as name:
+ y = tfd.Bernoulli(logits=0., name=name)
+ x_sample = x.sample(name="custom_sample")
+ x_sample_duplicate = x.sample(name="custom_sample")
+ x_log_prob = x.log_prob(0., name="custom_log_prob")
+
+ self.assertEqual(x.name, "x")
+ self.assertEqual(x_duplicate.name, "x_1")
+ self.assertEqual(y.name, "y")
+ self.assertTrue(x_sample.name.startswith("x/custom_sample"))
+ self.assertTrue(x_sample_duplicate.name.startswith("x/custom_sample_1"))
+ self.assertTrue(x_log_prob.name.startswith("x/custom_log_prob"))
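+ # Note: `y.name` is exactly "y" (not "y_1") because a fully qualified scope
+ # name ending in '/' is adopted verbatim, minus the trailing slash.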
+
def testStrWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
ValueError: if `num_steps < 1`.
"""
parameters = locals()
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
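+ # `as name` captures the scope's uniquified form (e.g. "Name_1/"); passing it
+ # on lets the Distribution base class adopt the actual scope as `self.name`.
+ # The same pattern repeats in every constructor below.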
self._distribution_fn = distribution_fn
self._sample0 = sample0
self._distribution0 = (distribution_fn() if sample0 is None
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[total_count, logits, probs]):
+ with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._total_count = self._maybe_assert_valid_total_count(
ops.convert_to_tensor(total_count, name="total_count"),
validate_args)
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, scale]):
+ with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)]
if validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
# not true in the parent class "Gamma." Therefore, passing
# allow_nan_stats=True
# through to the parent class results in unnecessary asserts.
- with ops.name_scope(name, values=[df]):
+ with ops.name_scope(name, values=[df]) as name:
with ops.control_dependencies([
check_ops.assert_positive(df),
] if validate_args else []):
allow_nan_stats=True,
name="Chi2WithAbsDf"):
parameters = locals()
- with ops.name_scope(name, values=[df]):
+ with ops.name_scope(name, values=[df]) as name:
super(Chi2WithAbsDf, self).__init__(
df=math_ops.floor(
math_ops.abs(df, name="abs_df"),
ValueError: If `loc` is a scalar.
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, atol, rtol]):
+ with ops.name_scope(name, values=[loc, atol, rtol]) as name:
loc = ops.convert_to_tensor(loc, name="loc")
if is_vector and validate_args:
msg = "Argument loc must be at least rank 1."
"""
parameters = locals()
- with ops.name_scope(name, values=[logits, probs]):
+ with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, scale]):
+ with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[scale]):
+ with ops.name_scope(name, values=[scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._scale = array_ops.identity(scale, name="scale")
parameters = locals()
name = name or "Independent" + distribution.name
self._distribution = distribution
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = self._get_default_reinterpreted_batch_ndims(
distribution)
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = locals()
- with ops.name_scope(name, values=[concentration, rate]):
+ with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
allow_nan_stats=True,
name="InverseGammaWithSoftplusConcentrationRate"):
parameters = locals()
- with ops.name_scope(name, values=[concentration, rate]):
+ with ops.name_scope(name, values=[concentration, rate]) as name:
super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
- concentration1 = ops.convert_to_tensor(
- concentration1, name="concentration1")
- concentration0 = ops.convert_to_tensor(
- concentration0, name="concentration0")
+ with ops.name_scope(name, values=[concentration1, concentration0]) as name:
+ concentration1 = ops.convert_to_tensor(
+ concentration1, name="concentration1")
+ concentration0 = ops.convert_to_tensor(
+ concentration0, name="concentration0")
super(Kumaraswamy, self).__init__(
distribution=uniform.Uniform(
low=array_ops.zeros([], dtype=concentration1.dtype),
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, scale]):
+ with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
- with ops.name_scope(name, values=[cat.logits]):
+ with ops.name_scope(name, values=[cat.logits]) as name:
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
`components_distribution` rightmost batch shape.
"""
parameters = locals()
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
self._runtime_assertions = []
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = locals()
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusScale"):
parameters = locals()
- with ops.name_scope(name, values=[scale_diag]):
+ with ops.name_scope(name, values=[scale_diag]) as name:
super(MultivariateNormalDiagWithSoftplusScale, self).__init__(
loc=loc,
scale_diag=nn.softplus(scale_diag),
parameters = locals()
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier, scale_perturb_factor,
scale_perturb_diag]):
parameters = locals()
# Convert the covariance_matrix up to a scale_tril and call MVNTriL.
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=[loc, covariance_matrix]):
if covariance_matrix is None:
scale_tril = None
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
- with ops.name_scope(name, values=[loc] + scale.graph_parents):
+ with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
return None if x is None else ops.convert_to_tensor(x, name=name)
if loc is None and scale_tril is None:
raise ValueError("Must specify one or both of `loc`, `scale_tril`.")
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=[loc, scale_tril]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
"""
parameters = locals()
- with ops.name_scope(name, values=[total_count, logits, probs]):
+ with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[logits, probs]):
+ with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
name=name, logits=logits, probs=probs, validate_args=validate_args,
multidimensional=True)
TypeError: if `log_rate` is not a float-type.
"""
parameters = locals()
- with ops.name_scope(name, values=[rate]):
+ with ops.name_scope(name, values=[rate]) as name:
if (rate is None) == (log_rate is None):
raise ValueError("Must specify exactly one of `rate` and `log_rate`.")
elif log_rate is None:
`dtype`.
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, scale]):
+ with ops.name_scope(name, values=[loc, scale]) as name:
if loc is not None:
loc = ops.convert_to_tensor(loc, name="loc")
if scale is not None:
values = (
list(distribution.parameters.values()) +
[low, high])
- with ops.name_scope(name, values=values):
+ with ops.name_scope(name, values=values) as name:
self._dist = distribution
if low is not None:
ValueError: If both `probs` and `logits` are passed, or if neither.
"""
parameters = locals()
- with ops.name_scope(name, values=[logits, probs, temperature]):
+ with ops.name_scope(name, values=[logits, probs, temperature]) as name:
with ops.control_dependencies([check_ops.assert_positive(temperature)]
if validate_args else []):
self._temperature = array_ops.identity(temperature, name="temperature")
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[logits, probs, temperature]):
+ with ops.name_scope(name, values=[logits, probs, temperature]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
name=name, logits=logits, probs=probs, validate_args=validate_args,
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, scale, skewness, tailweight]):
+ with ops.name_scope(name,
+ values=[loc, scale, skewness, tailweight]) as name:
loc = ops.convert_to_tensor(loc, name="loc")
dtype = loc.dtype
scale = ops.convert_to_tensor(scale, name="scale", dtype=dtype)
ValueError: if `not distribution.is_scalar_event`.
"""
parameters = locals()
- with ops.name_scope(name, values=[mix_loc, temperature]):
+ with ops.name_scope(name, values=[mix_loc, temperature]) as name:
if not scale or len(scale) < 2:
raise ValueError("Must specify list (or list-like object) of scale "
"LinearOperators, one for each component with "
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = locals()
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
- with ops.name_scope(name, values=[loc] + scale.graph_parents):
+ with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
name,
values=[
loc, scale_diag, scale_identity_multiplier, skewness, tailweight
- ]):
+ ]) as name:
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
tailweight = 1. if tailweight is None else tailweight
has_default_skewness = skewness is None
parameters = locals()
graph_parents = [df, loc, scale_identity_multiplier, scale_diag,
scale_tril, scale_perturb_factor, scale_perturb_diag]
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=graph_parents):
# The shape of the _VectorStudentT distribution is governed by the
# relationship between df.batch_shape and affine.batch_shape. In
"""
parameters = locals()
self._cholesky_input_output_matrices = cholesky_input_output_matrices
- with ops.name_scope(name) as ns:
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=[df, scale_operator]):
if not scale_operator.dtype.is_floating:
raise TypeError(
parameters=parameters,
graph_parents=([self._df, self._dimension] +
self._scale_operator.graph_parents),
- name=ns)
+ name=name)
@property
def df(self):
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[scale]):
+ with ops.name_scope(name, values=[scale]) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name) as ns:
+ with ops.name_scope(name) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
- name=ns)
+ name=name)
self._parameters = parameters
ValueError: If p and logits are passed, or if neither are passed.
"""
parameters = locals()
- with ops.name_scope(name):
+ with ops.name_scope(name) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[concentration1, concentration0]):
+ with ops.name_scope(name, values=[concentration1, concentration0]) as name:
self._concentration1 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration1, name="concentration1"),
validate_args)
name="BetaWithSoftplusConcentration"):
parameters = locals()
with ops.name_scope(name, values=[concentration1,
- concentration0]) as ns:
+ concentration0]) as name:
super(BetaWithSoftplusConcentration, self).__init__(
concentration1=nn.softplus(concentration1,
name="softplus_concentration1"),
name="softplus_concentration0"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
- name=ns)
+ name=name)
self._parameters = parameters
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[logits, probs]):
+ with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[concentration]):
+ with ops.name_scope(name, values=[concentration]) as name:
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration, name="concentration"),
validate_args)
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[total_count, concentration]):
+ with ops.name_scope(name, values=[total_count, concentration]) as name:
# Broadcasting works because:
# * The broadcasting convention is to prepend dimensions of size [1], and
# we use the last dimension for the distribution, whereas
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
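+ # When no name is given, mint one from the class name via `name_scope` so
+ # repeated instantiations get unique suffixes ("Normal", "Normal_1", ...).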
+ if name is None:
+ with ops.name_scope(type(self).__name__) as name:
+ pass
+ # A name that ends in '/' is a "name scope"; it is used verbatim after the
+ # trailing '/' is stripped.
+ name = name[:-1] if (name and name[-1] == "/") else name
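+ # E.g. a second `Normal(..., name="x")` enters scope "x_1/"; stripping the
+ # trailing '/' gives `self.name == "x_1"` (see testNameScopeWorksCorrectly).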
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
- self._name = name or type(self).__name__
+ self._name = name
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
- with ops.name_scope(self.name):
+ with ops.name_scope(self.name + "/"): # use absolute name scope
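+ # The trailing '/' makes this an absolute scope: it re-enters `self.name`
+ # verbatim instead of uniquifying it to e.g. "x_1", so ops created by
+ # sample()/log_prob() nest under the distribution's own scope.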
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
# true in the parent class "Gamma." Therefore, passing
# allow_nan_stats=True
# through to the parent class results in unnecessary asserts.
- with ops.name_scope(name, values=[rate]):
+ with ops.name_scope(name, values=[rate]) as name:
self._rate = ops.convert_to_tensor(rate, name="rate")
super(Exponential, self).__init__(
concentration=array_ops.ones([], dtype=self._rate.dtype),
allow_nan_stats=True,
name="ExponentialWithSoftplusRate"):
parameters = locals()
- with ops.name_scope(name, values=[rate]):
+ with ops.name_scope(name, values=[rate]) as name:
super(ExponentialWithSoftplusRate, self).__init__(
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = locals()
- with ops.name_scope(name, values=[concentration, rate]):
+ with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
allow_nan_stats=True,
name="GammaWithSoftplusConcentrationRate"):
parameters = locals()
- with ops.name_scope(name, values=[concentration, rate]):
+ with ops.name_scope(name, values=[concentration, rate]) as name:
super(GammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
TypeError: if `loc` and `scale` are of different dtype.
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, scale]):
+ with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
allow_nan_stats=True,
name="LaplaceWithSoftplusScale"):
parameters = locals()
- with ops.name_scope(name, values=[loc, scale]):
+ with ops.name_scope(name, values=[loc, scale]) as name:
super(LaplaceWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
- with ops.name_scope(name, values=[total_count, logits, probs]):
+ with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._total_count = ops.convert_to_tensor(total_count, name="total_count")
if validate_args:
self._total_count = (
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = locals()
- with ops.name_scope(name, values=[loc, scale]):
+ with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
allow_nan_stats=True,
name="NormalWithSoftplusScale"):
parameters = locals()
- with ops.name_scope(name, values=[scale]):
+ with ops.name_scope(name, values=[scale]) as name:
super(NormalWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
- with ops.name_scope(name, values=[df, loc, scale]):
+ with ops.name_scope(name, values=[df, loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(df)]
if validate_args else []):
self._df = array_ops.identity(df, name="df")
allow_nan_stats=True,
name="StudentTWithAbsDfSoftplusScale"):
parameters = locals()
- with ops.name_scope(name, values=[df, scale]):
+ with ops.name_scope(name, values=[df, scale]) as name:
super(StudentTWithAbsDfSoftplusScale, self).__init__(
df=math_ops.floor(math_ops.abs(df)),
loc=loc,
parameters = locals()
name = name or (("" if bijector is None else bijector.name) +
distribution.name)
- with ops.name_scope(name, values=[event_shape, batch_shape]):
+ with ops.name_scope(name, values=[event_shape, batch_shape]) as name:
# For convenience we define some handy constants.
self._zero = constant_op.constant(0, dtype=dtypes.int32, name="zero")
self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty")
InvalidArgumentError: if `low >= high` and `validate_args=False`.
"""
parameters = locals()
- with ops.name_scope(name, values=[low, high]):
+ with ops.name_scope(name, values=[low, high]) as name:
with ops.control_dependencies([
check_ops.assert_less(
low, high, message="uniform not defined when low >= high.")