train_op = lr.minimize()
- def Minimize():
+ def minimize():
with self._single_threaded_test_session():
for _ in range(_MAX_ITERATIONS):
- train_op.run()
+ train_op.run() # pylint: disable=cell-var-from-loop
threads = []
for _ in range(num_loss_partitions):
- threads.append(threading.Thread(target=Minimize))
+ threads.append(threading.Thread(target=minimize))
threads[-1].start()
for t in threads:
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(
- 0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
+ 0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
make_example_proto({
'age': [1],
'gender': [1]
- }, 1),
+ }, 0.9),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
}
attr {
name: "adaptative"
+ default_value {
+ b: true
+ }
description: <<END
-Whether to use Adapative SDCA for the inner loop.
+Whether to use Adaptive SDCA for the inner loop.
END
}
attr {
}
// Examples contains all the training examples that SDCA uses for a mini-batch.
-Status Examples::SampleAdaptativeProbabilities(
+Status Examples::SampleAdaptiveProbabilities(
const int num_loss_partitions, const Regularizations& regularization,
const ModelWeights& model_weights,
const TTypes<float>::Matrix example_state_data,
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_KERNELS_SDCA_INTERNAL_H_
-#define TENSORFLOW_KERNELS_SDCA_INTERNAL_H_
+#ifndef TENSORFLOW_CORE_KERNELS_SDCA_INTERNAL_H_
+#define TENSORFLOW_CORE_KERNELS_SDCA_INTERNAL_H_
#define EIGEN_USE_THREADS
class Regularizations {
public:
- Regularizations(){};
+ Regularizations() {}
// Initialize() must be called immediately after construction.
Status Initialize(OpKernelConstruction* const context) {
FeatureWeightsDenseStorage(const TTypes<const float>::Matrix nominals,
TTypes<float>::Matrix deltas)
: nominals_(nominals), deltas_(deltas) {
- CHECK(deltas.rank() > 1);
+ CHECK_GT(deltas.rank(), 1);
}
// Check if a feature index is with-in the bounds.
return examples_.at(example_index);
}
- int sampled_index(const int id, const bool adaptative) const {
- if (adaptative) return sampled_index_[id];
+ int sampled_index(const int id, const bool adaptive) const {
+ if (adaptive) return sampled_index_[id];
return id;
}
// Adaptive SDCA in the current implementation only works for
// binary classification, where the input argument for num_weight_vectors
// is 1.
- Status SampleAdaptativeProbabilities(
+ Status SampleAdaptiveProbabilities(
const int num_loss_partitions, const Regularizations& regularization,
const ModelWeights& model_weights,
const TTypes<float>::Matrix example_state_data,
// All examples in the batch.
std::vector<Example> examples_;
- // Adaptative sampling variables
+ // Adaptive sampling variables.
std::vector<float> probabilities_;
std::vector<int> sampled_index_;
std::vector<int> sampled_count_;
} // namespace sdca
} // namespace tensorflow
-#endif // TENSORFLOW_KERNELS_SDCA_INTERNAL_H_
+#endif // TENSORFLOW_CORE_KERNELS_SDCA_INTERNAL_H_
context, false,
errors::InvalidArgument("Unsupported loss type: ", loss_type));
}
- OP_REQUIRES_OK(context, context->GetAttr("adaptative", &adaptative));
+ OP_REQUIRES_OK(context, context->GetAttr("adaptative", &adaptive));
OP_REQUIRES_OK(
context, context->GetAttr("num_sparse_features", &num_sparse_features));
OP_REQUIRES_OK(context, context->GetAttr("num_sparse_features_with_values",
int num_dense_features = 0;
int num_inner_iterations = 0;
int num_loss_partitions = 0;
- bool adaptative = false;
+ bool adaptive = true;
Regularizations regularizations;
};
OP_REQUIRES_OK(context, context->set_output("out_example_state_data",
mutable_example_state_data_t));
- if (options.adaptative) {
+ if (options.adaptive) {
OP_REQUIRES_OK(context,
- examples.SampleAdaptativeProbabilities(
+ examples.SampleAdaptiveProbabilities(
options.num_loss_partitions, options.regularizations,
model_weights, example_state_data, options.loss_updater,
/*num_weight_vectors =*/1));
// num_examples which is an int.
for (int id = static_cast<int>(begin); id < end; ++id) {
const int64 example_index =
- examples.sampled_index(++atomic_index, options.adaptative);
+ examples.sampled_index(++atomic_index, options.adaptive);
const Example& example = examples.example(example_index);
const float dual = example_state_data(example_index, 0);
const float example_weight = example.example_weight();
}
member_method {
name: "sdca_optimizer"
- argspec: "args=[\'sparse_example_indices\', \'sparse_feature_indices\', \'sparse_feature_values\', \'dense_features\', \'example_weights\', \'example_labels\', \'sparse_indices\', \'sparse_weights\', \'dense_weights\', \'example_state_data\', \'loss_type\', \'l1\', \'l2\', \'num_loss_partitions\', \'num_inner_iterations\', \'adaptative\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ argspec: "args=[\'sparse_example_indices\', \'sparse_feature_indices\', \'sparse_feature_values\', \'dense_features\', \'example_weights\', \'example_labels\', \'sparse_indices\', \'sparse_weights\', \'dense_weights\', \'example_state_data\', \'loss_type\', \'l1\', \'l2\', \'num_loss_partitions\', \'num_inner_iterations\', \'adaptative\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
member_method {
name: "sdca_shrink_l1"