From b34c2788fc4b879f47048f2cac6139b6052d5f1b Mon Sep 17 00:00:00 2001 From: Asim Shankar Date: Wed, 21 Feb 2018 12:32:52 -0800 Subject: [PATCH] eager/mnist: Point to the example in tensorflow/models instead. PiperOrigin-RevId: 186502375 --- tensorflow/contrib/eager/python/examples/BUILD | 1 - .../contrib/eager/python/examples/mnist/BUILD | 36 --- .../contrib/eager/python/examples/mnist/README.md | 11 +- .../contrib/eager/python/examples/mnist/mnist.py | 264 --------------------- .../python/examples/mnist/mnist_graph_test.py | 65 ----- .../eager/python/examples/mnist/mnist_test.py | 80 ------- tensorflow/contrib/eager/python/g3doc/guide.md | 12 +- 7 files changed, 6 insertions(+), 463 deletions(-) delete mode 100644 tensorflow/contrib/eager/python/examples/mnist/BUILD delete mode 100644 tensorflow/contrib/eager/python/examples/mnist/mnist.py delete mode 100644 tensorflow/contrib/eager/python/examples/mnist/mnist_graph_test.py delete mode 100644 tensorflow/contrib/eager/python/examples/mnist/mnist_test.py diff --git a/tensorflow/contrib/eager/python/examples/BUILD b/tensorflow/contrib/eager/python/examples/BUILD index 15a2188..c1fd9e0 100644 --- a/tensorflow/contrib/eager/python/examples/BUILD +++ b/tensorflow/contrib/eager/python/examples/BUILD @@ -8,7 +8,6 @@ py_library( deps = [ "//tensorflow/contrib/eager/python/examples/gan:mnist", "//tensorflow/contrib/eager/python/examples/linear_regression", - "//tensorflow/contrib/eager/python/examples/mnist", "//tensorflow/contrib/eager/python/examples/resnet50", "//tensorflow/contrib/eager/python/examples/rnn_colorbot", "//tensorflow/contrib/eager/python/examples/rnn_ptb", diff --git a/tensorflow/contrib/eager/python/examples/mnist/BUILD b/tensorflow/contrib/eager/python/examples/mnist/BUILD deleted file mode 100644 index c61ec2d..0000000 --- a/tensorflow/contrib/eager/python/examples/mnist/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -licenses(["notice"]) # Apache 2.0 - -package(default_visibility = ["//tensorflow:internal"]) - 
-load("//tensorflow:tensorflow.bzl", "cuda_py_test") - -py_binary( - name = "mnist", - srcs = ["mnist.py"], - srcs_version = "PY2AND3", - deps = [ - "//tensorflow:tensorflow_py", - "//tensorflow/contrib/eager/python:tfe", - "//tensorflow/examples/tutorials/mnist:input_data", - ], -) - -cuda_py_test( - name = "mnist_test", - srcs = ["mnist_test.py"], - additional_deps = [ - ":mnist", - "//tensorflow/contrib/eager/python:tfe", - "//tensorflow:tensorflow_py", - ], -) - -cuda_py_test( - name = "mnist_graph_test", - srcs = ["mnist_graph_test.py"], - additional_deps = [ - ":mnist", - "//third_party/py/numpy", - "//tensorflow:tensorflow_py", - ], -) diff --git a/tensorflow/contrib/eager/python/examples/mnist/README.md b/tensorflow/contrib/eager/python/examples/mnist/README.md index e987996..d1c079f 100644 --- a/tensorflow/contrib/eager/python/examples/mnist/README.md +++ b/tensorflow/contrib/eager/python/examples/mnist/README.md @@ -1,10 +1 @@ -Classification model for the MNIST dataset using eager execution. - -To run: - -``` -python mnist.py -``` - -`mnist_graph_test.py` demonstrates that the same code that is executed eagerly -in `mnist.py` is used to construct a TensorFlow graph. +See https://github.com/tensorflow/models/tree/master/official/mnist/mnist_eager.py diff --git a/tensorflow/contrib/eager/python/examples/mnist/mnist.py b/tensorflow/contrib/eager/python/examples/mnist/mnist.py deleted file mode 100644 index 58b1e89..0000000 --- a/tensorflow/contrib/eager/python/examples/mnist/mnist.py +++ /dev/null @@ -1,264 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A deep MNIST classifier using convolutional layers. - -Sample usage: - python mnist.py --help -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import os -import sys -import time - -import tensorflow as tf - -import tensorflow.contrib.eager as tfe -from tensorflow.examples.tutorials.mnist import input_data - -FLAGS = None - - -class MNISTModel(tf.keras.Model): - """MNIST Network. - - Network structure is equivalent to: - https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/examples/tutorials/mnist/mnist_deep.py - and - https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py - - But written using the tf.layers API. - """ - - def __init__(self, data_format): - """Creates a model for classifying a hand-written digit. - - Args: - data_format: Either 'channels_first' or 'channels_last'. - 'channels_first' is typically faster on GPUs while 'channels_last' is - typically faster on CPUs. 
See - https://www.tensorflow.org/performance/performance_guide#data_formats - """ - super(MNISTModel, self).__init__(name='') - if data_format == 'channels_first': - self._input_shape = [-1, 1, 28, 28] - else: - assert data_format == 'channels_last' - self._input_shape = [-1, 28, 28, 1] - self.conv1 = tf.layers.Conv2D( - 32, 5, data_format=data_format, activation=tf.nn.relu) - self.conv2 = tf.layers.Conv2D( - 64, 5, data_format=data_format, activation=tf.nn.relu) - self.fc1 = tf.layers.Dense(1024, activation=tf.nn.relu) - self.fc2 = tf.layers.Dense(10) - self.dropout = tf.layers.Dropout(0.5) - self.max_pool2d = tf.layers.MaxPooling2D( - (2, 2), (2, 2), padding='SAME', data_format=data_format) - - def call(self, inputs, training=False): - """Computes labels from inputs. - - Users should invoke __call__ to run the network, which delegates to this - method (and not call this method directly). - - Args: - inputs: A batch of images as a Tensor with shape [batch_size, 784]. - training: True if invoked in the context of training (causing dropout to - be applied). False otherwise. - - Returns: - A Tensor with shape [batch_size, 10] containing the predicted logits - for each image in the batch, for each of the 10 classes. 
- """ - - x = tf.reshape(inputs, self._input_shape) - x = self.conv1(x) - x = self.max_pool2d(x) - x = self.conv2(x) - x = self.max_pool2d(x) - x = tf.layers.flatten(x) - x = self.fc1(x) - x = self.dropout(x, training=training) - x = self.fc2(x) - return x - - -def loss(predictions, labels): - return tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( - logits=predictions, labels=labels)) - - -def compute_accuracy(predictions, labels): - return tf.reduce_sum( - tf.cast( - tf.equal( - tf.argmax(predictions, axis=1, - output_type=tf.int64), - tf.argmax(labels, axis=1, - output_type=tf.int64)), - dtype=tf.float32)) / float(predictions.shape[0].value) - - -def train_one_epoch(model, optimizer, dataset, log_interval=None): - """Trains model on `dataset` using `optimizer`.""" - - tf.train.get_or_create_global_step() - - for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)): - with tf.contrib.summary.record_summaries_every_n_global_steps(10): - with tfe.GradientTape() as tape: - prediction = model(images, training=True) - loss_value = loss(prediction, labels) - tf.contrib.summary.scalar('loss', loss_value) - tf.contrib.summary.scalar('accuracy', - compute_accuracy(prediction, labels)) - grads = tape.gradient(loss_value, model.variables) - optimizer.apply_gradients(zip(grads, model.variables)) - if log_interval and batch % log_interval == 0: - print('Batch #%d\tLoss: %.6f' % (batch, loss_value)) - - -def test(model, dataset): - """Perform an evaluation of `model` on the examples from `dataset`.""" - avg_loss = tfe.metrics.Mean('loss') - accuracy = tfe.metrics.Accuracy('accuracy') - - for (images, labels) in tfe.Iterator(dataset): - predictions = model(images, training=False) - avg_loss(loss(predictions, labels)) - accuracy(tf.argmax(predictions, axis=1, output_type=tf.int64), - tf.argmax(labels, axis=1, output_type=tf.int64)) - print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' % - (avg_loss.result(), 100 * accuracy.result())) - with 
tf.contrib.summary.always_record_summaries(): - tf.contrib.summary.scalar('loss', avg_loss.result()) - tf.contrib.summary.scalar('accuracy', accuracy.result()) - - -def load_data(data_dir): - """Returns training and test tf.data.Dataset objects.""" - data = input_data.read_data_sets(data_dir, one_hot=True) - train_ds = tf.data.Dataset.from_tensor_slices((data.train.images, - data.train.labels)) - test_ds = tf.data.Dataset.from_tensors((data.test.images, data.test.labels)) - return (train_ds, test_ds) - - -def main(_): - tfe.enable_eager_execution() - - (device, data_format) = ('/gpu:0', 'channels_first') - if FLAGS.no_gpu or tfe.num_gpus() <= 0: - (device, data_format) = ('/cpu:0', 'channels_last') - print('Using device %s, and data format %s.' % (device, data_format)) - - # Load the datasets - (train_ds, test_ds) = load_data(FLAGS.data_dir) - train_ds = train_ds.shuffle(60000).batch(FLAGS.batch_size) - - # Create the model and optimizer - model = MNISTModel(data_format) - optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum) - - if FLAGS.output_dir: - train_dir = os.path.join(FLAGS.output_dir, 'train') - test_dir = os.path.join(FLAGS.output_dir, 'eval') - tf.gfile.MakeDirs(FLAGS.output_dir) - else: - train_dir = None - test_dir = None - summary_writer = tf.contrib.summary.create_file_writer( - train_dir, flush_millis=10000) - test_summary_writer = tf.contrib.summary.create_file_writer( - test_dir, flush_millis=10000, name='test') - checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt') - - with tf.device(device): - for epoch in range(1, 11): - with tfe.restore_variables_on_create( - tf.train.latest_checkpoint(FLAGS.checkpoint_dir)): - global_step = tf.train.get_or_create_global_step() - start = time.time() - with summary_writer.as_default(): - train_one_epoch(model, optimizer, train_ds, FLAGS.log_interval) - end = time.time() - print('\nTrain time for epoch #%d (global step %d): %f' % ( - epoch, global_step.numpy(), end - start)) - with 
test_summary_writer.as_default(): - test(model, test_ds) - all_variables = ( - model.variables - + optimizer.variables() - + [global_step]) - tfe.Saver(all_variables).save( - checkpoint_prefix, global_step=global_step) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument( - '--data-dir', - type=str, - default='/tmp/tensorflow/mnist/input_data', - help='Directory for storing input data') - parser.add_argument( - '--batch-size', - type=int, - default=64, - metavar='N', - help='input batch size for training (default: 64)') - parser.add_argument( - '--log-interval', - type=int, - default=10, - metavar='N', - help='how many batches to wait before logging training status') - parser.add_argument( - '--output_dir', - type=str, - default=None, - metavar='N', - help='Directory to write TensorBoard summaries') - parser.add_argument( - '--checkpoint_dir', - type=str, - default='/tmp/tensorflow/mnist/checkpoints/', - metavar='N', - help='Directory to save checkpoints in (once per epoch)') - parser.add_argument( - '--lr', - type=float, - default=0.01, - metavar='LR', - help='learning rate (default: 0.01)') - parser.add_argument( - '--momentum', - type=float, - default=0.5, - metavar='M', - help='SGD momentum (default: 0.5)') - parser.add_argument( - '--no-gpu', - action='store_true', - default=False, - help='disables GPU usage even if a GPU is available') - - FLAGS, unparsed = parser.parse_known_args() - tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/tensorflow/contrib/eager/python/examples/mnist/mnist_graph_test.py b/tensorflow/contrib/eager/python/examples/mnist/mnist_graph_test.py deleted file mode 100644 index 1af2655..0000000 --- a/tensorflow/contrib/eager/python/examples/mnist/mnist_graph_test.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf -from tensorflow.contrib.eager.python.examples.mnist import mnist - - -def data_format(): - return "channels_first" if tf.test.is_gpu_available() else "channels_last" - - -class MNISTGraphTest(tf.test.TestCase): - - def testTrainGraph(self): - # The MNISTModel class can be executed eagerly (as in mnist.py and - # mnist_test.py) and also be used to construct a TensorFlow graph, which is - # then trained in a session. - with tf.Graph().as_default(): - # Generate some random data. - batch_size = 64 - images = np.random.randn(batch_size, 784).astype(np.float32) - digits = np.random.randint(low=0, high=10, size=batch_size) - labels = np.zeros((batch_size, 10)) - labels[np.arange(batch_size), digits] = 1. - - # Create a model, optimizer, and dataset as would be done - # for eager execution as well. - model = mnist.MNISTModel(data_format()) - optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) - dataset = tf.data.Dataset.from_tensors((images, labels)) - - # Define the loss tensor (as opposed to a loss function when - # using eager execution). 
- (images, labels) = dataset.make_one_shot_iterator().get_next() - predictions = model(images, training=True) - loss = mnist.loss(predictions, labels) - - train_op = optimizer.minimize(loss) - init = tf.global_variables_initializer() - with tf.Session() as sess: - # Variables have to be initialized in the session. - sess.run(init) - # Train using the optimizer. - sess.run(train_op) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow/contrib/eager/python/examples/mnist/mnist_test.py b/tensorflow/contrib/eager/python/examples/mnist/mnist_test.py deleted file mode 100644 index 136085e..0000000 --- a/tensorflow/contrib/eager/python/examples/mnist/mnist_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -import tensorflow.contrib.eager as tfe -from tensorflow.contrib.eager.python.examples.mnist import mnist - - -def device(): - return "/device:GPU:0" if tfe.num_gpus() else "/device:CPU:0" - - -def data_format(): - return "channels_first" if tfe.num_gpus() else "channels_last" - - -def random_dataset(): - batch_size = 64 - images = tf.random_normal([batch_size, 784]) - digits = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32) - labels = tf.one_hot(digits, 10) - return tf.data.Dataset.from_tensors((images, labels)) - - -def train_one_epoch(defun=False): - model = mnist.MNISTModel(data_format()) - if defun: - model.call = tfe.defun(model.call) - optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) - dataset = random_dataset() - with tf.device(device()): - tf.train.get_or_create_global_step() - mnist.train_one_epoch(model, optimizer, dataset) - - -def evaluate(defun=False): - model = mnist.MNISTModel(data_format()) - dataset = random_dataset() - if defun: - model.call = tfe.defun(model.call) - with tf.device(device()): - tf.train.get_or_create_global_step() - mnist.test(model, dataset) - - -class MNISTTest(tf.test.TestCase): - - def testTrainOneEpoch(self): - train_one_epoch(defun=False) - - def testTest(self): - evaluate(defun=False) - - def testTrainOneEpochWithDefunCall(self): - train_one_epoch(defun=True) - - def testTestWithDefunCall(self): - evaluate(defun=True) - - -if __name__ == "__main__": - tfe.enable_eager_execution() - tf.test.main() diff --git a/tensorflow/contrib/eager/python/g3doc/guide.md b/tensorflow/contrib/eager/python/g3doc/guide.md index ffc1d03..4724aa4 100644 --- a/tensorflow/contrib/eager/python/g3doc/guide.md +++ b/tensorflow/contrib/eager/python/g3doc/guide.md @@ -570,8 +570,8 @@ for i in 
range(20001): print("Loss on test set: %f" % loss(model, data.test.images, data.test.labels).numpy()) ``` -For a more complete example, see -[`tensorflow/contrib/eager/python/examples/mnist.py`](https://www.tensorflow.org/code/tensorflow/contrib/eager/python/examples/mnist/mnist.py) +For a more complete example, see [the example in the tensorflow/models +repository](https://github.com/tensorflow/models/tree/master/official/mnist/mnist_eager.py). ### Checkpointing trained variables @@ -860,11 +860,9 @@ eagerly or constructing graphs. This means that you can iteratively develop your model with eager execution enabled and later, if needed, use the same code to reap the benefits of representing models as computational graphs. -For example, -[`mnist.py`](https://www.tensorflow.org/code/tensorflow/contrib/eager/python/examples/mnist/mnist.py) -defines a model that is eagerly executed. That same code is used to construct -and execute a graph in -[`mnist_graph_test.py`](https://www.tensorflow.org/code/tensorflow/contrib/eager/python/examples/mnist/mnist_graph_test.py). +For example, the same model definition used to construct a graph in +[`mnist.py`](https://github.com/tensorflow/models/tree/master/official/mnist/mnist.py) +can be trained with eager execution enabled as in [`mnist_eager.py`](https://github.com/tensorflow/models/tree/master/official/mnist/mnist_eager.py). Other models in the [examples directory](https://www.tensorflow.org/code/tensorflow/contrib/eager/python/examples/) -- 2.7.4