From 62fa49ff5dbab9df83362112e17c04f857c72f44 Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Mon, 12 Mar 2018 11:04:59 -0700 Subject: [PATCH] Avoid capturing unused variables in lambda functions PiperOrigin-RevId: 188747641 --- tensorflow/cc/framework/while_gradients.cc | 6 +++--- tensorflow/contrib/image/kernels/segmentation_ops.cc | 4 ++-- tensorflow/core/common_runtime/memory_types.cc | 4 ++-- tensorflow/core/distributed_runtime/graph_mgr.cc | 2 +- tensorflow/core/distributed_runtime/worker.cc | 4 ++-- tensorflow/core/kernels/data/iterator_ops.cc | 2 +- tensorflow/core/kernels/mutex_ops.cc | 12 ++++++------ tensorflow/core/kernels/resource_variable_ops.cc | 2 +- tensorflow/core/kernels/sparse_cross_op.cc | 2 +- tensorflow/core/kernels/split_v_op.cc | 8 ++++---- 10 files changed, 23 insertions(+), 23 deletions(-) diff --git a/tensorflow/cc/framework/while_gradients.cc b/tensorflow/cc/framework/while_gradients.cc index 0734075..81870a0 100644 --- a/tensorflow/cc/framework/while_gradients.cc +++ b/tensorflow/cc/framework/while_gradients.cc @@ -72,9 +72,9 @@ Status AddForwardLoopCounter(WhileContext* while_ctx, const Scope& scope, }; // Body function that adds one to input. 
- BodyGraphBuilderFn body_fn = [while_ctx](const Scope& scope, - const std::vector<Output>& inputs, - std::vector<Output>* outputs) { + BodyGraphBuilderFn body_fn = [](const Scope& scope, + const std::vector<Output>& inputs, + std::vector<Output>* outputs) { DCHECK_EQ(inputs.size(), 1); outputs->emplace_back(ops::Add(scope, inputs[0], 1)); return scope.status(); diff --git a/tensorflow/contrib/image/kernels/segmentation_ops.cc b/tensorflow/contrib/image/kernels/segmentation_ops.cc index fe8bf6e..9372289 100644 --- a/tensorflow/contrib/image/kernels/segmentation_ops.cc +++ b/tensorflow/contrib/image/kernels/segmentation_ops.cc @@ -101,8 +101,8 @@ struct ImageConnectedComponentsFunctor { int cost = (union_find.block_height() + union_find.block_width()) * 20; Shard(worker_threads->num_threads, worker_threads->workers, num_images * num_blocks_vertically * num_blocks_horizontally, cost, - [&union_find, num_images, num_blocks_vertically, - num_blocks_horizontally](int64 start_block, int64 limit_block) { + [&union_find, num_blocks_vertically, num_blocks_horizontally]( + int64 start_block, int64 limit_block) { for (int64 i = start_block; i < limit_block; i++) { int64 block_x = i % num_blocks_horizontally; int64 block_y = diff --git a/tensorflow/core/common_runtime/memory_types.cc b/tensorflow/core/common_runtime/memory_types.cc index 090a16e..116750f 100644 --- a/tensorflow/core/common_runtime/memory_types.cc +++ b/tensorflow/core/common_runtime/memory_types.cc @@ -92,7 +92,7 @@ static Status ProcessMemoryTypes( Status ValidateMemoryTypes(const DeviceType& device_type, const Graph* g) { return ProcessMemoryTypes( - device_type, g, [g](const Edge* e, MemoryType sm, MemoryType dm) { + device_type, g, [](const Edge* e, MemoryType sm, MemoryType dm) { if (sm == dm) { return Status::OK(); } @@ -155,7 +155,7 @@ Status EnsureMemoryTypes(const DeviceType& device_type, }; std::vector<const Edge*> edges; TF_RETURN_IF_ERROR(ProcessMemoryTypes( - device_type, g, [g, &edges](const Edge* e, MemoryType sm, MemoryType dm) { +
device_type, g, [&edges](const Edge* e, MemoryType sm, MemoryType dm) { if (sm == dm) { return Status::OK(); } diff --git a/tensorflow/core/distributed_runtime/graph_mgr.cc b/tensorflow/core/distributed_runtime/graph_mgr.cc index 9768a24..8447c55 100644 --- a/tensorflow/core/distributed_runtime/graph_mgr.cc +++ b/tensorflow/core/distributed_runtime/graph_mgr.cc @@ -438,7 +438,7 @@ void GraphMgr::ExecuteAsync(const string& handle, const int64 step_id, StartParallelExecutors(handle, step_id, item, rendezvous, collector, cost_graph, cancellation_manager, - [this, item, rendezvous, done](const Status& s) { + [item, rendezvous, done](const Status& s) { done(s); rendezvous->Unref(); item->Unref(); diff --git a/tensorflow/core/distributed_runtime/worker.cc b/tensorflow/core/distributed_runtime/worker.cc index 6345549..598652f 100644 --- a/tensorflow/core/distributed_runtime/worker.cc +++ b/tensorflow/core/distributed_runtime/worker.cc @@ -215,7 +215,7 @@ void Worker::DoPartialRunGraph(CallOptions* opts, GraphMgr::NamedTensors in; GraphMgr::NamedTensors* out = new GraphMgr::NamedTensors; Status s = PrepareRunGraph(request, &in, out); - auto finish = [this, done, out, opts](const Status& s) { + auto finish = [done, out, opts](const Status& s) { opts->ClearCancelCallback(); delete out; done(s); @@ -247,7 +247,7 @@ void Worker::DoPartialRunGraph(CallOptions* opts, session->graph_mgr->ExecuteAsync( graph_handle, step_id, session.get(), request->exec_opts(), nullptr /* collector */, nullptr /* response */, cm, in, - [this, token, step_id, session, cm](Status s) { + [this, token, step_id, session](Status s) { { mutex_lock l(mu_); cancellation_manager_->DeregisterCallback(token); diff --git a/tensorflow/core/kernels/data/iterator_ops.cc b/tensorflow/core/kernels/data/iterator_ops.cc index 6fe3746..780f927 100644 --- a/tensorflow/core/kernels/data/iterator_ops.cc +++ b/tensorflow/core/kernels/data/iterator_ops.cc @@ -867,7 +867,7 @@ class IteratorGetNextOp : public AsyncOpKernel { 
// inter-op thread pool thread, so we issue the call from the // owned thread pool. thread_pool_->Schedule(std::bind( - [this, ctx, iterator](DoneCallback done) { + [ctx, iterator](DoneCallback done) { std::vector<Tensor> components; bool end_of_sequence = false; diff --git a/tensorflow/core/kernels/mutex_ops.cc b/tensorflow/core/kernels/mutex_ops.cc index b02a584..ddb7a60 100644 --- a/tensorflow/core/kernels/mutex_ops.cc +++ b/tensorflow/core/kernels/mutex_ops.cc @@ -127,7 +127,7 @@ class Mutex : public ResourceBase { } } thread_pool_->Schedule(std::bind( - [this, c, cm, cancelled, + [this, cm, cancelled, token](std::function<void(const Status& s, SharedLockReleaser&& lock)> fn_) { bool local_locked; @@ -173,7 +173,7 @@ class MutexLockOp : public AsyncOpKernel { OP_REQUIRES_OK_ASYNC( c, LookupOrCreateResource<Mutex>(c, HandleFromInput(c, 0), &mutex, - [this, c](Mutex** ptr) { + [c](Mutex** ptr) { *ptr = new Mutex( c, HandleFromInput(c, 0).name()); return Status::OK(); @@ -186,10 +186,10 @@ class MutexLockOp : public AsyncOpKernel { mutex->AcquireAsync( c, std::bind( - [this, c, variant, mutex](DoneCallback done_, - // End of bound arguments. - const Status& s, - Mutex::SharedLockReleaser&& lock) { + [c, variant, mutex](DoneCallback done_, + // End of bound arguments. + const Status& s, + Mutex::SharedLockReleaser&& lock) { VLOG(2) << "Finished locking mutex " << mutex << " with lock: " << lock.shared_lock.get() << " status: " << s.ToString(); diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc index f254036..aecad01 100644 --- a/tensorflow/core/kernels/resource_variable_ops.cc +++ b/tensorflow/core/kernels/resource_variable_ops.cc @@ -351,7 +351,7 @@ class AssignVariableOp : public OpKernel { Var* variable = nullptr; OP_REQUIRES_OK(context, LookupOrCreateResource<Var>( context, HandleFromInput(context, 0), &variable, - [this, context](Var** ptr) { + [](Var** ptr) { // Created on host.
*ptr = new Var(DT_VARIANT); return Status::OK(); diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index 7cd4532..4b5df7a 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -327,7 +327,7 @@ class SparseCrossOp : public OpKernel { typename CrossTraits<InternalType, OutType>::Updater updater( output_start_indices, indices_out, values_out); - auto do_work = [this, &columns, crosser, updater](int64 begin, int64 end) { + auto do_work = [&columns, crosser, updater](int64 begin, int64 end) { for (int b = begin; b < end; b++) { ProductIterator<InternalType> product_iterator(columns, b); int64 cross_count = 0; diff --git a/tensorflow/core/kernels/split_v_op.cc b/tensorflow/core/kernels/split_v_op.cc index 0ce0b55..5c19a45 100644 --- a/tensorflow/core/kernels/split_v_op.cc +++ b/tensorflow/core/kernels/split_v_op.cc @@ -208,10 +208,10 @@ class SplitVOpCPUImpl { input_element_count >= std::max(num_threads, num_split) * 4096 && input_element_count < num_split * 180 * 1024); - auto range_output_func = [&indices, context, &input_shape, prefix_dim_size, - split_dim, &split_sizes_vec, &split_start_points, - suffix_dim_size, use_parallelism_between_outputs, - &input_reshaped, &make_sizes, + auto range_output_func = [&indices, context, &input_shape, split_dim, + &split_sizes_vec, &split_start_points, + use_parallelism_between_outputs, &input_reshaped, + &make_sizes, &reshape_result](int64 start, int64 limit) { for (int64 i = start; i < limit; ++i) { TensorShape output_shape(input_shape); -- 2.7.4