Reserve vectors whose size we know in advance. (#16201)
authorShahzad Lone <shahzadlone@gmail.com>
Tue, 22 Jan 2019 16:00:00 +0000 (08:00 -0800)
committerFacebook Github Bot <facebook-github-bot@users.noreply.github.com>
Tue, 22 Jan 2019 16:02:40 +0000 (08:02 -0800)
Summary:
Avoid reallocation costs by reserving vector capacity up front, according to how many elements we expect to insert.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16201

Differential Revision: D13762594

Pulled By: ezyang

fbshipit-source-id: 7e3bfe421489dde48a2ddb0920dd155f69baecc0

caffe2/core/blob_serialization.cc

index e421719..976647d 100644 (file)
@@ -138,7 +138,6 @@ void TensorSerializer::SerializeWithChunkSize(
   };
 
 #ifndef __ANDROID__
-  std::vector<std::future<void>> futures;
   // Poorman's IOBound ThreadPool
   SimpleQueue<size_t> chunkQueue;
   auto task = [&]() {
@@ -147,7 +146,9 @@ void TensorSerializer::SerializeWithChunkSize(
       processChunk(chunkStart);
     }
   };
+  std::vector<std::future<void>> futures;
   if (tensor.numel() > chunk_size) {
+    futures.reserve(FLAGS_caffe2_max_tensor_serializer_threads);
     for (int i = 0; i < FLAGS_caffe2_max_tensor_serializer_threads; ++i) {
       futures.emplace_back(std::async(std::launch::async, task));
     }
@@ -391,6 +392,7 @@ void DeserializeBlob(const BlobProto& blob_proto, Blob* result) {
 // Get dimensions from Tensor proto
 static std::vector<int64_t> DimsFromTensorProto(const TensorProto& proto) {
   std::vector<int64_t> dims;
+  dims.reserve(proto.dims().size());
   for (const int64_t d : proto.dims()) {
     dims.push_back(d);
   }