/*
 * Copyright (c) 2011, Google Inc.
 */
#include "cache.h"
#include "bulk-checkin.h"
#include "repository.h"
#include "csum-file.h"
#include "pack.h"
#include "strbuf.h"
#include "packfile.h"

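/*
 * State of the packfile that bulk check-in streams objects into: the
 * temporary pack being written, the current offset in it, and the
 * index entries for the objects written so far (used to create the
 * .idx when the pack is finished).
 */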
static struct bulk_checkin_state {
        unsigned plugged:1;

        char *pack_tmp_name;
        struct hashfile *f;
        off_t offset;
        struct pack_idx_option pack_idx_opts;

        struct pack_idx_entry **written;
        uint32_t alloc_written;
        uint32_t nr_written;
} state;

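/*
 * Finish the temporary packfile: remove it if no object was written,
 * otherwise finalize it, write the matching .idx and move both into
 * the pack directory, then reset the state and re-read the packs so
 * the new objects become visible to this process.
 */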
static void finish_bulk_checkin(struct bulk_checkin_state *state)
{
        struct object_id oid;
        struct strbuf packname = STRBUF_INIT;
        int i;

        if (!state->f)
                return;

        if (state->nr_written == 0) {
                close(state->f->fd);
                unlink(state->pack_tmp_name);
                goto clear_exit;
        } else if (state->nr_written == 1) {
                finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
        } else {
                int fd = finalize_hashfile(state->f, oid.hash, 0);
                fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
                                         state->nr_written, oid.hash,
                                         state->offset);
                close(fd);
        }

        strbuf_addf(&packname, "%s/pack/pack-", get_object_directory());
        finish_tmp_packfile(&packname, state->pack_tmp_name,
                            state->written, state->nr_written,
                            &state->pack_idx_opts, oid.hash);
        for (i = 0; i < state->nr_written; i++)
                free(state->written[i]);

clear_exit:
        free(state->written);
        memset(state, 0, sizeof(*state));

        strbuf_release(&packname);
        /* Make objects we just wrote available to ourselves */
        reprepare_packed_git(the_repository);
}

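/*
 * Return 1 if the object already exists in the object database or has
 * already been written to the pack during this bulk check-in; return 0
 * if it is a new object that needs to be kept.
 */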
static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
{
        int i;

        /* The object may already exist in the repository */
        if (has_sha1_file(oid->hash))
                return 1;

        /* Might want to keep the list sorted */
        for (i = 0; i < state->nr_written; i++)
                if (!oidcmp(&state->written[i]->oid, oid))
                        return 1;

        /* This is a new object we need to keep */
        return 0;
}

/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_to_pack(struct bulk_checkin_state *state,
                          git_hash_ctx *ctx, off_t *already_hashed_to,
                          int fd, size_t size, enum object_type type,
                          const char *path, unsigned flags)
{
        git_zstream s;
        unsigned char obuf[16384];
        unsigned hdrlen;
        int status = Z_OK;
        int write_object = (flags & HASH_WRITE_OBJECT);
        off_t offset = 0;

        git_deflate_init(&s, pack_compression_level);

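        /*
         * The in-pack object header (type and size) is written
         * uncompressed at the start of obuf; deflated data follows it
         * in the same buffer.
         */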
        hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
        s.next_out = obuf + hdrlen;
        s.avail_out = sizeof(obuf) - hdrlen;

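        /*
         * Read the input in chunks, hash only the bytes that were not
         * hashed on a previous attempt, deflate them, and flush obuf to
         * the packfile whenever it fills up or the stream ends.
         */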
        while (status != Z_STREAM_END) {
                unsigned char ibuf[16384];

                if (size && !s.avail_in) {
                        ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
                        ssize_t read_result = read_in_full(fd, ibuf, rsize);
                        if (read_result < 0)
                                die_errno("failed to read from '%s'", path);
                        if (read_result != rsize)
                                die("failed to read %d bytes from '%s'",
                                    (int)rsize, path);
                        offset += rsize;
                        if (*already_hashed_to < offset) {
                                size_t hsize = offset - *already_hashed_to;
                                if (rsize < hsize)
                                        hsize = rsize;
                                if (hsize)
                                        the_hash_algo->update_fn(ctx, ibuf, hsize);
                                *already_hashed_to = offset;
                        }
                        s.next_in = ibuf;
                        s.avail_in = rsize;
                        size -= rsize;
                }

                status = git_deflate(&s, size ? 0 : Z_FINISH);

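                /*
                 * Flush the output buffer when it is full or the
                 * stream has ended, bailing out early if that would
                 * push the pack past the configured size limit.
                 */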
                if (!s.avail_out || status == Z_STREAM_END) {
                        if (write_object) {
                                size_t written = s.next_out - obuf;

                                /* would we bust the size limit? */
                                if (state->nr_written &&
                                    pack_size_limit_cfg &&
                                    pack_size_limit_cfg < state->offset + written) {
                                        git_deflate_abort(&s);
                                        return -1;
                                }

                                hashwrite(state->f, obuf, written);
                                state->offset += written;
                        }
                        s.next_out = obuf;
                        s.avail_out = sizeof(obuf);
                }

                switch (status) {
                case Z_OK:
                case Z_BUF_ERROR:
                case Z_STREAM_END:
                        continue;
                default:
                        die("unexpected deflate failure: %d", status);
                }
        }
        git_deflate_end(&s);
        return 0;
}

/* Lazily create backing packfile for the state */
static void prepare_to_stream(struct bulk_checkin_state *state,
                              unsigned flags)
{
        if (!(flags & HASH_WRITE_OBJECT) || state->f)
                return;

        state->f = create_tmp_packfile(&state->pack_tmp_name);
        reset_pack_idx_option(&state->pack_idx_opts);

        /* Pretend we are going to write only one object */
        state->offset = write_pack_header(state->f, 1);
        if (!state->offset)
                die_errno("unable to write pack header");
}

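/*
 * Hash, and optionally write, the object read from fd into the bulk
 * check-in pack. If appending it would exceed the pack size limit, the
 * partially written data is truncated, the current pack is finished,
 * and the object is streamed again into a fresh pack.
 */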
static int deflate_to_pack(struct bulk_checkin_state *state,
                           struct object_id *result_oid,
                           int fd, size_t size,
                           enum object_type type, const char *path,
                           unsigned flags)
{
        off_t seekback, already_hashed_to;
        git_hash_ctx ctx;
        unsigned char obuf[16384];
        unsigned header_len;
        struct hashfile_checkpoint checkpoint;
        struct pack_idx_entry *idx = NULL;

        seekback = lseek(fd, 0, SEEK_CUR);
        if (seekback == (off_t) -1)
                return error("cannot find the current offset");

        header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
                               type_name(type), (uintmax_t)size) + 1;
        the_hash_algo->init_fn(&ctx);
        the_hash_algo->update_fn(&ctx, obuf, header_len);

        /* Note: idx is non-NULL when we are writing */
        if ((flags & HASH_WRITE_OBJECT) != 0)
                idx = xcalloc(1, sizeof(*idx));

        already_hashed_to = 0;

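        /*
         * Stream the object into the pack. If it would exceed the pack
         * size limit, roll the pack back to the checkpoint, finish it,
         * rewind the input, and try again with a new pack.
         */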
        while (1) {
                prepare_to_stream(state, flags);
                if (idx) {
                        hashfile_checkpoint(state->f, &checkpoint);
                        idx->offset = state->offset;
                        crc32_begin(state->f);
                }
                if (!stream_to_pack(state, &ctx, &already_hashed_to,
                                    fd, size, type, path, flags))
                        break;
                /*
                 * Writing this object to the current pack will make
                 * it too big; we need to truncate it, start a new
                 * pack, and write into it.
                 */
                if (!idx)
                        BUG("should not happen");
                hashfile_truncate(state->f, &checkpoint);
                state->offset = checkpoint.offset;
                finish_bulk_checkin(state);
                if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
                        return error("cannot seek back");
        }
        the_hash_algo->final_fn(result_oid->hash, &ctx);
        if (!idx)
                return 0;

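        /*
         * The object made it into the pack. Record its CRC and either
         * truncate it away if we already have the same object, or add
         * it to the list used to write the pack index.
         */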
        idx->crc32 = crc32_end(state->f);
        if (already_written(state, result_oid)) {
                hashfile_truncate(state->f, &checkpoint);
                state->offset = checkpoint.offset;
                free(idx);
        } else {
                oidcpy(&idx->oid, result_oid);
                ALLOC_GROW(state->written,
                           state->nr_written + 1,
                           state->alloc_written);
                state->written[state->nr_written++] = idx;
        }
        return 0;
}

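/*
 * Stream the object at fd into the bulk check-in pack and store its
 * object name in oid. Unless the check-in is currently plugged, the
 * pack is finalized immediately afterwards.
 */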
int index_bulk_checkin(struct object_id *oid,
                       int fd, size_t size, enum object_type type,
                       const char *path, unsigned flags)
{
        int status = deflate_to_pack(&state, oid, fd, size, type,
                                     path, flags);
        if (!state.plugged)
                finish_bulk_checkin(&state);
        return status;
}

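/*
 * Start a plugged bulk check-in: objects indexed from now on are
 * accumulated in one packfile instead of being finalized one by one.
 */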
void plug_bulk_checkin(void)
{
        state.plugged = 1;
}

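/*
 * End a plugged bulk check-in; if a temporary pack was opened,
 * finalize it.
 */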
void unplug_bulk_checkin(void)
{
        state.plugged = 0;
        if (state.f)
                finish_bulk_checkin(&state);
}