Diffstat (limited to 'reftable/writer.c')
 reftable/writer.c | 103 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 60 insertions(+), 43 deletions(-)
diff --git a/reftable/writer.c b/reftable/writer.c
index f3ab1035d6..3b4ebdd6dc 100644
--- a/reftable/writer.c
+++ b/reftable/writer.c
@@ -1,10 +1,10 @@
/*
-Copyright 2020 Google LLC
-
-Use of this source code is governed by a BSD-style
-license that can be found in the LICENSE file or at
-https://developers.google.com/open-source/licenses/bsd
-*/
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file or at
+ * https://developers.google.com/open-source/licenses/bsd
+ */
#include "writer.h"
@@ -57,8 +57,10 @@ static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
return -1;
n = w->write(w->write_arg, zeroed, w->pending_padding);
- if (n < 0)
+ if (n < 0) {
+ reftable_free(zeroed);
return n;
+ }
w->pending_padding = 0;
reftable_free(zeroed);
@@ -99,9 +101,9 @@ static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
dest[4] = writer_version(w);
- put_be24(dest + 5, w->opts.block_size);
- put_be64(dest + 8, w->min_update_index);
- put_be64(dest + 16, w->max_update_index);
+ reftable_put_be24(dest + 5, w->opts.block_size);
+ reftable_put_be64(dest + 8, w->min_update_index);
+ reftable_put_be64(dest + 16, w->max_update_index);
if (writer_version(w) == 2) {
uint32_t hash_id;
@@ -116,7 +118,7 @@ static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
return -1;
}
- put_be32(dest + 24, hash_id);
+ reftable_put_be32(dest + 24, hash_id);
}
return header_size(writer_version(w));
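For context on the renamed helpers above: reftable_put_be24() and friends presumably behave like the put_beNN() routines they replace, storing the low N bytes of a value in big-endian order. A minimal standalone sketch of the 24-bit variant, with a hypothetical demo_put_be24() name so it is not mistaken for the library's own implementation:

#include <stdint.h>
#include <stdio.h>

/* Sketch: store the low 24 bits of `value` at `out` in big-endian order. */
static void demo_put_be24(uint8_t *out, uint32_t value)
{
	out[0] = (uint8_t)((value >> 16) & 0xff);
	out[1] = (uint8_t)((value >> 8) & 0xff);
	out[2] = (uint8_t)(value & 0xff);
}

int main(void)
{
	uint8_t buf[3];
	demo_put_be24(buf, 4096);                           /* a typical block size */
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);  /* prints: 00 10 00 */
	return 0;
}
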
@@ -158,7 +160,7 @@ int reftable_writer_new(struct reftable_writer **out,
opts = *_opts;
options_set_defaults(&opts);
if (opts.block_size >= (1 << 24))
- BUG("configured block size exceeds 16MB");
+ return REFTABLE_API_ERROR;
reftable_buf_init(&wp->block_writer_data.last_key);
reftable_buf_init(&wp->last_key);
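The BUG() call removed above aborted the entire process on a misconfigured writer; returning REFTABLE_API_ERROR instead lets the caller deal with the problem. The 16MB ceiling itself follows from the header layout earlier in this patch, where the block size is written with reftable_put_be24() into a 3-byte field. A hypothetical restatement of the check:

#include <stdint.h>

/*
 * Hypothetical restatement of the writer's block-size validation: the
 * on-disk header stores the block size in 24 bits, so configured values
 * must stay strictly below 1 << 24 (16 MiB).
 */
static int block_size_is_representable(uint32_t block_size)
{
	return block_size < (1u << 24);
}
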
@@ -172,7 +174,7 @@ int reftable_writer_new(struct reftable_writer **out,
wp->write_arg = writer_arg;
wp->opts = opts;
wp->flush = flush_func;
- writer_reinit_block_writer(wp, BLOCK_TYPE_REF);
+ writer_reinit_block_writer(wp, REFTABLE_BLOCK_TYPE_REF);
*out = wp;
@@ -256,8 +258,10 @@ static int writer_index_hash(struct reftable_writer *w, struct reftable_buf *has
reftable_buf_reset(&key->hash);
err = reftable_buf_add(&key->hash, hash->buf, hash->len);
- if (err < 0)
+ if (err < 0) {
+ reftable_free(key);
return err;
+ }
tree_insert(&w->obj_index_tree, key,
&obj_index_tree_node_compare);
} else {
@@ -302,19 +306,19 @@ static int writer_add_record(struct reftable_writer *w,
}
if (block_writer_type(w->block_writer) != reftable_record_type(rec))
- BUG("record of type %d added to writer of type %d",
- reftable_record_type(rec), block_writer_type(w->block_writer));
+ return REFTABLE_API_ERROR;
/*
* Try to add the record to the writer. If this succeeds then we're
* done. Otherwise the block writer may have hit the block size limit
* and needs to be flushed.
*/
- if (!block_writer_add(w->block_writer, rec)) {
- err = 0;
+ err = block_writer_add(w->block_writer, rec);
+ if (err == 0)
goto done;
- }
+ if (err != REFTABLE_ENTRY_TOO_BIG_ERROR)
+ goto done;
/*
* The current block is full, so we need to flush and reinitialize the
* writer to start writing the next block.
@@ -329,16 +333,10 @@ static int writer_add_record(struct reftable_writer *w,
/*
* Try to add the record to the writer again. If this still fails then
* the record does not fit into the block size.
- *
- * TODO: it would be great to have `block_writer_add()` return proper
- * error codes so that we don't have to second-guess the failure
- * mode here.
*/
err = block_writer_add(w->block_writer, rec);
- if (err) {
- err = REFTABLE_ENTRY_TOO_BIG_ERROR;
+ if (err)
goto done;
- }
done:
return err;
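The same "try, flush, retry once" shape recurs in write_object_record() further down in this patch. A condensed sketch of the pattern, written against the internal functions visible in this file; the helper itself and its uint8_t type parameter are assumptions for illustration, not part of the patch:

/*
 * Hypothetical helper condensing the pattern above: add a record, and if
 * the only problem is that the current block is full, flush it, start a
 * fresh block of the same type, and retry exactly once.
 */
static int add_record_with_one_flush(struct reftable_writer *w,
				     struct reftable_record *rec, uint8_t typ)
{
	int err = block_writer_add(w->block_writer, rec);
	if (err != REFTABLE_ENTRY_TOO_BIG_ERROR)
		return err; /* success, or an unrelated error */

	err = writer_flush_block(w);
	if (err < 0)
		return err;
	err = writer_reinit_block_writer(w, typ);
	if (err < 0)
		return err;

	/* If this still fails, the record does not fit into any block. */
	return block_writer_add(w->block_writer, rec);
}
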
@@ -348,7 +346,7 @@ int reftable_writer_add_ref(struct reftable_writer *w,
struct reftable_ref_record *ref)
{
struct reftable_record rec = {
- .type = BLOCK_TYPE_REF,
+ .type = REFTABLE_BLOCK_TYPE_REF,
.u = {
.ref = *ref
},
@@ -412,13 +410,13 @@ static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
struct reftable_log_record *log)
{
struct reftable_record rec = {
- .type = BLOCK_TYPE_LOG,
+ .type = REFTABLE_BLOCK_TYPE_LOG,
.u = {
.log = *log,
},
};
if (w->block_writer &&
- block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
+ block_writer_type(w->block_writer) == REFTABLE_BLOCK_TYPE_REF) {
int err = writer_finish_public_section(w);
if (err < 0)
return err;
@@ -538,7 +536,7 @@ static int writer_finish_section(struct reftable_writer *w)
max_level++;
index_start = w->next;
- err = writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);
+ err = writer_reinit_block_writer(w, REFTABLE_BLOCK_TYPE_INDEX);
if (err < 0)
return err;
@@ -550,7 +548,7 @@ static int writer_finish_section(struct reftable_writer *w)
w->index_cap = 0;
for (i = 0; i < idx_len; i++) {
struct reftable_record rec = {
- .type = BLOCK_TYPE_INDEX,
+ .type = REFTABLE_BLOCK_TYPE_INDEX,
.u = {
.idx = idx[i],
},
@@ -615,7 +613,7 @@ static void write_object_record(void *void_arg, void *key)
struct write_record_arg *arg = void_arg;
struct obj_index_tree_node *entry = key;
struct reftable_record
- rec = { .type = BLOCK_TYPE_OBJ,
+ rec = { .type = REFTABLE_BLOCK_TYPE_OBJ,
.u.obj = {
.hash_prefix = (uint8_t *)entry->hash.buf,
.hash_prefix_len = arg->w->stats.object_id_len,
@@ -625,22 +623,41 @@ static void write_object_record(void *void_arg, void *key)
if (arg->err < 0)
goto done;
+ /*
+ * Try to add the record to the writer. If this succeeds then we're
+ * done. Otherwise the block writer may have hit the block size limit
+ * and needs to be flushed.
+ */
arg->err = block_writer_add(arg->w->block_writer, &rec);
if (arg->err == 0)
goto done;
+ if (arg->err != REFTABLE_ENTRY_TOO_BIG_ERROR)
+ goto done;
+
+ /*
+ * The current block is full, so we need to flush and reinitialize the
+ * writer to start writing the next block.
+ */
arg->err = writer_flush_block(arg->w);
if (arg->err < 0)
goto done;
- arg->err = writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
+ arg->err = writer_reinit_block_writer(arg->w, REFTABLE_BLOCK_TYPE_OBJ);
if (arg->err < 0)
goto done;
+ /*
+ * If this still fails then we may need to reset the record's offset
+ * length to reduce the amount of data to be written.
+ */
arg->err = block_writer_add(arg->w->block_writer, &rec);
if (arg->err == 0)
goto done;
+ if (arg->err != REFTABLE_ENTRY_TOO_BIG_ERROR)
+ goto done;
+
rec.u.obj.offset_len = 0;
arg->err = block_writer_add(arg->w->block_writer, &rec);
@@ -650,7 +667,7 @@ static void write_object_record(void *void_arg, void *key)
done:;
}
-static void object_record_free(void *void_arg UNUSED, void *key)
+static void object_record_free(void *void_arg REFTABLE_UNUSED, void *key)
{
struct obj_index_tree_node *entry = key;
@@ -671,7 +688,7 @@ static int writer_dump_object_index(struct reftable_writer *w)
infix_walk(w->obj_index_tree, &update_common, &common);
w->stats.object_id_len = common.max + 1;
- err = writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);
+ err = writer_reinit_block_writer(w, REFTABLE_BLOCK_TYPE_OBJ);
if (err < 0)
return err;
@@ -695,7 +712,7 @@ static int writer_finish_public_section(struct reftable_writer *w)
err = writer_finish_section(w);
if (err < 0)
return err;
- if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
+ if (typ == REFTABLE_BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
w->stats.ref_stats.index_blocks > 0) {
err = writer_dump_object_index(w);
if (err < 0)
@@ -731,19 +748,19 @@ int reftable_writer_close(struct reftable_writer *w)
}
p += writer_write_header(w, footer);
- put_be64(p, w->stats.ref_stats.index_offset);
+ reftable_put_be64(p, w->stats.ref_stats.index_offset);
p += 8;
- put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
+ reftable_put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
p += 8;
- put_be64(p, w->stats.obj_stats.index_offset);
+ reftable_put_be64(p, w->stats.obj_stats.index_offset);
p += 8;
- put_be64(p, w->stats.log_stats.offset);
+ reftable_put_be64(p, w->stats.log_stats.offset);
p += 8;
- put_be64(p, w->stats.log_stats.index_offset);
+ reftable_put_be64(p, w->stats.log_stats.index_offset);
p += 8;
- put_be32(p, crc32(0, footer, p - footer));
+ reftable_put_be32(p, crc32(0, footer, p - footer));
p += 4;
err = w->flush(w->write_arg);
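One footer field above packs two values into a single 64-bit slot: the object section's offset in the upper bits and the abbreviated object ID length in the low 5 bits. A minimal sketch of that packing, with hypothetical helper and variable names:

#include <assert.h>
#include <stdint.h>

/* Hypothetical illustration of the obj_stats footer field written above:
 * the offset occupies the upper bits, object_id_len the low 5 bits. */
static uint64_t pack_obj_footer_field(uint64_t offset, uint8_t object_id_len)
{
	return (offset << 5) | object_id_len;
}

int main(void)
{
	uint64_t field = pack_obj_footer_field(1024, 7);
	assert((field >> 5) == 1024);  /* recover the offset */
	assert((field & 0x1f) == 7);   /* recover object_id_len */
	return 0;
}
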
@@ -800,7 +817,7 @@ static int writer_flush_nonempty_block(struct reftable_writer *w)
* By default, all records except for log records are padded to the
* block size.
*/
- if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG)
+ if (!w->opts.unpadded && typ != REFTABLE_BLOCK_TYPE_LOG)
padding = w->opts.block_size - raw_bytes;
bstats = writer_reftable_block_stats(w, typ);
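The padding computed here keeps every non-log block aligned to the configured block size, so that in a padded table the next block starts on a block_size boundary. A small illustration with hypothetical names:

#include <assert.h>
#include <stdint.h>

/* Hypothetical illustration: pad each non-log block up to the block size
 * so that the following block starts on a block_size boundary. */
static uint64_t padding_for_block(uint64_t block_size, uint64_t raw_bytes)
{
	return block_size - raw_bytes;
}

int main(void)
{
	uint64_t block_size = 4096, raw_bytes = 1000;
	uint64_t next_offset = raw_bytes + padding_for_block(block_size, raw_bytes);
	assert(next_offset % block_size == 0);
	return 0;
}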