1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
|
/* Copyright (c) 2007-2018 Dovecot authors, see the included COPYING file */
/*
Expunging works like:
1. Lock map index by beginning a map sync.
2. Write map UID refcount changes to map index (=> tail != head).
3. Expunge messages from mailbox index.
4. Finish map sync, which updates tail=head and unlocks map index.
If something crashes after 2 but before 4 is finished, tail != head and
reader can do a full resync to figure out what got broken.
*/
#include "lib.h"
#include "array.h"
#include "ioloop.h"
#include "str.h"
#include "mdbox-storage.h"
#include "mdbox-storage-rebuild.h"
#include "mdbox-map.h"
#include "mdbox-file.h"
#include "mdbox-sync.h"
#include "mailbox-recent-flags.h"
/* returns -1 on error, 1 on success, 0 if guid is empty/missing */
static int
dbox_sync_verify_expunge_guid(struct mdbox_sync_context *ctx, uint32_t seq,
const guid_128_t guid_128)
{
const void *data;
uint32_t uid;
mail_index_lookup_uid(ctx->sync_view, seq, &uid);
mail_index_lookup_ext(ctx->sync_view, seq,
ctx->mbox->guid_ext_id, &data, NULL);
if ((data == NULL) || guid_128_is_empty(data))
return 0;
if (guid_128_is_empty(guid_128) ||
memcmp(data, guid_128, GUID_128_SIZE) == 0)
return 1;
mailbox_set_critical(&ctx->mbox->box,
"Expunged GUID mismatch for UID %u: %s vs %s",
uid, guid_128_to_string(data), guid_128_to_string(guid_128));
mdbox_storage_set_corrupted(ctx->mbox->storage);
return -1;
}
/* Record seq as expunged during this sync and decrement its map refcount.
   Returns 0 on success (including the already-seen and no-GUID cases),
   -1 on error. */
static int mdbox_sync_expunge(struct mdbox_sync_context *ctx, uint32_t seq,
			      const guid_128_t guid_128)
{
	uint32_t map_uid;
	int ret;

	if (seq_range_array_add(&ctx->expunged_seqs, seq)) {
		/* already marked as expunged in this sync */
		return 0;
	}

	ret = dbox_sync_verify_expunge_guid(ctx, seq, guid_128);
	if (ret < 0)
		return -1;
	if (ret == 0) {
		/* index has no GUID for this message; the refcount update is
		   skipped in that case - the message still gets expunged from
		   the mailbox index by dbox_sync_mark_expunges() */
		return 0;
	}
	if (mdbox_mail_lookup(ctx->mbox, ctx->sync_view, seq, &map_uid) < 0)
		return -1;
	return mdbox_map_update_refcount(ctx->map_trans, map_uid, -1) < 0 ?
		-1 : 0;
}
/* Handle a single index sync record. Only expunge records need work here;
   everything else is ignored. Returns 0 on success, -1 on error. */
static int mdbox_sync_rec(struct mdbox_sync_context *ctx,
			  const struct mail_index_sync_rec *sync_rec)
{
	uint32_t cur, first_seq, last_seq;

	if (sync_rec->type != MAIL_INDEX_SYNC_TYPE_EXPUNGE) {
		/* not interested */
		return 0;
	}

	if (!mail_index_lookup_seq_range(ctx->sync_view, sync_rec->uid1,
					 sync_rec->uid2,
					 &first_seq, &last_seq)) {
		/* already expunged everything. nothing to do. */
		return 0;
	}

	for (cur = first_seq; cur <= last_seq; cur++) {
		if (mdbox_sync_expunge(ctx, cur, sync_rec->guid_128) < 0)
			return -1;
	}
	return 0;
}
static int dbox_sync_mark_expunges(struct mdbox_sync_context *ctx)
{
enum mail_index_transaction_flags flags =
MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL;
struct mailbox *box = &ctx->mbox->box;
struct mail_index_transaction *trans;
struct seq_range_iter iter;
unsigned int n;
const void *data;
uint32_t seq, uid;
/* use a separate transaction here so that we can commit the changes
during map transaction */
trans = mail_index_transaction_begin(ctx->sync_view, flags);
seq_range_array_iter_init(&iter, &ctx->expunged_seqs); n = 0;
while (seq_range_array_iter_nth(&iter, n++, &seq)) {
mail_index_lookup_uid(ctx->sync_view, seq, &uid);
mail_index_lookup_ext(ctx->sync_view, seq,
ctx->mbox->guid_ext_id, &data, NULL);
if ((data == NULL) || guid_128_is_empty(data))
mail_index_expunge(trans, seq);
else
mail_index_expunge_guid(trans, seq, data);
}
if (mail_index_transaction_commit(&trans) < 0)
return -1;
box->tmp_sync_view = ctx->sync_view;
seq_range_array_iter_init(&iter, &ctx->expunged_seqs); n = 0;
while (seq_range_array_iter_nth(&iter, n++, &seq)) {
mail_index_lookup_uid(ctx->sync_view, seq, &uid);
mailbox_sync_notify(box, uid, MAILBOX_SYNC_TYPE_EXPUNGE);
}
box->tmp_sync_view = NULL;
return 0;
}
/* Sync the mailbox index: create missing index headers for a freshly made
   mailbox, update recent flags, and apply expunge records (updating map
   refcounts when the map is locked). Returns 1 on success, 0 when
   corruption was detected (caller should rebuild), -1 on other errors. */
static int mdbox_sync_index(struct mdbox_sync_context *ctx)
{
	struct mailbox *box = &ctx->mbox->box;
	const struct mail_index_header *hdr;
	struct mail_index_sync_rec sync_rec;
	uint32_t seq1, seq2;
	int ret = 0;

	hdr = mail_index_get_header(ctx->sync_view);
	if (hdr->uid_validity == 0) {
		/* newly created index file */
		if (hdr->next_uid == 1) {
			/* could be just a race condition where we opened the
			   mailbox between mkdir and index creation. fix this
			   silently. */
			if (mdbox_mailbox_create_indexes(box, NULL, ctx->trans) < 0)
				return -1;
			return 1;
		}
		mailbox_set_critical(box, "Broken index: missing UIDVALIDITY");
		return 0;
	}

	/* mark the newly seen messages as recent */
	if (mail_index_lookup_seq_range(ctx->sync_view, hdr->first_recent_uid,
					hdr->next_uid, &seq1, &seq2)) {
		mailbox_recent_flags_set_seqs(&ctx->mbox->box, ctx->sync_view,
					      seq1, seq2);
	}

	/* handle syncing records without map being locked. */
	if (mdbox_map_atomic_is_locked(ctx->atomic)) {
		/* the map was locked by mdbox_sync_try_begin() because there
		   are expunges to process */
		ctx->map_trans = mdbox_map_transaction_begin(ctx->atomic, FALSE);
		i_array_init(&ctx->expunged_seqs, 64);
	}
	while (mail_index_sync_next(ctx->index_sync_ctx, &sync_rec)) {
		if ((ret = mdbox_sync_rec(ctx, &sync_rec)) < 0)
			break;
	}

	/* write refcount changes to map index. transaction commit updates the
	   log head, while tail is left behind. */
	if (mdbox_map_atomic_is_locked(ctx->atomic)) {
		if (ret == 0)
			ret = mdbox_map_transaction_commit(ctx->map_trans, "mdbox syncing");
		/* write changes to mailbox index */
		if (ret == 0)
			ret = dbox_sync_mark_expunges(ctx);

		/* finish the map changes and unlock the map. this also updates
		   map's tail -> head. */
		if (ret < 0)
			mdbox_map_atomic_set_failed(ctx->atomic);
		mdbox_map_transaction_free(&ctx->map_trans);
		ctx->expunged_count = seq_range_count(&ctx->expunged_seqs);
		array_free(&ctx->expunged_seqs);
	}

	mailbox_sync_notify(box, 0, 0);
	/* corruption is reported as 0 so mdbox_sync_begin() can trigger a
	   storage rebuild and retry */
	return ret == 0 ? 1 :
		(ctx->mbox->storage->corrupted ? 0 : -1);
}
/* Begin the mailbox index sync. If there are expunges, the map index must
   be locked before the mailbox index, so the sync is rolled back, the map
   gets locked, and the sync is retried. Returns 1 when syncing can
   proceed, 0 when there is nothing to sync, -1 on error. */
static int mdbox_sync_try_begin(struct mdbox_sync_context *ctx,
				enum mail_index_sync_flags sync_flags)
{
	struct mdbox_mailbox *mbox = ctx->mbox;
	int ret;

	ret = index_storage_expunged_sync_begin(&mbox->box, &ctx->index_sync_ctx,
		&ctx->sync_view, &ctx->trans, sync_flags);
	/* if the index was found fsck'd, flag the storage corrupted so the
	   caller triggers a rebuild */
	if (mail_index_reset_fscked(mbox->box.index))
		mdbox_storage_set_corrupted(mbox->storage);
	if (ret <= 0)
		return ret; /* error / nothing to do */

	if (!mdbox_map_atomic_is_locked(ctx->atomic) &&
	    mail_index_sync_has_expunges(ctx->index_sync_ctx)) {
		/* we have expunges, so we need to write to map.
		   it needs to be locked before mailbox index. */
		mail_index_sync_set_reason(ctx->index_sync_ctx, "mdbox expunge check");
		mail_index_sync_rollback(&ctx->index_sync_ctx);
		index_storage_expunging_deinit(&ctx->mbox->box);

		if (mdbox_map_atomic_lock(ctx->atomic, "mdbox syncing with expunges") < 0)
			return -1;
		/* retry with the map now locked; since the lock is held, the
		   recursion cannot repeat more than once */
		return mdbox_sync_try_begin(ctx, sync_flags);
	}
	return 1;
}
/* Begin syncing the mailbox, rebuilding the storage first if corruption
   was detected (unless MDBOX_SYNC_FLAG_NO_REBUILD is set). On success
   returns 0 and sets *ctx_r (left NULL when there was nothing to sync);
   returns -1 on error. On detected corruption the sync is retried once
   from the beginning after marking the storage for rebuild. */
int mdbox_sync_begin(struct mdbox_mailbox *mbox, enum mdbox_sync_flags flags,
		     struct mdbox_map_atomic_context *atomic,
		     struct mdbox_sync_context **ctx_r)
{
	const struct mail_index_header *hdr =
		mail_index_get_header(mbox->box.view);
	struct mdbox_sync_context *ctx;
	const char *reason;
	enum mail_index_sync_flags sync_flags;
	int ret;
	bool rebuild, storage_rebuilt = FALSE;

	*ctx_r = NULL;

	/* avoid race conditions with mailbox creation, don't check for dbox
	   headers until syncing has locked the mailbox */
	rebuild = mbox->storage->corrupted ||
		(hdr->flags & MAIL_INDEX_HDR_FLAG_FSCKD) != 0 ||
		mdbox_map_is_fscked(mbox->storage->map) ||
		(flags & MDBOX_SYNC_FLAG_FORCE_REBUILD) != 0;
	if (rebuild && (flags & MDBOX_SYNC_FLAG_NO_REBUILD) == 0) {
		if (mdbox_storage_rebuild_in_context(mbox->storage, atomic) < 0)
			return -1;
		mailbox_recent_flags_reset(&mbox->box);
		storage_rebuilt = TRUE;
	}

	ctx = i_new(struct mdbox_sync_context, 1);
	ctx->mbox = mbox;
	ctx->flags = flags;
	ctx->atomic = atomic;

	sync_flags = index_storage_get_sync_flags(&mbox->box);
	if (!rebuild && (flags & MDBOX_SYNC_FLAG_FORCE) == 0)
		sync_flags |= MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES;
	if ((flags & MDBOX_SYNC_FLAG_FSYNC) != 0)
		sync_flags |= MAIL_INDEX_SYNC_FLAG_FSYNC;
	/* don't write unnecessary dirty flag updates */
	sync_flags |= MAIL_INDEX_SYNC_FLAG_AVOID_FLAG_UPDATES;

	ret = mdbox_sync_try_begin(ctx, sync_flags);
	if (ret <= 0) {
		/* failed / nothing to do */
		index_storage_expunging_deinit(&mbox->box);
		i_free(ctx);
		return ret;
	}

	if ((ret = mdbox_sync_index(ctx)) <= 0) {
		mail_index_sync_set_reason(ctx->index_sync_ctx,
					   ret < 0 ? "mdbox syncing failed" :
					   "mdbox syncing found corruption");
		mail_index_sync_rollback(&ctx->index_sync_ctx);
		index_storage_expunging_deinit(&mbox->box);
		i_free_and_null(ctx);

		if (ret < 0)
			return -1;

		/* corrupted */
		if (storage_rebuilt) {
			/* the storage was already rebuilt during this call;
			   rebuilding again is not going to help */
			mailbox_set_critical(&mbox->box,
				"mdbox: Storage keeps breaking");
			return -1;
		}

		/* we'll need to rebuild storage.
		   try again from the beginning. */
		mdbox_storage_set_corrupted(mbox->storage);
		if ((flags & MDBOX_SYNC_FLAG_NO_REBUILD) != 0) {
			mailbox_set_critical(&mbox->box,
				"mdbox: Can't rebuild storage");
			return -1;
		}
		/* recursion terminates: the rebuild either succeeds (then
		   storage_rebuilt is TRUE on the retry) or returns -1 */
		return mdbox_sync_begin(mbox, flags, atomic, ctx_r);
	}
	index_storage_expunging_deinit(&mbox->box);

	if (!mdbox_map_atomic_is_locked(ctx->atomic))
		reason = "mdbox synced";
	else {
		/* may be 0 msgs, but that still informs that the map
		   was locked */
		reason = t_strdup_printf("mdbox synced - %u msgs expunged",
					 ctx->expunged_count);
	}
	mail_index_sync_set_reason(ctx->index_sync_ctx, reason);

	*ctx_r = ctx;
	return 0;
}
/* Finish a sync started by mdbox_sync_begin(): commit the index sync on
   success, roll it back otherwise, and free the context. *_ctx is always
   cleared. Returns 0 on success, -1 on error. */
int mdbox_sync_finish(struct mdbox_sync_context **_ctx, bool success)
{
	struct mdbox_sync_context *ctx = *_ctx;
	struct mail_storage *storage = &ctx->mbox->storage->storage.storage;
	int ret = success ? 0 : -1;

	*_ctx = NULL;

	if (!success)
		mail_index_sync_rollback(&ctx->index_sync_ctx);
	else if (mail_index_sync_commit(&ctx->index_sync_ctx) < 0) {
		mailbox_set_index_error(&ctx->mbox->box);
		ret = -1;
	}

	if (storage->rebuild_list_index)
		ret = mail_storage_list_index_rebuild_and_set_uncorrupted(storage);

	i_free(ctx);
	return ret;
}
/* Run a full mailbox sync inside its own map atomic context.
   Returns 0 on success, -1 on error. */
int mdbox_sync(struct mdbox_mailbox *mbox, enum mdbox_sync_flags flags)
{
	struct mdbox_sync_context *sync_ctx = NULL;
	struct mdbox_map_atomic_context *atomic =
		mdbox_map_atomic_begin(mbox->storage->map);
	int ret = mdbox_sync_begin(mbox, flags, atomic, &sync_ctx);

	if (ret == 0) {
		/* sync_ctx is NULL when there was nothing to sync */
		if (sync_ctx != NULL)
			ret = mdbox_sync_finish(&sync_ctx, TRUE);
		if (ret == 0)
			mdbox_map_atomic_set_success(atomic);
	}
	if (mdbox_map_atomic_finish(&atomic) < 0)
		ret = -1;
	return ret;
}
struct mailbox_sync_context *
mdbox_storage_sync_init(struct mailbox *box, enum mailbox_sync_flags flags)
{
struct mdbox_mailbox *mbox = MDBOX_MAILBOX(box);
enum mdbox_sync_flags mdbox_sync_flags = 0;
int ret = 0;
if (mail_index_reset_fscked(box->index))
mdbox_storage_set_corrupted(mbox->storage);
if (index_mailbox_want_full_sync(&mbox->box, flags) ||
mbox->storage->corrupted) {
if ((flags & MAILBOX_SYNC_FLAG_FORCE_RESYNC) != 0)
mdbox_sync_flags |= MDBOX_SYNC_FLAG_FORCE_REBUILD;
ret = mdbox_sync(mbox, mdbox_sync_flags);
}
return index_mailbox_sync_init(box, flags, ret < 0);
}
|