summaryrefslogtreecommitdiffstats
path: root/src/lib-index
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--src/lib-index/Makefile.am150
-rw-r--r--src/lib-index/Makefile.in1285
-rw-r--r--src/lib-index/mail-cache-decisions.c238
-rw-r--r--src/lib-index/mail-cache-fields.c660
-rw-r--r--src/lib-index/mail-cache-lookup.c694
-rw-r--r--src/lib-index/mail-cache-private.h421
-rw-r--r--src/lib-index/mail-cache-purge.c707
-rw-r--r--src/lib-index/mail-cache-sync-update.c68
-rw-r--r--src/lib-index/mail-cache-transaction.c929
-rw-r--r--src/lib-index/mail-cache.c1005
-rw-r--r--src/lib-index/mail-cache.h193
-rw-r--r--src/lib-index/mail-index-alloc-cache.c315
-rw-r--r--src/lib-index/mail-index-alloc-cache.h20
-rw-r--r--src/lib-index/mail-index-dummy-view.c47
-rw-r--r--src/lib-index/mail-index-fsck.c495
-rw-r--r--src/lib-index/mail-index-lock.c63
-rw-r--r--src/lib-index/mail-index-map-hdr.c359
-rw-r--r--src/lib-index/mail-index-map-read.c519
-rw-r--r--src/lib-index/mail-index-map.c595
-rw-r--r--src/lib-index/mail-index-modseq.c733
-rw-r--r--src/lib-index/mail-index-modseq.h66
-rw-r--r--src/lib-index/mail-index-private.h437
-rw-r--r--src/lib-index/mail-index-strmap.c1259
-rw-r--r--src/lib-index/mail-index-strmap.h81
-rw-r--r--src/lib-index/mail-index-sync-ext.c735
-rw-r--r--src/lib-index/mail-index-sync-keywords.c347
-rw-r--r--src/lib-index/mail-index-sync-private.h104
-rw-r--r--src/lib-index/mail-index-sync-update.c1087
-rw-r--r--src/lib-index/mail-index-sync.c1062
-rw-r--r--src/lib-index/mail-index-transaction-export.c533
-rw-r--r--src/lib-index/mail-index-transaction-finish.c350
-rw-r--r--src/lib-index/mail-index-transaction-private.h165
-rw-r--r--src/lib-index/mail-index-transaction-sort-appends.c184
-rw-r--r--src/lib-index/mail-index-transaction-update.c1367
-rw-r--r--src/lib-index/mail-index-transaction-view.c534
-rw-r--r--src/lib-index/mail-index-transaction.c360
-rw-r--r--src/lib-index/mail-index-util.c138
-rw-r--r--src/lib-index/mail-index-util.h22
-rw-r--r--src/lib-index/mail-index-view-private.h120
-rw-r--r--src/lib-index/mail-index-view-sync.c1045
-rw-r--r--src/lib-index/mail-index-view.c651
-rw-r--r--src/lib-index/mail-index-write.c215
-rw-r--r--src/lib-index/mail-index.c1110
-rw-r--r--src/lib-index/mail-index.h817
-rw-r--r--src/lib-index/mail-transaction-log-append.c256
-rw-r--r--src/lib-index/mail-transaction-log-file.c1685
-rw-r--r--src/lib-index/mail-transaction-log-modseq.c298
-rw-r--r--src/lib-index/mail-transaction-log-private.h199
-rw-r--r--src/lib-index/mail-transaction-log-view-private.h33
-rw-r--r--src/lib-index/mail-transaction-log-view.c909
-rw-r--r--src/lib-index/mail-transaction-log.c664
-rw-r--r--src/lib-index/mail-transaction-log.h494
-rw-r--r--src/lib-index/mailbox-log.c292
-rw-r--r--src/lib-index/mailbox-log.h44
-rw-r--r--src/lib-index/test-mail-cache-common.c166
-rw-r--r--src/lib-index/test-mail-cache-fields.c112
-rw-r--r--src/lib-index/test-mail-cache-purge.c1076
-rw-r--r--src/lib-index/test-mail-cache.c764
-rw-r--r--src/lib-index/test-mail-cache.h32
-rw-r--r--src/lib-index/test-mail-index-map.c57
-rw-r--r--src/lib-index/test-mail-index-modseq.c77
-rw-r--r--src/lib-index/test-mail-index-sync-ext.c86
-rw-r--r--src/lib-index/test-mail-index-transaction-finish.c297
-rw-r--r--src/lib-index/test-mail-index-transaction-update.c683
-rw-r--r--src/lib-index/test-mail-index-write.c151
-rw-r--r--src/lib-index/test-mail-index.c169
-rw-r--r--src/lib-index/test-mail-index.h51
-rw-r--r--src/lib-index/test-mail-transaction-log-append.c176
-rw-r--r--src/lib-index/test-mail-transaction-log-file.c418
-rw-r--r--src/lib-index/test-mail-transaction-log-view.c268
70 files changed, 31742 insertions, 0 deletions
diff --git a/src/lib-index/Makefile.am b/src/lib-index/Makefile.am
new file mode 100644
index 0000000..f203201
--- /dev/null
+++ b/src/lib-index/Makefile.am
@@ -0,0 +1,150 @@
+noinst_LTLIBRARIES = libindex.la
+
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/lib \
+ -I$(top_srcdir)/src/lib-test \
+ -I$(top_srcdir)/src/lib-mail
+
+libindex_la_SOURCES = \
+ mail-cache.c \
+ mail-cache-decisions.c \
+ mail-cache-fields.c \
+ mail-cache-lookup.c \
+ mail-cache-purge.c \
+ mail-cache-transaction.c \
+ mail-cache-sync-update.c \
+ mail-index.c \
+ mail-index-alloc-cache.c \
+ mail-index-dummy-view.c \
+ mail-index-fsck.c \
+ mail-index-lock.c \
+ mail-index-map.c \
+ mail-index-map-hdr.c \
+ mail-index-map-read.c \
+ mail-index-modseq.c \
+ mail-index-transaction.c \
+ mail-index-transaction-export.c \
+ mail-index-transaction-finish.c \
+ mail-index-transaction-sort-appends.c \
+ mail-index-transaction-update.c \
+ mail-index-transaction-view.c \
+ mail-index-strmap.c \
+ mail-index-sync.c \
+ mail-index-sync-ext.c \
+ mail-index-sync-keywords.c \
+ mail-index-sync-update.c \
+ mail-index-util.c \
+ mail-index-view.c \
+ mail-index-view-sync.c \
+ mail-index-write.c \
+ mail-transaction-log.c \
+ mail-transaction-log-append.c \
+ mail-transaction-log-file.c \
+ mail-transaction-log-modseq.c \
+ mail-transaction-log-view.c \
+ mailbox-log.c
+
+headers = \
+ mail-cache.h \
+ mail-cache-private.h \
+ mail-index.h \
+ mail-index-alloc-cache.h \
+ mail-index-modseq.h \
+ mail-index-private.h \
+ mail-index-strmap.h \
+ mail-index-sync-private.h \
+ mail-index-transaction-private.h \
+ mail-index-util.h \
+ mail-index-view-private.h \
+ mail-transaction-log.h \
+ mail-transaction-log-private.h \
+ mail-transaction-log-view-private.h \
+ mailbox-log.h
+
+test_programs = \
+ test-mail-cache \
+ test-mail-cache-fields \
+ test-mail-cache-purge \
+ test-mail-index \
+ test-mail-index-map \
+ test-mail-index-modseq \
+ test-mail-index-sync-ext \
+ test-mail-index-transaction-finish \
+ test-mail-index-transaction-update \
+ test-mail-index-write \
+ test-mail-transaction-log-append \
+ test-mail-transaction-log-file \
+ test-mail-transaction-log-view
+
+noinst_PROGRAMS = $(test_programs)
+
+test_libs = \
+ mail-index-util.lo \
+ ../lib-test/libtest.la \
+ ../lib/liblib.la
+
+test_deps = $(noinst_LTLIBRARIES) $(test_libs)
+
+test_mail_cache_SOURCES = test-mail-cache-common.c test-mail-cache.c
+test_mail_cache_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_cache_DEPENDENCIES = $(test_deps)
+
+test_mail_cache_fields_SOURCES = test-mail-cache-common.c test-mail-cache-fields.c
+test_mail_cache_fields_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_cache_fields_DEPENDENCIES = $(test_deps)
+
+test_mail_cache_purge_SOURCES = test-mail-cache-common.c test-mail-cache-purge.c
+test_mail_cache_purge_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_cache_purge_DEPENDENCIES = $(test_deps)
+
+test_mail_index_SOURCES = test-mail-index.c
+test_mail_index_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_DEPENDENCIES = $(test_deps)
+
+test_mail_index_map_SOURCES = test-mail-index-map.c
+test_mail_index_map_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_map_DEPENDENCIES = $(test_deps)
+
+test_mail_index_modseq_SOURCES = test-mail-index-modseq.c
+test_mail_index_modseq_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_modseq_DEPENDENCIES = $(test_deps)
+
+test_mail_index_sync_ext_SOURCES = test-mail-index-sync-ext.c
+test_mail_index_sync_ext_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_sync_ext_DEPENDENCIES = $(test_deps)
+
+test_mail_index_transaction_finish_SOURCES = test-mail-index-transaction-finish.c
+test_mail_index_transaction_finish_LDADD = mail-index-transaction-finish.lo $(test_libs)
+test_mail_index_transaction_finish_DEPENDENCIES = $(test_deps)
+
+test_mail_index_transaction_update_SOURCES = test-mail-index-transaction-update.c
+test_mail_index_transaction_update_LDADD = mail-index-transaction-update.lo $(test_libs)
+test_mail_index_transaction_update_DEPENDENCIES = $(test_deps)
+
+test_mail_index_write_SOURCES = test-mail-index-write.c
+test_mail_index_write_LDADD = mail-index-write.lo $(test_libs)
+test_mail_index_write_DEPENDENCIES = $(test_deps)
+
+test_mail_transaction_log_append_SOURCES = test-mail-transaction-log-append.c
+test_mail_transaction_log_append_LDADD = mail-transaction-log-append.lo $(test_libs)
+test_mail_transaction_log_append_DEPENDENCIES = $(test_deps)
+
+test_mail_transaction_log_file_SOURCES = test-mail-transaction-log-file.c
+test_mail_transaction_log_file_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_transaction_log_file_DEPENDENCIES = $(test_deps)
+
+test_mail_transaction_log_view_SOURCES = test-mail-transaction-log-view.c
+test_mail_transaction_log_view_LDADD = mail-transaction-log-view.lo $(test_libs)
+test_mail_transaction_log_view_DEPENDENCIES = $(test_deps)
+
+check-local:
+ for bin in $(test_programs); do \
+ if ! $(RUN_TEST) ./$$bin; then exit 1; fi; \
+ done
+
+pkginc_libdir=$(pkgincludedir)
+pkginc_lib_HEADERS = $(headers)
+
+noinst_HEADERS = \
+ test-mail-cache.h \
+ test-mail-index.h
diff --git a/src/lib-index/Makefile.in b/src/lib-index/Makefile.in
new file mode 100644
index 0000000..a25cb29
--- /dev/null
+++ b/src/lib-index/Makefile.in
@@ -0,0 +1,1285 @@
+# Makefile.in generated by automake 1.16.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2018 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+noinst_PROGRAMS = $(am__EXEEXT_1)
+subdir = src/lib-index
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/ac_checktype2.m4 \
+ $(top_srcdir)/m4/ac_typeof.m4 $(top_srcdir)/m4/arc4random.m4 \
+ $(top_srcdir)/m4/blockdev.m4 $(top_srcdir)/m4/c99_vsnprintf.m4 \
+ $(top_srcdir)/m4/clock_gettime.m4 $(top_srcdir)/m4/crypt.m4 \
+ $(top_srcdir)/m4/crypt_xpg6.m4 $(top_srcdir)/m4/dbqlk.m4 \
+ $(top_srcdir)/m4/dirent_dtype.m4 $(top_srcdir)/m4/dovecot.m4 \
+ $(top_srcdir)/m4/fd_passing.m4 $(top_srcdir)/m4/fdatasync.m4 \
+ $(top_srcdir)/m4/flexible_array_member.m4 \
+ $(top_srcdir)/m4/glibc.m4 $(top_srcdir)/m4/gmtime_max.m4 \
+ $(top_srcdir)/m4/gmtime_tm_gmtoff.m4 \
+ $(top_srcdir)/m4/ioloop.m4 $(top_srcdir)/m4/iovec.m4 \
+ $(top_srcdir)/m4/ipv6.m4 $(top_srcdir)/m4/libcap.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/libwrap.m4 \
+ $(top_srcdir)/m4/linux_mremap.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/mmap_write.m4 \
+ $(top_srcdir)/m4/mntctl.m4 $(top_srcdir)/m4/modules.m4 \
+ $(top_srcdir)/m4/notify.m4 $(top_srcdir)/m4/nsl.m4 \
+ $(top_srcdir)/m4/off_t_max.m4 $(top_srcdir)/m4/pkg.m4 \
+ $(top_srcdir)/m4/pr_set_dumpable.m4 \
+ $(top_srcdir)/m4/q_quotactl.m4 $(top_srcdir)/m4/quota.m4 \
+ $(top_srcdir)/m4/random.m4 $(top_srcdir)/m4/rlimit.m4 \
+ $(top_srcdir)/m4/sendfile.m4 $(top_srcdir)/m4/size_t_signed.m4 \
+ $(top_srcdir)/m4/sockpeercred.m4 $(top_srcdir)/m4/sql.m4 \
+ $(top_srcdir)/m4/ssl.m4 $(top_srcdir)/m4/st_tim.m4 \
+ $(top_srcdir)/m4/static_array.m4 $(top_srcdir)/m4/test_with.m4 \
+ $(top_srcdir)/m4/time_t.m4 $(top_srcdir)/m4/typeof.m4 \
+ $(top_srcdir)/m4/typeof_dev_t.m4 \
+ $(top_srcdir)/m4/uoff_t_max.m4 $(top_srcdir)/m4/vararg.m4 \
+ $(top_srcdir)/m4/want_apparmor.m4 \
+ $(top_srcdir)/m4/want_bsdauth.m4 \
+ $(top_srcdir)/m4/want_bzlib.m4 \
+ $(top_srcdir)/m4/want_cassandra.m4 \
+ $(top_srcdir)/m4/want_cdb.m4 \
+ $(top_srcdir)/m4/want_checkpassword.m4 \
+ $(top_srcdir)/m4/want_clucene.m4 $(top_srcdir)/m4/want_db.m4 \
+ $(top_srcdir)/m4/want_gssapi.m4 $(top_srcdir)/m4/want_icu.m4 \
+ $(top_srcdir)/m4/want_ldap.m4 $(top_srcdir)/m4/want_lua.m4 \
+ $(top_srcdir)/m4/want_lz4.m4 $(top_srcdir)/m4/want_lzma.m4 \
+ $(top_srcdir)/m4/want_mysql.m4 $(top_srcdir)/m4/want_pam.m4 \
+ $(top_srcdir)/m4/want_passwd.m4 $(top_srcdir)/m4/want_pgsql.m4 \
+ $(top_srcdir)/m4/want_prefetch.m4 \
+ $(top_srcdir)/m4/want_shadow.m4 \
+ $(top_srcdir)/m4/want_sodium.m4 $(top_srcdir)/m4/want_solr.m4 \
+ $(top_srcdir)/m4/want_sqlite.m4 \
+ $(top_srcdir)/m4/want_stemmer.m4 \
+ $(top_srcdir)/m4/want_systemd.m4 \
+ $(top_srcdir)/m4/want_textcat.m4 \
+ $(top_srcdir)/m4/want_unwind.m4 $(top_srcdir)/m4/want_zlib.m4 \
+ $(top_srcdir)/m4/want_zstd.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(noinst_HEADERS) \
+ $(pkginc_lib_HEADERS) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__EXEEXT_1 = test-mail-cache$(EXEEXT) \
+ test-mail-cache-fields$(EXEEXT) test-mail-cache-purge$(EXEEXT) \
+ test-mail-index$(EXEEXT) test-mail-index-map$(EXEEXT) \
+ test-mail-index-modseq$(EXEEXT) \
+ test-mail-index-sync-ext$(EXEEXT) \
+ test-mail-index-transaction-finish$(EXEEXT) \
+ test-mail-index-transaction-update$(EXEEXT) \
+ test-mail-index-write$(EXEEXT) \
+ test-mail-transaction-log-append$(EXEEXT) \
+ test-mail-transaction-log-file$(EXEEXT) \
+ test-mail-transaction-log-view$(EXEEXT)
+PROGRAMS = $(noinst_PROGRAMS)
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libindex_la_LIBADD =
+am_libindex_la_OBJECTS = mail-cache.lo mail-cache-decisions.lo \
+ mail-cache-fields.lo mail-cache-lookup.lo mail-cache-purge.lo \
+ mail-cache-transaction.lo mail-cache-sync-update.lo \
+ mail-index.lo mail-index-alloc-cache.lo \
+ mail-index-dummy-view.lo mail-index-fsck.lo mail-index-lock.lo \
+ mail-index-map.lo mail-index-map-hdr.lo mail-index-map-read.lo \
+ mail-index-modseq.lo mail-index-transaction.lo \
+ mail-index-transaction-export.lo \
+ mail-index-transaction-finish.lo \
+ mail-index-transaction-sort-appends.lo \
+ mail-index-transaction-update.lo \
+ mail-index-transaction-view.lo mail-index-strmap.lo \
+ mail-index-sync.lo mail-index-sync-ext.lo \
+ mail-index-sync-keywords.lo mail-index-sync-update.lo \
+ mail-index-util.lo mail-index-view.lo mail-index-view-sync.lo \
+ mail-index-write.lo mail-transaction-log.lo \
+ mail-transaction-log-append.lo mail-transaction-log-file.lo \
+ mail-transaction-log-modseq.lo mail-transaction-log-view.lo \
+ mailbox-log.lo
+libindex_la_OBJECTS = $(am_libindex_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 =
+am_test_mail_cache_OBJECTS = test-mail-cache-common.$(OBJEXT) \
+ test-mail-cache.$(OBJEXT)
+test_mail_cache_OBJECTS = $(am_test_mail_cache_OBJECTS)
+am_test_mail_cache_fields_OBJECTS = test-mail-cache-common.$(OBJEXT) \
+ test-mail-cache-fields.$(OBJEXT)
+test_mail_cache_fields_OBJECTS = $(am_test_mail_cache_fields_OBJECTS)
+am_test_mail_cache_purge_OBJECTS = test-mail-cache-common.$(OBJEXT) \
+ test-mail-cache-purge.$(OBJEXT)
+test_mail_cache_purge_OBJECTS = $(am_test_mail_cache_purge_OBJECTS)
+am_test_mail_index_OBJECTS = test-mail-index.$(OBJEXT)
+test_mail_index_OBJECTS = $(am_test_mail_index_OBJECTS)
+am_test_mail_index_map_OBJECTS = test-mail-index-map.$(OBJEXT)
+test_mail_index_map_OBJECTS = $(am_test_mail_index_map_OBJECTS)
+am_test_mail_index_modseq_OBJECTS = test-mail-index-modseq.$(OBJEXT)
+test_mail_index_modseq_OBJECTS = $(am_test_mail_index_modseq_OBJECTS)
+am_test_mail_index_sync_ext_OBJECTS = \
+ test-mail-index-sync-ext.$(OBJEXT)
+test_mail_index_sync_ext_OBJECTS = \
+ $(am_test_mail_index_sync_ext_OBJECTS)
+am_test_mail_index_transaction_finish_OBJECTS = \
+ test-mail-index-transaction-finish.$(OBJEXT)
+test_mail_index_transaction_finish_OBJECTS = \
+ $(am_test_mail_index_transaction_finish_OBJECTS)
+am_test_mail_index_transaction_update_OBJECTS = \
+ test-mail-index-transaction-update.$(OBJEXT)
+test_mail_index_transaction_update_OBJECTS = \
+ $(am_test_mail_index_transaction_update_OBJECTS)
+am_test_mail_index_write_OBJECTS = test-mail-index-write.$(OBJEXT)
+test_mail_index_write_OBJECTS = $(am_test_mail_index_write_OBJECTS)
+am_test_mail_transaction_log_append_OBJECTS = \
+ test-mail-transaction-log-append.$(OBJEXT)
+test_mail_transaction_log_append_OBJECTS = \
+ $(am_test_mail_transaction_log_append_OBJECTS)
+am_test_mail_transaction_log_file_OBJECTS = \
+ test-mail-transaction-log-file.$(OBJEXT)
+test_mail_transaction_log_file_OBJECTS = \
+ $(am_test_mail_transaction_log_file_OBJECTS)
+am_test_mail_transaction_log_view_OBJECTS = \
+ test-mail-transaction-log-view.$(OBJEXT)
+test_mail_transaction_log_view_OBJECTS = \
+ $(am_test_mail_transaction_log_view_OBJECTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__maybe_remake_depfiles = depfiles
+am__depfiles_remade = ./$(DEPDIR)/mail-cache-decisions.Plo \
+ ./$(DEPDIR)/mail-cache-fields.Plo \
+ ./$(DEPDIR)/mail-cache-lookup.Plo \
+ ./$(DEPDIR)/mail-cache-purge.Plo \
+ ./$(DEPDIR)/mail-cache-sync-update.Plo \
+ ./$(DEPDIR)/mail-cache-transaction.Plo \
+ ./$(DEPDIR)/mail-cache.Plo \
+ ./$(DEPDIR)/mail-index-alloc-cache.Plo \
+ ./$(DEPDIR)/mail-index-dummy-view.Plo \
+ ./$(DEPDIR)/mail-index-fsck.Plo \
+ ./$(DEPDIR)/mail-index-lock.Plo \
+ ./$(DEPDIR)/mail-index-map-hdr.Plo \
+ ./$(DEPDIR)/mail-index-map-read.Plo \
+ ./$(DEPDIR)/mail-index-map.Plo \
+ ./$(DEPDIR)/mail-index-modseq.Plo \
+ ./$(DEPDIR)/mail-index-strmap.Plo \
+ ./$(DEPDIR)/mail-index-sync-ext.Plo \
+ ./$(DEPDIR)/mail-index-sync-keywords.Plo \
+ ./$(DEPDIR)/mail-index-sync-update.Plo \
+ ./$(DEPDIR)/mail-index-sync.Plo \
+ ./$(DEPDIR)/mail-index-transaction-export.Plo \
+ ./$(DEPDIR)/mail-index-transaction-finish.Plo \
+ ./$(DEPDIR)/mail-index-transaction-sort-appends.Plo \
+ ./$(DEPDIR)/mail-index-transaction-update.Plo \
+ ./$(DEPDIR)/mail-index-transaction-view.Plo \
+ ./$(DEPDIR)/mail-index-transaction.Plo \
+ ./$(DEPDIR)/mail-index-util.Plo \
+ ./$(DEPDIR)/mail-index-view-sync.Plo \
+ ./$(DEPDIR)/mail-index-view.Plo \
+ ./$(DEPDIR)/mail-index-write.Plo ./$(DEPDIR)/mail-index.Plo \
+ ./$(DEPDIR)/mail-transaction-log-append.Plo \
+ ./$(DEPDIR)/mail-transaction-log-file.Plo \
+ ./$(DEPDIR)/mail-transaction-log-modseq.Plo \
+ ./$(DEPDIR)/mail-transaction-log-view.Plo \
+ ./$(DEPDIR)/mail-transaction-log.Plo \
+ ./$(DEPDIR)/mailbox-log.Plo \
+ ./$(DEPDIR)/test-mail-cache-common.Po \
+ ./$(DEPDIR)/test-mail-cache-fields.Po \
+ ./$(DEPDIR)/test-mail-cache-purge.Po \
+ ./$(DEPDIR)/test-mail-cache.Po \
+ ./$(DEPDIR)/test-mail-index-map.Po \
+ ./$(DEPDIR)/test-mail-index-modseq.Po \
+ ./$(DEPDIR)/test-mail-index-sync-ext.Po \
+ ./$(DEPDIR)/test-mail-index-transaction-finish.Po \
+ ./$(DEPDIR)/test-mail-index-transaction-update.Po \
+ ./$(DEPDIR)/test-mail-index-write.Po \
+ ./$(DEPDIR)/test-mail-index.Po \
+ ./$(DEPDIR)/test-mail-transaction-log-append.Po \
+ ./$(DEPDIR)/test-mail-transaction-log-file.Po \
+ ./$(DEPDIR)/test-mail-transaction-log-view.Po
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+ $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
+SOURCES = $(libindex_la_SOURCES) $(test_mail_cache_SOURCES) \
+ $(test_mail_cache_fields_SOURCES) \
+ $(test_mail_cache_purge_SOURCES) $(test_mail_index_SOURCES) \
+ $(test_mail_index_map_SOURCES) \
+ $(test_mail_index_modseq_SOURCES) \
+ $(test_mail_index_sync_ext_SOURCES) \
+ $(test_mail_index_transaction_finish_SOURCES) \
+ $(test_mail_index_transaction_update_SOURCES) \
+ $(test_mail_index_write_SOURCES) \
+ $(test_mail_transaction_log_append_SOURCES) \
+ $(test_mail_transaction_log_file_SOURCES) \
+ $(test_mail_transaction_log_view_SOURCES)
+DIST_SOURCES = $(libindex_la_SOURCES) $(test_mail_cache_SOURCES) \
+ $(test_mail_cache_fields_SOURCES) \
+ $(test_mail_cache_purge_SOURCES) $(test_mail_index_SOURCES) \
+ $(test_mail_index_map_SOURCES) \
+ $(test_mail_index_modseq_SOURCES) \
+ $(test_mail_index_sync_ext_SOURCES) \
+ $(test_mail_index_transaction_finish_SOURCES) \
+ $(test_mail_index_transaction_update_SOURCES) \
+ $(test_mail_index_write_SOURCES) \
+ $(test_mail_transaction_log_append_SOURCES) \
+ $(test_mail_transaction_log_file_SOURCES) \
+ $(test_mail_transaction_log_view_SOURCES)
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pkginc_libdir)"
+HEADERS = $(noinst_HEADERS) $(pkginc_lib_HEADERS)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+ACLOCAL_AMFLAGS = @ACLOCAL_AMFLAGS@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+APPARMOR_LIBS = @APPARMOR_LIBS@
+AR = @AR@
+AUTH_CFLAGS = @AUTH_CFLAGS@
+AUTH_LIBS = @AUTH_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BINARY_CFLAGS = @BINARY_CFLAGS@
+BINARY_LDFLAGS = @BINARY_LDFLAGS@
+BISON = @BISON@
+CASSANDRA_CFLAGS = @CASSANDRA_CFLAGS@
+CASSANDRA_LIBS = @CASSANDRA_LIBS@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CDB_LIBS = @CDB_LIBS@
+CFLAGS = @CFLAGS@
+CLUCENE_CFLAGS = @CLUCENE_CFLAGS@
+CLUCENE_LIBS = @CLUCENE_LIBS@
+COMPRESS_LIBS = @COMPRESS_LIBS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRYPT_LIBS = @CRYPT_LIBS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DICT_LIBS = @DICT_LIBS@
+DLLIB = @DLLIB@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FLEX = @FLEX@
+FUZZER_CPPFLAGS = @FUZZER_CPPFLAGS@
+FUZZER_LDFLAGS = @FUZZER_LDFLAGS@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+KRB5CONFIG = @KRB5CONFIG@
+KRB5_CFLAGS = @KRB5_CFLAGS@
+KRB5_LIBS = @KRB5_LIBS@
+LD = @LD@
+LDAP_LIBS = @LDAP_LIBS@
+LDFLAGS = @LDFLAGS@
+LD_NO_WHOLE_ARCHIVE = @LD_NO_WHOLE_ARCHIVE@
+LD_WHOLE_ARCHIVE = @LD_WHOLE_ARCHIVE@
+LIBCAP = @LIBCAP@
+LIBDOVECOT = @LIBDOVECOT@
+LIBDOVECOT_COMPRESS = @LIBDOVECOT_COMPRESS@
+LIBDOVECOT_DEPS = @LIBDOVECOT_DEPS@
+LIBDOVECOT_DSYNC = @LIBDOVECOT_DSYNC@
+LIBDOVECOT_LA_LIBS = @LIBDOVECOT_LA_LIBS@
+LIBDOVECOT_LDA = @LIBDOVECOT_LDA@
+LIBDOVECOT_LDAP = @LIBDOVECOT_LDAP@
+LIBDOVECOT_LIBFTS = @LIBDOVECOT_LIBFTS@
+LIBDOVECOT_LIBFTS_DEPS = @LIBDOVECOT_LIBFTS_DEPS@
+LIBDOVECOT_LOGIN = @LIBDOVECOT_LOGIN@
+LIBDOVECOT_LUA = @LIBDOVECOT_LUA@
+LIBDOVECOT_LUA_DEPS = @LIBDOVECOT_LUA_DEPS@
+LIBDOVECOT_SQL = @LIBDOVECOT_SQL@
+LIBDOVECOT_STORAGE = @LIBDOVECOT_STORAGE@
+LIBDOVECOT_STORAGE_DEPS = @LIBDOVECOT_STORAGE_DEPS@
+LIBEXTTEXTCAT_CFLAGS = @LIBEXTTEXTCAT_CFLAGS@
+LIBEXTTEXTCAT_LIBS = @LIBEXTTEXTCAT_LIBS@
+LIBICONV = @LIBICONV@
+LIBICU_CFLAGS = @LIBICU_CFLAGS@
+LIBICU_LIBS = @LIBICU_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSODIUM_CFLAGS = @LIBSODIUM_CFLAGS@
+LIBSODIUM_LIBS = @LIBSODIUM_LIBS@
+LIBTIRPC_CFLAGS = @LIBTIRPC_CFLAGS@
+LIBTIRPC_LIBS = @LIBTIRPC_LIBS@
+LIBTOOL = @LIBTOOL@
+LIBUNWIND_CFLAGS = @LIBUNWIND_CFLAGS@
+LIBUNWIND_LIBS = @LIBUNWIND_LIBS@
+LIBWRAP_LIBS = @LIBWRAP_LIBS@
+LINKED_STORAGE_LDADD = @LINKED_STORAGE_LDADD@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBOBJS = @LTLIBOBJS@
+LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
+LUA_CFLAGS = @LUA_CFLAGS@
+LUA_LIBS = @LUA_LIBS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MODULE_LIBS = @MODULE_LIBS@
+MODULE_SUFFIX = @MODULE_SUFFIX@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_CONFIG = @MYSQL_CONFIG@
+MYSQL_LIBS = @MYSQL_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOPLUGIN_LDFLAGS = @NOPLUGIN_LDFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PANDOC = @PANDOC@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PG_CONFIG = @PG_CONFIG@
+PIE_CFLAGS = @PIE_CFLAGS@
+PIE_LDFLAGS = @PIE_LDFLAGS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+QUOTA_LIBS = @QUOTA_LIBS@
+RANLIB = @RANLIB@
+RELRO_LDFLAGS = @RELRO_LDFLAGS@
+RPCGEN = @RPCGEN@
+RUN_TEST = @RUN_TEST@
+SED = @SED@
+SETTING_FILES = @SETTING_FILES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SQLITE_CFLAGS = @SQLITE_CFLAGS@
+SQLITE_LIBS = @SQLITE_LIBS@
+SQL_CFLAGS = @SQL_CFLAGS@
+SQL_LIBS = @SQL_LIBS@
+SSL_CFLAGS = @SSL_CFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SYSTEMD_CFLAGS = @SYSTEMD_CFLAGS@
+SYSTEMD_LIBS = @SYSTEMD_LIBS@
+VALGRIND = @VALGRIND@
+VERSION = @VERSION@
+ZSTD_CFLAGS = @ZSTD_CFLAGS@
+ZSTD_LIBS = @ZSTD_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+dict_drivers = @dict_drivers@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+moduledir = @moduledir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+rundir = @rundir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+sql_drivers = @sql_drivers@
+srcdir = @srcdir@
+ssldir = @ssldir@
+statedir = @statedir@
+sysconfdir = @sysconfdir@
+systemdservicetype = @systemdservicetype@
+systemdsystemunitdir = @systemdsystemunitdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+noinst_LTLIBRARIES = libindex.la
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/lib \
+ -I$(top_srcdir)/src/lib-test \
+ -I$(top_srcdir)/src/lib-mail
+
+libindex_la_SOURCES = \
+ mail-cache.c \
+ mail-cache-decisions.c \
+ mail-cache-fields.c \
+ mail-cache-lookup.c \
+ mail-cache-purge.c \
+ mail-cache-transaction.c \
+ mail-cache-sync-update.c \
+ mail-index.c \
+ mail-index-alloc-cache.c \
+ mail-index-dummy-view.c \
+ mail-index-fsck.c \
+ mail-index-lock.c \
+ mail-index-map.c \
+ mail-index-map-hdr.c \
+ mail-index-map-read.c \
+ mail-index-modseq.c \
+ mail-index-transaction.c \
+ mail-index-transaction-export.c \
+ mail-index-transaction-finish.c \
+ mail-index-transaction-sort-appends.c \
+ mail-index-transaction-update.c \
+ mail-index-transaction-view.c \
+ mail-index-strmap.c \
+ mail-index-sync.c \
+ mail-index-sync-ext.c \
+ mail-index-sync-keywords.c \
+ mail-index-sync-update.c \
+ mail-index-util.c \
+ mail-index-view.c \
+ mail-index-view-sync.c \
+ mail-index-write.c \
+ mail-transaction-log.c \
+ mail-transaction-log-append.c \
+ mail-transaction-log-file.c \
+ mail-transaction-log-modseq.c \
+ mail-transaction-log-view.c \
+ mailbox-log.c
+
+headers = \
+ mail-cache.h \
+ mail-cache-private.h \
+ mail-index.h \
+ mail-index-alloc-cache.h \
+ mail-index-modseq.h \
+ mail-index-private.h \
+ mail-index-strmap.h \
+ mail-index-sync-private.h \
+ mail-index-transaction-private.h \
+ mail-index-util.h \
+ mail-index-view-private.h \
+ mail-transaction-log.h \
+ mail-transaction-log-private.h \
+ mail-transaction-log-view-private.h \
+ mailbox-log.h
+
+test_programs = \
+ test-mail-cache \
+ test-mail-cache-fields \
+ test-mail-cache-purge \
+ test-mail-index \
+ test-mail-index-map \
+ test-mail-index-modseq \
+ test-mail-index-sync-ext \
+ test-mail-index-transaction-finish \
+ test-mail-index-transaction-update \
+ test-mail-index-write \
+ test-mail-transaction-log-append \
+ test-mail-transaction-log-file \
+ test-mail-transaction-log-view
+
+test_libs = \
+ mail-index-util.lo \
+ ../lib-test/libtest.la \
+ ../lib/liblib.la
+
+test_deps = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_cache_SOURCES = test-mail-cache-common.c test-mail-cache.c
+test_mail_cache_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_cache_DEPENDENCIES = $(test_deps)
+test_mail_cache_fields_SOURCES = test-mail-cache-common.c test-mail-cache-fields.c
+test_mail_cache_fields_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_cache_fields_DEPENDENCIES = $(test_deps)
+test_mail_cache_purge_SOURCES = test-mail-cache-common.c test-mail-cache-purge.c
+test_mail_cache_purge_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_cache_purge_DEPENDENCIES = $(test_deps)
+test_mail_index_SOURCES = test-mail-index.c
+test_mail_index_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_DEPENDENCIES = $(test_deps)
+test_mail_index_map_SOURCES = test-mail-index-map.c
+test_mail_index_map_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_map_DEPENDENCIES = $(test_deps)
+test_mail_index_modseq_SOURCES = test-mail-index-modseq.c
+test_mail_index_modseq_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_modseq_DEPENDENCIES = $(test_deps)
+test_mail_index_sync_ext_SOURCES = test-mail-index-sync-ext.c
+test_mail_index_sync_ext_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_index_sync_ext_DEPENDENCIES = $(test_deps)
+test_mail_index_transaction_finish_SOURCES = test-mail-index-transaction-finish.c
+test_mail_index_transaction_finish_LDADD = mail-index-transaction-finish.lo $(test_libs)
+test_mail_index_transaction_finish_DEPENDENCIES = $(test_deps)
+test_mail_index_transaction_update_SOURCES = test-mail-index-transaction-update.c
+test_mail_index_transaction_update_LDADD = mail-index-transaction-update.lo $(test_libs)
+test_mail_index_transaction_update_DEPENDENCIES = $(test_deps)
+test_mail_index_write_SOURCES = test-mail-index-write.c
+test_mail_index_write_LDADD = mail-index-write.lo $(test_libs)
+test_mail_index_write_DEPENDENCIES = $(test_deps)
+test_mail_transaction_log_append_SOURCES = test-mail-transaction-log-append.c
+test_mail_transaction_log_append_LDADD = mail-transaction-log-append.lo $(test_libs)
+test_mail_transaction_log_append_DEPENDENCIES = $(test_deps)
+test_mail_transaction_log_file_SOURCES = test-mail-transaction-log-file.c
+test_mail_transaction_log_file_LDADD = $(noinst_LTLIBRARIES) $(test_libs)
+test_mail_transaction_log_file_DEPENDENCIES = $(test_deps)
+test_mail_transaction_log_view_SOURCES = test-mail-transaction-log-view.c
+test_mail_transaction_log_view_LDADD = mail-transaction-log-view.lo $(test_libs)
+test_mail_transaction_log_view_DEPENDENCIES = $(test_deps)
+pkginc_libdir = $(pkgincludedir)
+pkginc_lib_HEADERS = $(headers)
+noinst_HEADERS = \
+ test-mail-cache.h \
+ test-mail-index.h
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/lib-index/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/lib-index/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstPROGRAMS:
+ @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
+ echo " rm -f" $$list; \
+ rm -f $$list || exit $$?; \
+ test -n "$(EXEEXT)" || exit 0; \
+ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+ echo " rm -f" $$list; \
+ rm -f $$list
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+ @list='$(noinst_LTLIBRARIES)'; \
+ locs=`for p in $$list; do echo $$p; done | \
+ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+ sort -u`; \
+ test -z "$$locs" || { \
+ echo rm -f $${locs}; \
+ rm -f $${locs}; \
+ }
+
+libindex.la: $(libindex_la_OBJECTS) $(libindex_la_DEPENDENCIES) $(EXTRA_libindex_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(LINK) $(libindex_la_OBJECTS) $(libindex_la_LIBADD) $(LIBS)
+
+test-mail-cache$(EXEEXT): $(test_mail_cache_OBJECTS) $(test_mail_cache_DEPENDENCIES) $(EXTRA_test_mail_cache_DEPENDENCIES)
+ @rm -f test-mail-cache$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_cache_OBJECTS) $(test_mail_cache_LDADD) $(LIBS)
+
+test-mail-cache-fields$(EXEEXT): $(test_mail_cache_fields_OBJECTS) $(test_mail_cache_fields_DEPENDENCIES) $(EXTRA_test_mail_cache_fields_DEPENDENCIES)
+ @rm -f test-mail-cache-fields$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_cache_fields_OBJECTS) $(test_mail_cache_fields_LDADD) $(LIBS)
+
+test-mail-cache-purge$(EXEEXT): $(test_mail_cache_purge_OBJECTS) $(test_mail_cache_purge_DEPENDENCIES) $(EXTRA_test_mail_cache_purge_DEPENDENCIES)
+ @rm -f test-mail-cache-purge$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_cache_purge_OBJECTS) $(test_mail_cache_purge_LDADD) $(LIBS)
+
+test-mail-index$(EXEEXT): $(test_mail_index_OBJECTS) $(test_mail_index_DEPENDENCIES) $(EXTRA_test_mail_index_DEPENDENCIES)
+ @rm -f test-mail-index$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_index_OBJECTS) $(test_mail_index_LDADD) $(LIBS)
+
+test-mail-index-map$(EXEEXT): $(test_mail_index_map_OBJECTS) $(test_mail_index_map_DEPENDENCIES) $(EXTRA_test_mail_index_map_DEPENDENCIES)
+ @rm -f test-mail-index-map$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_index_map_OBJECTS) $(test_mail_index_map_LDADD) $(LIBS)
+
+test-mail-index-modseq$(EXEEXT): $(test_mail_index_modseq_OBJECTS) $(test_mail_index_modseq_DEPENDENCIES) $(EXTRA_test_mail_index_modseq_DEPENDENCIES)
+ @rm -f test-mail-index-modseq$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_index_modseq_OBJECTS) $(test_mail_index_modseq_LDADD) $(LIBS)
+
+test-mail-index-sync-ext$(EXEEXT): $(test_mail_index_sync_ext_OBJECTS) $(test_mail_index_sync_ext_DEPENDENCIES) $(EXTRA_test_mail_index_sync_ext_DEPENDENCIES)
+ @rm -f test-mail-index-sync-ext$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_index_sync_ext_OBJECTS) $(test_mail_index_sync_ext_LDADD) $(LIBS)
+
+test-mail-index-transaction-finish$(EXEEXT): $(test_mail_index_transaction_finish_OBJECTS) $(test_mail_index_transaction_finish_DEPENDENCIES) $(EXTRA_test_mail_index_transaction_finish_DEPENDENCIES)
+ @rm -f test-mail-index-transaction-finish$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_index_transaction_finish_OBJECTS) $(test_mail_index_transaction_finish_LDADD) $(LIBS)
+
+test-mail-index-transaction-update$(EXEEXT): $(test_mail_index_transaction_update_OBJECTS) $(test_mail_index_transaction_update_DEPENDENCIES) $(EXTRA_test_mail_index_transaction_update_DEPENDENCIES)
+ @rm -f test-mail-index-transaction-update$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_index_transaction_update_OBJECTS) $(test_mail_index_transaction_update_LDADD) $(LIBS)
+
+test-mail-index-write$(EXEEXT): $(test_mail_index_write_OBJECTS) $(test_mail_index_write_DEPENDENCIES) $(EXTRA_test_mail_index_write_DEPENDENCIES)
+ @rm -f test-mail-index-write$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_index_write_OBJECTS) $(test_mail_index_write_LDADD) $(LIBS)
+
+test-mail-transaction-log-append$(EXEEXT): $(test_mail_transaction_log_append_OBJECTS) $(test_mail_transaction_log_append_DEPENDENCIES) $(EXTRA_test_mail_transaction_log_append_DEPENDENCIES)
+ @rm -f test-mail-transaction-log-append$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_transaction_log_append_OBJECTS) $(test_mail_transaction_log_append_LDADD) $(LIBS)
+
+test-mail-transaction-log-file$(EXEEXT): $(test_mail_transaction_log_file_OBJECTS) $(test_mail_transaction_log_file_DEPENDENCIES) $(EXTRA_test_mail_transaction_log_file_DEPENDENCIES)
+ @rm -f test-mail-transaction-log-file$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_transaction_log_file_OBJECTS) $(test_mail_transaction_log_file_LDADD) $(LIBS)
+
+test-mail-transaction-log-view$(EXEEXT): $(test_mail_transaction_log_view_OBJECTS) $(test_mail_transaction_log_view_DEPENDENCIES) $(EXTRA_test_mail_transaction_log_view_DEPENDENCIES)
+ @rm -f test-mail-transaction-log-view$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(test_mail_transaction_log_view_OBJECTS) $(test_mail_transaction_log_view_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-cache-decisions.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-cache-fields.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-cache-lookup.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-cache-purge.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-cache-sync-update.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-cache-transaction.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-cache.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-alloc-cache.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-dummy-view.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-fsck.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-lock.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-map-hdr.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-map-read.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-map.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-modseq.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-strmap.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-sync-ext.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-sync-keywords.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-sync-update.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-sync.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-transaction-export.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-transaction-finish.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-transaction-sort-appends.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-transaction-update.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-transaction-view.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-transaction.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-util.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-view-sync.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-view.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index-write.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-index.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-transaction-log-append.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-transaction-log-file.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-transaction-log-modseq.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-transaction-log-view.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mail-transaction-log.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mailbox-log.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-cache-common.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-cache-fields.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-cache-purge.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-cache.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-index-map.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-index-modseq.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-index-sync-ext.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-index-transaction-finish.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-index-transaction-update.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-index-write.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-index.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-transaction-log-append.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-transaction-log-file.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-mail-transaction-log-view.Po@am__quote@ # am--include-marker
+
+$(am__depfiles_remade):
+ @$(MKDIR_P) $(@D)
+ @echo '# dummy' >$@-t && $(am__mv) $@-t $@
+
+am--depfiles: $(am__depfiles_remade)
+
+.c.o:
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+install-pkginc_libHEADERS: $(pkginc_lib_HEADERS)
+ @$(NORMAL_INSTALL)
+ @list='$(pkginc_lib_HEADERS)'; test -n "$(pkginc_libdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pkginc_libdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pkginc_libdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(pkginc_libdir)'"; \
+ $(INSTALL_HEADER) $$files "$(DESTDIR)$(pkginc_libdir)" || exit $$?; \
+ done
+
+uninstall-pkginc_libHEADERS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(pkginc_lib_HEADERS)'; test -n "$(pkginc_libdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pkginc_libdir)'; $(am__uninstall_files_from_dir)
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) distdir-am
+
+distdir-am: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+ $(MAKE) $(AM_MAKEFLAGS) check-local
+check: check-am
+all-am: Makefile $(PROGRAMS) $(LTLIBRARIES) $(HEADERS)
+installdirs:
+ for dir in "$(DESTDIR)$(pkginc_libdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+ clean-noinstPROGRAMS mostlyclean-am
+
+distclean: distclean-am
+ -rm -f ./$(DEPDIR)/mail-cache-decisions.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-fields.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-lookup.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-purge.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-sync-update.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-transaction.Plo
+ -rm -f ./$(DEPDIR)/mail-cache.Plo
+ -rm -f ./$(DEPDIR)/mail-index-alloc-cache.Plo
+ -rm -f ./$(DEPDIR)/mail-index-dummy-view.Plo
+ -rm -f ./$(DEPDIR)/mail-index-fsck.Plo
+ -rm -f ./$(DEPDIR)/mail-index-lock.Plo
+ -rm -f ./$(DEPDIR)/mail-index-map-hdr.Plo
+ -rm -f ./$(DEPDIR)/mail-index-map-read.Plo
+ -rm -f ./$(DEPDIR)/mail-index-map.Plo
+ -rm -f ./$(DEPDIR)/mail-index-modseq.Plo
+ -rm -f ./$(DEPDIR)/mail-index-strmap.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync-ext.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync-keywords.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync-update.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-export.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-finish.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-sort-appends.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-update.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-view.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction.Plo
+ -rm -f ./$(DEPDIR)/mail-index-util.Plo
+ -rm -f ./$(DEPDIR)/mail-index-view-sync.Plo
+ -rm -f ./$(DEPDIR)/mail-index-view.Plo
+ -rm -f ./$(DEPDIR)/mail-index-write.Plo
+ -rm -f ./$(DEPDIR)/mail-index.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-append.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-file.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-modseq.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-view.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log.Plo
+ -rm -f ./$(DEPDIR)/mailbox-log.Plo
+ -rm -f ./$(DEPDIR)/test-mail-cache-common.Po
+ -rm -f ./$(DEPDIR)/test-mail-cache-fields.Po
+ -rm -f ./$(DEPDIR)/test-mail-cache-purge.Po
+ -rm -f ./$(DEPDIR)/test-mail-cache.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-map.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-modseq.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-sync-ext.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-transaction-finish.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-transaction-update.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-write.Po
+ -rm -f ./$(DEPDIR)/test-mail-index.Po
+ -rm -f ./$(DEPDIR)/test-mail-transaction-log-append.Po
+ -rm -f ./$(DEPDIR)/test-mail-transaction-log-file.Po
+ -rm -f ./$(DEPDIR)/test-mail-transaction-log-view.Po
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-pkginc_libHEADERS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f ./$(DEPDIR)/mail-cache-decisions.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-fields.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-lookup.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-purge.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-sync-update.Plo
+ -rm -f ./$(DEPDIR)/mail-cache-transaction.Plo
+ -rm -f ./$(DEPDIR)/mail-cache.Plo
+ -rm -f ./$(DEPDIR)/mail-index-alloc-cache.Plo
+ -rm -f ./$(DEPDIR)/mail-index-dummy-view.Plo
+ -rm -f ./$(DEPDIR)/mail-index-fsck.Plo
+ -rm -f ./$(DEPDIR)/mail-index-lock.Plo
+ -rm -f ./$(DEPDIR)/mail-index-map-hdr.Plo
+ -rm -f ./$(DEPDIR)/mail-index-map-read.Plo
+ -rm -f ./$(DEPDIR)/mail-index-map.Plo
+ -rm -f ./$(DEPDIR)/mail-index-modseq.Plo
+ -rm -f ./$(DEPDIR)/mail-index-strmap.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync-ext.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync-keywords.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync-update.Plo
+ -rm -f ./$(DEPDIR)/mail-index-sync.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-export.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-finish.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-sort-appends.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-update.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction-view.Plo
+ -rm -f ./$(DEPDIR)/mail-index-transaction.Plo
+ -rm -f ./$(DEPDIR)/mail-index-util.Plo
+ -rm -f ./$(DEPDIR)/mail-index-view-sync.Plo
+ -rm -f ./$(DEPDIR)/mail-index-view.Plo
+ -rm -f ./$(DEPDIR)/mail-index-write.Plo
+ -rm -f ./$(DEPDIR)/mail-index.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-append.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-file.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-modseq.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log-view.Plo
+ -rm -f ./$(DEPDIR)/mail-transaction-log.Plo
+ -rm -f ./$(DEPDIR)/mailbox-log.Plo
+ -rm -f ./$(DEPDIR)/test-mail-cache-common.Po
+ -rm -f ./$(DEPDIR)/test-mail-cache-fields.Po
+ -rm -f ./$(DEPDIR)/test-mail-cache-purge.Po
+ -rm -f ./$(DEPDIR)/test-mail-cache.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-map.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-modseq.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-sync-ext.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-transaction-finish.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-transaction-update.Po
+ -rm -f ./$(DEPDIR)/test-mail-index-write.Po
+ -rm -f ./$(DEPDIR)/test-mail-index.Po
+ -rm -f ./$(DEPDIR)/test-mail-transaction-log-append.Po
+ -rm -f ./$(DEPDIR)/test-mail-transaction-log-file.Po
+ -rm -f ./$(DEPDIR)/test-mail-transaction-log-view.Po
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkginc_libHEADERS
+
+.MAKE: check-am install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am \
+ check-local clean clean-generic clean-libtool \
+ clean-noinstLTLIBRARIES clean-noinstPROGRAMS cscopelist-am \
+ ctags ctags-am distclean distclean-compile distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-pkginc_libHEADERS install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+ pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+ uninstall-pkginc_libHEADERS
+
+.PRECIOUS: Makefile
+
+
+check-local:
+ for bin in $(test_programs); do \
+ if ! $(RUN_TEST) ./$$bin; then exit 1; fi; \
+ done
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/lib-index/mail-cache-decisions.c b/src/lib-index/mail-cache-decisions.c
new file mode 100644
index 0000000..93435d0
--- /dev/null
+++ b/src/lib-index/mail-cache-decisions.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+/*
+ IMAP clients can work in many different ways. There are basically 2
+ types:
+
+ 1. Online clients that ask for the same information multiple times (e.g.
+ webmails, Pine)
+
+ 2. Offline clients that usually download first some of the interesting
+ message headers and only after that the message bodies (possibly
+ automatically, or possibly only when the user opens the mail). Most
+ non-webmail IMAP clients behave like this.
+
+ Cache file is extremely helpful with the type 1 clients. The first time
+ that client requests message headers or some other metadata they're
+ stored into the cache file. The second time they ask for the same
+ information Dovecot can now get it quickly from the cache file instead
+ of opening the message and parsing the headers.
+
+ For type 2 clients the cache file is also somewhat helpful if client
+ fetches any initial metadata. Some of the information is helpful in any
+ case, for example it's required to know the message's virtual size when
+ downloading the message with IMAP. Without the virtual size being in cache
+  Dovecot has to read the whole message first to calculate it, which
+ increases CPU usage.
+
+ Only the specified fields that client(s) have asked for earlier are
+ stored into cache file. This allows Dovecot to be adaptive to different
+ clients' needs and still not waste disk space (and cause extra disk
+ I/O!) for fields that client never needs.
+
+ Dovecot can cache fields either permanently or temporarily. Temporarily
+ cached fields are dropped from the cache file after about a week.
+ Dovecot uses two rules to determine when data should be cached
+ permanently instead of temporarily:
+
+ 1. Client accessed messages in non-sequential order within this session.
+ This most likely means it doesn't have a local cache.
+
+ 2. Client accessed a message older than one week.
+
+ These rules might not always work optimally, so Dovecot also re-evaluates
+ the caching decisions once in a while:
+
+ - When caching decision is YES (permanently cache the field), the field's
+ last_used is updated only when the caching decision has been verified to
+ be correct.
+
+ - When caching decision is TEMP, the last_used is updated whenever the field
+ is accessed.
+
+ - When last_used becomes 30 days old (or unaccessed_field_drop_secs) a
+ YES caching decision is changed to TEMP.
+
+ - When last_used becomes 60 days old (or 2*unaccessed_field_drop_secs) a
+ TEMP caching decision is changed to NO.
+*/
+
+#include "lib.h"
+#include "ioloop.h"
+#include "mail-cache-private.h"
+
+/* Return the string name of a caching decision: "no", "temp" or "yes".
+   The MAIL_CACHE_DECISION_FORCED flag bit is masked away first. */
+const char *mail_cache_decision_to_string(enum mail_cache_decision_type dec)
+{
+	switch (dec & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED)) {
+	case MAIL_CACHE_DECISION_NO:
+		return "no";
+	case MAIL_CACHE_DECISION_TEMP:
+		return "temp";
+	case MAIL_CACHE_DECISION_YES:
+		return "yes";
+	}
+	i_unreached();
+}
+
+/* Create a "mail_cache_decision_changed" passthrough event for the given
+   field, pre-filled with the field's name and last_used timestamp.
+   Callers append the reason and old/new decision fields before emitting. */
+struct event_passthrough *
+mail_cache_decision_changed_event(struct mail_cache *cache, struct event *event,
+				  unsigned int field)
+{
+	return event_create_passthrough(event)->
+		set_name("mail_cache_decision_changed")->
+		add_str("field", cache->fields[field].field.name)->
+		add_int("last_used", cache->fields[field].field.last_used);
+}
+
+/* Set the field's last_used timestamp to the current ioloop time. If the
+   field also exists in the cache file, mark the field header as needing a
+   rewrite so the new timestamp is eventually flushed to disk. */
+static void
+mail_cache_update_last_used(struct mail_cache *cache, unsigned int field)
+{
+	cache->fields[field].field.last_used = (uint32_t)ioloop_time;
+	if (cache->field_file_map[field] != (uint32_t)-1)
+		cache->field_header_write_pending = TRUE;
+}
+
+/* Re-evaluate the caching decision for a field that was accessed for the
+   message at seq. Refreshes the field's last_used timestamp at most about
+   once a day, and may promote a TEMP decision to YES on nonordered access
+   or access to a mail older than a week - see the rules described at the
+   top of this file. No-op when view->no_decision_updates is set or the
+   decision is a forced NO. */
+void mail_cache_decision_state_update(struct mail_cache_view *view,
+				      uint32_t seq, unsigned int field)
+{
+	struct mail_cache *cache = view->cache;
+	enum mail_cache_decision_type dec;
+	const struct mail_index_header *hdr;
+	uint32_t uid;
+
+	i_assert(field < cache->fields_count);
+
+	if (view->no_decision_updates)
+		return;
+
+	dec = cache->fields[field].field.decision;
+	if (dec == (MAIL_CACHE_DECISION_NO | MAIL_CACHE_DECISION_FORCED)) {
+		/* don't update last_used */
+		return;
+	}
+
+	/* update last_used about once a day */
+	bool last_used_need_update =
+		ioloop_time - cache->fields[field].field.last_used > 3600*24;
+
+	if (dec == MAIL_CACHE_DECISION_NO ||
+	    (dec & MAIL_CACHE_DECISION_FORCED) != 0) {
+		/* a) forced decision
+		   b) not cached, mail_cache_decision_add() will handle this */
+		if (last_used_need_update)
+			mail_cache_update_last_used(cache, field);
+		return;
+	}
+	if (dec == MAIL_CACHE_DECISION_YES) {
+		if (!last_used_need_update)
+			return;
+		/* update last_used only when we can confirm that the YES
+		   decision is still correct. */
+	} else {
+		/* see if we want to change decision from TEMP to YES */
+		i_assert(dec == MAIL_CACHE_DECISION_TEMP);
+		if (last_used_need_update)
+			mail_cache_update_last_used(cache, field);
+	}
+
+	mail_index_lookup_uid(view->view, seq, &uid);
+	hdr = mail_index_get_header(view->view);
+
+	if (uid >= cache->fields[field].uid_highwater &&
+	    uid >= hdr->day_first_uid[7]) {
+		cache->fields[field].uid_highwater = uid;
+	} else if (dec == MAIL_CACHE_DECISION_YES) {
+		/* Confirmed that we still want to preserve YES as cache
+		   decision. We can update last_used now. */
+		i_assert(last_used_need_update);
+		mail_cache_update_last_used(cache, field);
+	} else {
+		/* a) nonordered access within this session. if client doesn't
+		   request messages in growing order, we assume it doesn't
+		   have a permanent local cache.
+		   b) accessing message older than one week. assume it's a
+		   client with no local cache. if it was just a new client
+		   generating the local cache for the first time, we'll
+		   drop back to TEMP within few months. */
+		i_assert(dec == MAIL_CACHE_DECISION_TEMP);
+		cache->fields[field].field.decision = MAIL_CACHE_DECISION_YES;
+		cache->fields[field].decision_dirty = TRUE;
+		cache->field_header_write_pending = TRUE;
+
+		const char *reason = uid < hdr->day_first_uid[7] ?
+			"old_mail" : "unordered_access";
+		struct event_passthrough *e =
+			mail_cache_decision_changed_event(
+				view->cache, view->cache->event, field)->
+			add_str("reason", reason)->
+			add_int("uid", uid)->
+			add_str("old_decision", "temp")->
+			add_str("new_decision", "yes");
+		e_debug(e->event(), "Changing field %s decision temp -> yes (uid=%u)",
+			cache->fields[field].field.name, uid);
+	}
+}
+
+/* Record that an uncached field was wanted for the message at seq. On the
+   field's first use the decision changes from NO to TEMP; last_used and
+   uid_highwater are updated and a debug event is emitted. No-op for forced
+   decisions, fields that are already being cached, or when
+   view->no_decision_updates is set. */
+void mail_cache_decision_add(struct mail_cache_view *view, uint32_t seq,
+			     unsigned int field)
+{
+	struct mail_cache *cache = view->cache;
+	struct mail_cache_field_private *priv;
+	uint32_t uid;
+
+	i_assert(field < cache->fields_count);
+
+	if (view->no_decision_updates)
+		return;
+
+	priv = &cache->fields[field];
+	if (priv->field.decision != MAIL_CACHE_DECISION_NO &&
+	    priv->field.last_used != 0) {
+		/* a) forced decision
+		   b) we're already caching it, so it just wasn't in cache */
+		return;
+	}
+
+	/* field used the first time */
+	if (priv->field.decision == MAIL_CACHE_DECISION_NO)
+		priv->field.decision = MAIL_CACHE_DECISION_TEMP;
+	priv->field.last_used = ioloop_time;
+	priv->decision_dirty = TRUE;
+	cache->field_header_write_pending = TRUE;
+
+	mail_index_lookup_uid(view->view, seq, &uid);
+	priv->uid_highwater = uid;
+
+	const char *new_decision =
+		mail_cache_decision_to_string(priv->field.decision);
+	struct event_passthrough *e =
+		mail_cache_decision_changed_event(cache, cache->event, field)->
+		add_str("reason", "add")->
+		add_int("uid", uid)->
+		add_str("old_decision", "no")->
+		add_str("new_decision", new_decision);
+	e_debug(e->event(), "Adding field %s to cache for the first time (uid=%u)",
+		priv->field.name, uid);
+}
+
+/* Copy all caching decisions from src to dst: registers src's fields in
+   dst and purges dst so the updated field headers get written out.
+   Returns 0 on success (including when src has no usable decisions),
+   -1 on error. */
+int mail_cache_decisions_copy(struct mail_cache *src, struct mail_cache *dst)
+{
+	if (mail_cache_open_and_verify(src) < 0)
+		return -1;
+	if (MAIL_CACHE_IS_UNUSABLE(src))
+		return 0; /* no caching decisions */
+
+	unsigned int count = 0;
+	struct mail_cache_field *fields =
+		mail_cache_register_get_list(src, pool_datastack_create(), &count);
+	i_assert(fields != NULL || count == 0);
+	if (count > 0)
+		mail_cache_register_fields(dst, fields, count);
+
+	/* Destination cache isn't expected to exist yet, so use purging
+	   to create it. Setting field_header_write_pending also guarantees
+	   that the fields are updated even if the cache was already created
+	   and no purging was done. */
+	dst->field_header_write_pending = TRUE;
+	return mail_cache_purge(dst, 0, "copy cache decisions");
+}
diff --git a/src/lib-index/mail-cache-fields.c b/src/lib-index/mail-cache-fields.c
new file mode 100644
index 0000000..429e0d2
--- /dev/null
+++ b/src/lib-index/mail-cache-fields.c
@@ -0,0 +1,660 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "buffer.h"
+#include "hash.h"
+#include "file-cache.h"
+#include "read-full.h"
+#include "write-full.h"
+#include "mmap-util.h"
+#include "mail-cache-private.h"
+
+#include <stddef.h>
+
+#define CACHE_FIELD_IS_NEWLY_WANTED(cache, field_idx) \
+ ((cache)->field_file_map[field_idx] == (uint32_t)-1 && \
+ (cache)->fields[field_idx].used)
+
+/* Return TRUE if the field type always has a fixed on-disk size
+   (fixed-size and bitmask fields), FALSE for variable-sized types. */
+static bool field_has_fixed_size(enum mail_cache_field_type type)
+{
+	switch (type) {
+	case MAIL_CACHE_FIELD_FIXED_SIZE:
+	case MAIL_CACHE_FIELD_BITMASK:
+		return TRUE;
+	case MAIL_CACHE_FIELD_VARIABLE_SIZE:
+	case MAIL_CACHE_FIELD_STRING:
+	case MAIL_CACHE_FIELD_HEADER:
+		return FALSE;
+
+	case MAIL_CACHE_FIELD_COUNT:
+		break;
+	}
+
+	i_unreached();
+	/* NOTE(review): i_unreached() looks like it panics, which would make
+	   this return dead code kept only to silence compilers - confirm */
+	return FALSE;
+}
+
+/* Return TRUE if the decision byte read from the cache file is one of the
+   known decision values, ignoring the FORCED flag bit. */
+static bool field_decision_is_valid(enum mail_cache_decision_type type)
+{
+	switch (type & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED)) {
+	case MAIL_CACHE_DECISION_NO:
+	case MAIL_CACHE_DECISION_TEMP:
+	case MAIL_CACHE_DECISION_YES:
+		return TRUE;
+	default:
+		return FALSE;
+	}
+}
+
+/* Verify that the type (and, for fixed-size types, the size) stored in the
+   cache file for the already-registered field idx matches the registration.
+   Returns 0 if they match, -1 after marking the cache corrupted if not. */
+static int field_type_verify(struct mail_cache *cache, unsigned int idx,
+			     enum mail_cache_field_type type, unsigned int size)
+{
+	const struct mail_cache_field *field = &cache->fields[idx].field;
+
+	if (field->type != type) {
+		mail_cache_set_corrupted(cache,
+			"registered field %s type changed", field->name);
+		return -1;
+	}
+	if (field->field_size != size && field_has_fixed_size(type)) {
+		mail_cache_set_corrupted(cache,
+			"registered field %s size changed", field->name);
+		return -1;
+	}
+	return 0;
+}
+
+/* Merge a re-registration of an existing field into its stored state:
+   a forced decision always wins, otherwise the decision is only upgraded
+   (NO -> TEMP -> YES); last_used only moves forward. Changes made after
+   the initial registration phase mark the decision dirty so it is written
+   back to the file. The field's type/size is also verified. */
+static void
+mail_cache_field_update(struct mail_cache *cache,
+			const struct mail_cache_field *newfield)
+{
+	struct mail_cache_field_private *orig;
+	bool initial_registering;
+
+	i_assert(newfield->type < MAIL_CACHE_FIELD_COUNT);
+
+	/* are we still doing the initial cache field registering for
+	   internal fields and for mail_*cache_fields settings? */
+	initial_registering = cache->file_fields_count == 0;
+
+	orig = &cache->fields[newfield->idx];
+	if ((newfield->decision & MAIL_CACHE_DECISION_FORCED) != 0 ||
+	    ((orig->field.decision & MAIL_CACHE_DECISION_FORCED) == 0 &&
+	     newfield->decision > orig->field.decision)) {
+		orig->field.decision = newfield->decision;
+		if (!initial_registering)
+			orig->decision_dirty = TRUE;
+	}
+	if (orig->field.last_used < newfield->last_used) {
+		orig->field.last_used = newfield->last_used;
+		if (!initial_registering)
+			orig->decision_dirty = TRUE;
+	}
+	if (orig->decision_dirty)
+		cache->field_header_write_pending = TRUE;
+
+	(void)field_type_verify(cache, newfield->idx,
+				newfield->type, newfield->field_size);
+}
+
+/* Register an array of fields. Fields that already exist (matched by name)
+   get their idx filled in and their decision/last_used merged via
+   mail_cache_field_update(); previously unknown fields are appended to the
+   registration arrays and assigned fresh indexes. Duplicate names within
+   the input array share one index (compared case-insensitively). */
+void mail_cache_register_fields(struct mail_cache *cache,
+				struct mail_cache_field *fields,
+				unsigned int fields_count)
+{
+	char *name;
+	void *value;
+	unsigned int new_idx;
+	unsigned int i, j, registered_count;
+
+	new_idx = cache->fields_count;
+	for (i = 0; i < fields_count; i++) {
+		if (hash_table_lookup_full(cache->field_name_hash,
+					   fields[i].name, &name, &value)) {
+			fields[i].idx = POINTER_CAST_TO(value, unsigned int);
+			mail_cache_field_update(cache, &fields[i]);
+			continue;
+		}
+
+		/* check if the same header is being registered in the
+		   same field array */
+		for (j = 0; j < i; j++) {
+			if (strcasecmp(fields[i].name, fields[j].name) == 0) {
+				fields[i].idx = fields[j].idx;
+				break;
+			}
+		}
+
+		if (j == i)
+			fields[i].idx = new_idx++;
+	}
+
+	/* nothing new to register */
+	if (new_idx == cache->fields_count)
+		return;
+
+	/* @UNSAFE */
+	cache->fields = i_realloc_type(cache->fields,
+				       struct mail_cache_field_private,
+				       cache->fields_count, new_idx);
+	cache->field_file_map =
+		i_realloc_type(cache->field_file_map, uint32_t,
+			       cache->fields_count, new_idx);
+
+	registered_count = cache->fields_count;
+	for (i = 0; i < fields_count; i++) {
+		unsigned int idx = fields[i].idx;
+
+		if (idx < registered_count)
+			continue;
+
+		/* new index - save it */
+		name = p_strdup(cache->field_pool, fields[i].name);
+		cache->fields[idx].field = fields[i];
+		cache->fields[idx].field.name = name;
+		cache->fields[idx].field.last_used = fields[i].last_used;
+		cache->field_file_map[idx] = (uint32_t)-1;
+
+		if (!field_has_fixed_size(cache->fields[idx].field.type))
+			cache->fields[idx].field.field_size = UINT_MAX;
+
+		hash_table_insert(cache->field_name_hash, name,
+				  POINTER_CAST(idx));
+		registered_count++;
+	}
+	i_assert(registered_count == new_idx);
+	cache->fields_count = new_idx;
+}
+
+/* Return the registered index of the named field, or UINT_MAX if the field
+   name is not registered. */
+unsigned int
+mail_cache_register_lookup(struct mail_cache *cache, const char *name)
+{
+	char *key;
+	void *value;
+
+	if (hash_table_lookup_full(cache->field_name_hash, name, &key, &value))
+		return POINTER_CAST_TO(value, unsigned int);
+	else
+		return UINT_MAX;
+}
+
+/* Return the registered field struct for the given (valid) field index. */
+const struct mail_cache_field *
+mail_cache_register_get_field(struct mail_cache *cache, unsigned int field_idx)
+{
+	i_assert(field_idx < cache->fields_count);
+
+	return &cache->fields[field_idx].field;
+}
+
+/* Return a pool-allocated copy of all registered fields, with the field
+   count stored in *count_r (NULL is returned when there are no fields).
+   The cache is opened first (best-effort) so fields stored only in the
+   cache file are included as well. */
+struct mail_cache_field *
+mail_cache_register_get_list(struct mail_cache *cache, pool_t pool,
+			     unsigned int *count_r)
+{
+	struct mail_cache_field *list;
+	unsigned int i;
+
+	if (!cache->opened)
+		(void)mail_cache_open_and_verify(cache);
+
+	list = cache->fields_count == 0 ? NULL :
+		p_new(pool, struct mail_cache_field, cache->fields_count);
+	for (i = 0; i < cache->fields_count; i++) {
+		list[i] = cache->fields[i].field;
+		list[i].name = p_strdup(pool, list[i].name);
+	}
+
+	*count_r = cache->fields_count;
+	return list;
+}
+
+/* Locate the latest "header fields" block in the cache file by following
+   the next_offset chain, starting from the cached last_field_header_offset
+   when available, otherwise from the main header. On success stores the
+   block's offset in *offset_r and, when field_hdr_r != NULL, also maps the
+   block and returns a pointer to it. *offset_r is 0 when the cache is
+   unusable. Returns 0 on success, -1 on I/O error or corruption. Requests
+   a later purge when the continuation chain has grown too long. */
+static int
+mail_cache_header_fields_get_offset(struct mail_cache *cache,
+				    uint32_t *offset_r,
+				    const struct mail_cache_header_fields **field_hdr_r)
+{
+	const struct mail_cache_header_fields *field_hdr;
+	struct mail_cache_header_fields tmp_field_hdr;
+	const void *data;
+	uint32_t offset = 0, next_offset, field_hdr_size;
+	unsigned int next_count = 0;
+	int ret;
+
+	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
+		*offset_r = 0;
+		if (field_hdr_r != NULL)
+			*field_hdr_r = NULL;
+		return 0;
+	}
+
+	/* find the latest header */
+	offset = 0;
+	next_offset = cache->last_field_header_offset != 0 ?
+		cache->last_field_header_offset :
+		mail_index_offset_to_uint32(cache->hdr->field_header_offset);
+	while (next_offset != 0) {
+		if (next_offset == offset) {
+			mail_cache_set_corrupted(cache,
+				"next_offset in field header loops");
+			return -1;
+		}
+		/* In Dovecot v2.2+ we don't try to use any holes,
+		   so next_offset must always be larger than current offset.
+		   also makes it easier to guarantee there aren't any loops
+		   (which we don't bother doing for old files) */
+		if (next_offset < offset && cache->hdr->minor_version != 0) {
+			mail_cache_set_corrupted(cache,
+				"next_offset in field header decreases");
+			return -1;
+		}
+		offset = next_offset;
+
+		if (cache->mmap_base != NULL || cache->map_with_read) {
+			ret = mail_cache_map(cache, offset, sizeof(*field_hdr),
+					     &data);
+			if (ret <= 0) {
+				if (ret < 0)
+					return -1;
+				mail_cache_set_corrupted(cache,
+					"header field next_offset points outside file");
+				return -1;
+			}
+			field_hdr = data;
+		} else {
+			/* if we need to follow multiple offsets to get to
+			   the last one, it's faster to just pread() the file
+			   instead of going through cache */
+			ret = pread_full(cache->fd, &tmp_field_hdr,
+					 sizeof(tmp_field_hdr), offset);
+			if (ret < 0) {
+				mail_cache_set_syscall_error(cache, "pread()");
+				return -1;
+			}
+			if (ret == 0) {
+				mail_cache_set_corrupted(cache,
+					"header field next_offset points outside file");
+				return -1;
+			}
+			field_hdr = &tmp_field_hdr;
+		}
+
+		next_offset =
+			mail_index_offset_to_uint32(field_hdr->next_offset);
+		next_count++;
+	}
+
+	if (offset == 0) {
+		mail_cache_set_corrupted(cache, "missing header fields");
+		return -1;
+	}
+	/* remember where the latest header is, so the chain doesn't have to
+	   be walked from the beginning next time */
+	cache->last_field_header_offset = offset;
+
+	if (next_count > cache->index->optimization_set.cache.purge_header_continue_count) {
+		mail_cache_purge_later(cache, t_strdup_printf(
+			"Too many continued headers (%u)", next_count));
+	}
+
+	if (field_hdr_r != NULL) {
+		/* detect corrupted size later */
+		field_hdr_size = I_MAX(field_hdr->size, sizeof(*field_hdr));
+		if (cache->file_cache != NULL) {
+			/* invalidate the cache fields area to make sure we
+			   get the latest cache decisions/last_used fields */
+			file_cache_invalidate(cache->file_cache, offset,
+					      field_hdr_size);
+		}
+		if (cache->read_buf != NULL)
+			buffer_set_used_size(cache->read_buf, 0);
+		ret = mail_cache_map(cache, offset, field_hdr_size, &data);
+		if (ret < 0)
+			return -1;
+		if (ret == 0) {
+			mail_cache_set_corrupted(cache,
+				"header field size outside file");
+			return -1;
+		}
+		*field_hdr_r = data;
+	}
+	*offset_r = offset;
+	return 0;
+}
+
+/* Read the field definitions from the cache file's latest fields header
+   and merge them into the in-memory registration: rebuilds the
+   field_file_map/file_field_map index mappings, updates decisions and
+   last_used from the file (unless forced or recently changed internally),
+   and schedules purging for fields unused long enough to be dropped or
+   demoted. Returns 0 on success, -1 on corruption or I/O error. */
+int mail_cache_header_fields_read(struct mail_cache *cache)
+{
+	const struct mail_cache_header_fields *field_hdr;
+	struct mail_cache_field field;
+	const uint32_t *last_used, *sizes;
+	const uint8_t *types, *decisions;
+	const char *p, *names, *end;
+	char *orig_key;
+	void *orig_value;
+	unsigned int fidx, new_fields_count;
+	struct mail_cache_purge_drop_ctx drop_ctx;
+	uint32_t offset, i;
+
+	if (mail_cache_header_fields_get_offset(cache, &offset, &field_hdr) < 0)
+		return -1;
+
+	if (offset == 0) {
+		/* no fields - the file is empty */
+		return 0;
+	}
+
+	/* check the fixed size of the header. name[] has to be checked
+	   separately */
+	if (field_hdr->fields_count > INT_MAX / MAIL_CACHE_FIELD_NAMES(1) ||
+	    field_hdr->size < MAIL_CACHE_FIELD_NAMES(field_hdr->fields_count)) {
+		mail_cache_set_corrupted(cache, "invalid field header size");
+		return -1;
+	}
+
+	new_fields_count = field_hdr->fields_count;
+	if (new_fields_count != 0) {
+		cache->file_field_map =
+			i_realloc_type(cache->file_field_map, unsigned int,
+				       cache->file_fields_count, new_fields_count);
+	} else {
+		i_free_and_null(cache->file_field_map);
+	}
+	cache->file_fields_count = new_fields_count;
+
+	/* the fields header is a sequence of packed per-field arrays
+	   followed by the NUL-terminated field names */
+	last_used = CONST_PTR_OFFSET(field_hdr, MAIL_CACHE_FIELD_LAST_USED());
+	sizes = CONST_PTR_OFFSET(field_hdr,
+		MAIL_CACHE_FIELD_SIZE(field_hdr->fields_count));
+	types = CONST_PTR_OFFSET(field_hdr,
+		MAIL_CACHE_FIELD_TYPE(field_hdr->fields_count));
+	decisions = CONST_PTR_OFFSET(field_hdr,
+		MAIL_CACHE_FIELD_DECISION(field_hdr->fields_count));
+	names = CONST_PTR_OFFSET(field_hdr,
+		MAIL_CACHE_FIELD_NAMES(field_hdr->fields_count));
+	end = CONST_PTR_OFFSET(field_hdr, field_hdr->size);
+	i_assert(names <= end);
+
+	/* clear the old mapping */
+	for (i = 0; i < cache->fields_count; i++)
+		cache->field_file_map[i] = (uint32_t)-1;
+
+	mail_cache_purge_drop_init(cache, &cache->index->map->hdr, &drop_ctx);
+	i_zero(&field);
+	for (i = 0; i < field_hdr->fields_count; i++) {
+		/* find the terminating NUL of this field's name */
+		for (p = names; p != end && *p != '\0'; p++) ;
+		if (p == end || *names == '\0') {
+			mail_cache_set_corrupted(cache,
+				"field header names corrupted");
+			return -1;
+		}
+
+		if (types[i] > MAIL_CACHE_FIELD_COUNT) {
+			mail_cache_set_corrupted(cache, "field type corrupted");
+			return -1;
+		}
+		if (!field_decision_is_valid(decisions[i])) {
+			mail_cache_set_corrupted(cache,
+				"field decision type corrupted");
+			return -1;
+		}
+
+		/* ignore any forced-flags in the file */
+		enum mail_cache_decision_type file_dec =
+			decisions[i] & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED);
+
+		if (hash_table_lookup_full(cache->field_name_hash, names,
+					   &orig_key, &orig_value)) {
+			/* already exists, see if decision can be updated */
+			fidx = POINTER_CAST_TO(orig_value, unsigned int);
+			enum mail_cache_decision_type cur_dec =
+				cache->fields[fidx].field.decision;
+			if ((cur_dec & MAIL_CACHE_DECISION_FORCED) != 0) {
+				/* Forced decision. If the decision has
+				   changed, update the fields in the file. */
+				if ((cur_dec & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED)) != file_dec)
+					cache->field_header_write_pending = TRUE;
+			} else if (cache->fields[fidx].decision_dirty) {
+				/* Decisions have recently been updated
+				   internally. Don't change them. */
+			} else {
+				/* Use the decision from the cache file. */
+				cache->fields[fidx].field.decision = file_dec;
+			}
+			if (field_type_verify(cache, fidx,
+					      types[i], sizes[i]) < 0)
+				return -1;
+		} else {
+			/* field is currently unknown, so just use whatever
+			   exists in the file. */
+			field.name = names;
+			field.type = types[i];
+			field.field_size = sizes[i];
+			field.decision = file_dec;
+			mail_cache_register_fields(cache, &field, 1);
+			fidx = field.idx;
+		}
+		if (cache->field_file_map[fidx] != (uint32_t)-1) {
+			mail_cache_set_corrupted(cache,
+				"Duplicated field in header: %s", names);
+			return -1;
+		}
+		cache->fields[fidx].used = TRUE;
+
+		cache->field_file_map[fidx] = i;
+		cache->file_field_map[i] = fidx;
+
+		/* Update last_used if it's newer than ours. Note that the
+		   last_used may have been overwritten while we were reading
+		   this cache header. In theory this can mean that the
+		   last_used field is only half-updated and contains garbage.
+		   This practically won't matter, since the worst that can
+		   happen is that we trigger a purge earlier than necessary.
+		   The purging re-reads the last_used while cache is locked and
+		   correctly figures out whether to drop the field. */
+		if ((time_t)last_used[i] > cache->fields[fidx].field.last_used)
+			cache->fields[fidx].field.last_used = last_used[i];
+
+		switch (mail_cache_purge_drop_test(&drop_ctx, fidx)) {
+		case MAIL_CACHE_PURGE_DROP_DECISION_NONE:
+			break;
+		case MAIL_CACHE_PURGE_DROP_DECISION_DROP:
+			mail_cache_purge_later(cache, t_strdup_printf(
+				"Drop old field %s (last_used=%"PRIdTIME_T")",
+				cache->fields[fidx].field.name,
+				cache->fields[fidx].field.last_used));
+			break;
+		case MAIL_CACHE_PURGE_DROP_DECISION_TO_TEMP:
+			/* This cache decision change can cause the field to be
+			   dropped for old mails, so do it via purging. */
+			mail_cache_purge_later(cache, t_strdup_printf(
+				"Change cache decision to temp for old field %s "
+				"(last_used=%"PRIdTIME_T")",
+				cache->fields[fidx].field.name,
+				cache->fields[fidx].field.last_used));
+			break;
+		}
+
+		names = p + 1;
+	}
+	return 0;
+}
+
+/* Append `size` bytes taken at `offset` inside each field's
+   struct mail_cache_field_private to dest - first for every field in
+   cache-file order, then (when add_new is set) for every newly wanted
+   field. Used to build the packed per-field arrays of the fields header. */
+static void copy_to_buf(struct mail_cache *cache, buffer_t *dest, bool add_new,
+			size_t offset, size_t size)
+{
+	const void *data;
+	unsigned int i, field;
+
+	/* copy the existing fields */
+	for (i = 0; i < cache->file_fields_count; i++) {
+		field = cache->file_field_map[i];
+		data = CONST_PTR_OFFSET(&cache->fields[field], offset);
+		buffer_append(dest, data, size);
+	}
+	if (!add_new)
+		return;
+
+	/* copy newly wanted fields */
+	for (i = 0; i < cache->fields_count; i++) {
+		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i)) {
+			data = CONST_PTR_OFFSET(&cache->fields[i], offset);
+			buffer_append(dest, data, size);
+		}
+	}
+}
+
+/* Like copy_to_buf(), but reads an int-sized member at `offset` in each
+   field's struct mail_cache_field_private and appends only its low byte.
+   Used for the type and decision arrays, whose on-disk format is one byte
+   per field. */
+static void copy_to_buf_byte(struct mail_cache *cache, buffer_t *dest,
+			     bool add_new, size_t offset)
+{
+	const int *data;
+	unsigned int i, field;
+	uint8_t byte;
+
+	/* copy the existing fields */
+	for (i = 0; i < cache->file_fields_count; i++) {
+		field = cache->file_field_map[i];
+		data = CONST_PTR_OFFSET(&cache->fields[field], offset);
+		byte = (uint8_t)*data;
+		buffer_append(dest, &byte, 1);
+	}
+	if (!add_new)
+		return;
+
+	/* copy newly wanted fields */
+	for (i = 0; i < cache->fields_count; i++) {
+		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i)) {
+			data = CONST_PTR_OFFSET(&cache->fields[i], offset);
+			byte = (uint8_t)*data;
+			buffer_append(dest, &byte, 1);
+		}
+	}
+}
+
+/* Append the 32-bit last_used timestamps of all fields to dest
+   (file-order fields first, then newly wanted fields if add_new). */
+static void
+copy_to_buf_last_used(struct mail_cache *cache, buffer_t *dest, bool add_new)
+{
+	size_t offset = offsetof(struct mail_cache_field, last_used);
+#if defined(WORDS_BIGENDIAN) && SIZEOF_VOID_P == 8
+	/* 64bit time_t with big endian CPUs: copy the last 32 bits instead of
+	   the first 32 bits (that are always 0). The 32 bits are enough until
+	   year 2106, so we're not in a hurry to use 64 bits on disk. */
+	offset += sizeof(uint32_t);
+#endif
+	copy_to_buf(cache, dest, add_new, offset, sizeof(uint32_t));
+}
+
+/* Rewrite the last_used and decision arrays of the existing fields header
+   in place (the cache must already be locked). Clears the decision_dirty
+   flags and field_header_write_pending on success. Returns 0 on success,
+   -1 on error. */
+static int mail_cache_header_fields_update_locked(struct mail_cache *cache)
+{
+	buffer_t *buffer;
+	uint32_t i, offset, dec_offset;
+	int ret = 0;
+
+	if (mail_cache_header_fields_read(cache) < 0 ||
+	    mail_cache_header_fields_get_offset(cache, &offset, NULL) < 0)
+		return -1;
+
+	buffer = t_buffer_create(256);
+
+	copy_to_buf_last_used(cache, buffer, FALSE);
+	ret = mail_cache_write(cache, buffer->data, buffer->used,
+			       offset + MAIL_CACHE_FIELD_LAST_USED());
+	if (ret == 0) {
+		buffer_set_used_size(buffer, 0);
+		copy_to_buf_byte(cache, buffer, FALSE,
+				 offsetof(struct mail_cache_field, decision));
+
+		dec_offset = offset +
+			MAIL_CACHE_FIELD_DECISION(cache->file_fields_count);
+		ret = mail_cache_write(cache, buffer->data, buffer->used,
+				       dec_offset);
+		if (ret == 0) {
+			/* NOTE(review): clears dirty flags by in-memory index
+			   i, while the data written above was gathered via
+			   file_field_map[i] - presumably equivalent after the
+			   header_fields_read() above; confirm */
+			for (i = 0; i < cache->file_fields_count; i++)
+				cache->fields[i].decision_dirty = FALSE;
+		}
+	}
+
+	if (ret == 0)
+		cache->field_header_write_pending = FALSE;
+	return ret;
+}
+
+/* Update the on-disk fields header (last_used timestamps and decisions),
+   taking the cache lock first unless the caller already holds it.
+   Returns 0 on success, -1 on error. */
+int mail_cache_header_fields_update(struct mail_cache *cache)
+{
+	int ret;
+
+	if (cache->locked) {
+		T_BEGIN {
+			ret = mail_cache_header_fields_update_locked(cache);
+		} T_END;
+		return ret;
+	}
+
+	if (mail_cache_lock(cache) <= 0)
+		return -1;
+
+	T_BEGIN {
+		ret = mail_cache_header_fields_update_locked(cache);
+	} T_END;
+	i_assert(!cache->hdr_modified);
+	mail_cache_unlock(cache);
+	return ret;
+}
+
+/* Serialize all fields into dest in the on-disk fields-header format:
+   the struct mail_cache_header_fields, the packed last_used/size/type/
+   decision arrays, and the NUL-terminated name list, with existing file
+   fields first (in file order) followed by newly wanted fields. The result
+   is padded to 32-bit alignment. */
+void mail_cache_header_fields_get(struct mail_cache *cache, buffer_t *dest)
+{
+	struct mail_cache_header_fields hdr;
+	unsigned int field;
+	const char *name;
+	uint32_t i;
+
+	i_zero(&hdr);
+	hdr.fields_count = cache->file_fields_count;
+	for (i = 0; i < cache->fields_count; i++) {
+		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i))
+			hdr.fields_count++;
+	}
+	/* placeholder header - rewritten below once hdr.size is known */
+	buffer_append(dest, &hdr, sizeof(hdr));
+
+	/* we have to keep the field order for the existing fields. */
+	copy_to_buf_last_used(cache, dest, TRUE);
+	copy_to_buf(cache, dest, TRUE,
+		    offsetof(struct mail_cache_field, field_size),
+		    sizeof(uint32_t));
+	copy_to_buf_byte(cache, dest, TRUE,
+			 offsetof(struct mail_cache_field, type));
+	copy_to_buf_byte(cache, dest, TRUE,
+			 offsetof(struct mail_cache_field, decision));
+
+	i_assert(dest->used == sizeof(hdr) +
+		 (sizeof(uint32_t)*2 + 2) * hdr.fields_count);
+
+	/* add existing fields' names */
+	for (i = 0; i < cache->file_fields_count; i++) {
+		field = cache->file_field_map[i];
+		name = cache->fields[field].field.name;
+		buffer_append(dest, name, strlen(name)+1);
+	}
+	/* add newly wanted fields' names */
+	for (i = 0; i < cache->fields_count; i++) {
+		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i)) {
+			name = cache->fields[i].field.name;
+			buffer_append(dest, name, strlen(name)+1);
+		}
+	}
+
+	hdr.size = dest->used;
+	buffer_write(dest, 0, &hdr, sizeof(hdr));
+
+	/* pad to 32-bit alignment */
+	if ((hdr.size & 3) != 0)
+		buffer_append_zero(dest, 4 - (hdr.size & 3));
+}
+
+/* Store in *offset_r the file position where the offset of a newly written
+   fields header should be recorded: the main header's field_header_offset
+   when no fields header exists yet, otherwise the next_offset member of
+   the latest fields header. Returns 0 on success, -1 on error. */
+int mail_cache_header_fields_get_next_offset(struct mail_cache *cache,
+					     uint32_t *offset_r)
+{
+	if (mail_cache_header_fields_get_offset(cache, offset_r, NULL) < 0)
+		return -1;
+
+	if (*offset_r == 0) {
+		*offset_r = offsetof(struct mail_cache_header,
+				     field_header_offset);
+	} else {
+		*offset_r += offsetof(struct mail_cache_header_fields,
+				      next_offset);
+	}
+	return 0;
+}
diff --git a/src/lib-index/mail-cache-lookup.c b/src/lib-index/mail-cache-lookup.c
new file mode 100644
index 0000000..e57b40b
--- /dev/null
+++ b/src/lib-index/mail-cache-lookup.c
@@ -0,0 +1,694 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "buffer.h"
+#include "str.h"
+#include "mail-cache-private.h"
+
+
+#define CACHE_PREFETCH IO_BLOCK_SIZE
+
+/* Map the cache record at the given file offset and validate its size
+   field. Returns 0 and sets *rec_r on success, -1 if the record is
+   corrupted or an I/O error occurred. */
+int mail_cache_get_record(struct mail_cache *cache, uint32_t offset,
+ const struct mail_cache_record **rec_r)
+{
+ const struct mail_cache_record *rec;
+ const void *data;
+ int ret;
+
+ i_assert(offset != 0);
+
+ if (offset % sizeof(uint32_t) != 0) {
+ /* records are always 32-bit aligned */
+ mail_cache_set_corrupted(cache, "invalid record offset");
+ return -1;
+ }
+
+ /* we don't know yet how large the record is, so just guess */
+ if (mail_cache_map(cache, offset, sizeof(*rec) + CACHE_PREFETCH,
+ &data) < 0)
+ return -1;
+
+ if (offset + sizeof(*rec) > cache->mmap_length) {
+ mail_cache_set_corrupted(cache, "record points outside file");
+ return -1;
+ }
+ rec = data;
+
+ /* rec->size includes this header, so it can never be smaller */
+ if (rec->size < sizeof(*rec)) {
+ mail_cache_set_corrupted(cache, "invalid record size");
+ return -1;
+ }
+ if (rec->size > CACHE_PREFETCH) {
+ /* larger than we guessed. map the rest of the record. */
+ if ((ret = mail_cache_map(cache, offset, rec->size, &data)) < 0)
+ return -1;
+ if (ret == 0) {
+ mail_cache_set_corrupted(cache, "record points outside file");
+ return -1;
+ }
+ rec = data;
+ }
+
+ *rec_r = rec;
+ return 0;
+}
+
+/* Look up seq's cache file offset from the index's "cache" extension.
+   Returns 0 if the mail has no cached data; otherwise returns the
+   offset and sets *reset_id_r to the extension's reset_id (which must
+   match the cache file's file_seq to be usable). */
+uint32_t mail_cache_lookup_cur_offset(struct mail_index_view *view,
+ uint32_t seq, uint32_t *reset_id_r)
+{
+ struct mail_cache *cache = mail_index_view_get_index(view)->cache;
+ struct mail_index_map *map;
+ const void *data;
+ uint32_t offset;
+
+ mail_index_lookup_ext_full(view, seq, cache->ext_id, &map, &data, NULL);
+ if (data == NULL) {
+ /* no cache offsets */
+ return 0;
+ }
+ offset = *((const uint32_t *)data);
+ if (offset == 0)
+ return 0;
+
+ /* a nonzero offset implies the extension exists, so the reset_id
+    lookup can't fail */
+ if (!mail_index_ext_get_reset_id(view, map, cache->ext_id, reset_id_r))
+ i_unreached();
+ return offset;
+}
+
+/* Resolve seq's cache offset, syncing the index/cache for as long as
+   the index's reset_id doesn't match the currently open cache file.
+   Returns 1 and sets *offset_r on success, 0 if the mail has no usable
+   cached data, -1 on error. */
+static int
+mail_cache_lookup_offset(struct mail_cache *cache, struct mail_index_view *view,
+ uint32_t seq, uint32_t *offset_r)
+{
+ uint32_t offset, reset_id, reset_id2;
+ int ret;
+
+ offset = mail_cache_lookup_cur_offset(view, seq, &reset_id);
+ if (offset == 0)
+ return 0;
+
+ while (cache->hdr->file_seq != reset_id) {
+ /* reset_id doesn't match - sync the index/cache */
+ if ((ret = mail_cache_sync_reset_id(cache)) <= 0)
+ return ret;
+
+ /* lookup again after syncing */
+ offset = mail_cache_lookup_cur_offset(view, seq, &reset_id2);
+ if (offset == 0)
+ return 0;
+ if (cache->hdr->file_seq == reset_id2)
+ break; /* match - all good */
+ if (reset_id == reset_id2) {
+ /* reset_id didn't change after sync. This means it's
+ pointing to an old already deleted cache file. */
+ return 0;
+ }
+ /* reset_id changed - try again */
+ reset_id = reset_id2;
+ }
+
+ *offset_r = offset;
+ return 1;
+}
+
+/* Track the total amount of record data seen while following a
+   prev_offset chain. Returns TRUE once more data has been seen than
+   can fit in the accessed file area, which means the chain must be
+   looping. */
+bool mail_cache_track_loops(struct mail_cache_loop_track *loop_track,
+ uoff_t offset, uoff_t size)
+{
+ i_assert(offset != 0);
+ i_assert(size != 0);
+
+ /* looping happens only in rare error conditions, so it's enough if we
+ just catch it eventually. we do this by checking if we've seen
+ more record data than possible in the accessed file area. */
+ if (loop_track->size_sum == 0) {
+ /* first call */
+ loop_track->min_offset = offset;
+ loop_track->max_offset = offset + size;
+ } else {
+ if (loop_track->min_offset > offset)
+ loop_track->min_offset = offset;
+ if (loop_track->max_offset < offset + size)
+ loop_track->max_offset = offset + size;
+ }
+
+ loop_track->size_sum += size;
+ return loop_track->size_sum >
+ (loop_track->max_offset - loop_track->min_offset);
+}
+
+/* Initialize iteration over all cached fields of seq. Open/lookup
+   failures are recorded in ctx (stop/failed flags) and reported by the
+   first mail_cache_lookup_iter_next() call. */
+void mail_cache_lookup_iter_init(struct mail_cache_view *view, uint32_t seq,
+ struct mail_cache_lookup_iterate_ctx *ctx_r)
+{
+ struct mail_cache_lookup_iterate_ctx *ctx = ctx_r;
+ int ret;
+
+ if (!view->cache->opened)
+ (void)mail_cache_open_and_verify(view->cache);
+
+ i_zero(ctx);
+ ctx->view = view;
+ ctx->seq = seq;
+
+ if (!MAIL_CACHE_IS_UNUSABLE(view->cache)) {
+ /* look up the first offset */
+ ret = mail_cache_lookup_offset(view->cache, view->view, seq,
+ &ctx->offset);
+ if (ret <= 0) {
+ ctx->stop = TRUE;
+ ctx->failed = ret < 0;
+ }
+ }
+ ctx->remap_counter = view->cache->remap_counter;
+
+ i_zero(&view->loop_track);
+}
+
+/* Continue the iteration from the uncommitted transaction's in-memory
+   records. Returns TRUE if a record for ctx->seq was found and the
+   iterator was repositioned onto it. */
+static bool
+mail_cache_lookup_iter_transaction(struct mail_cache_lookup_iterate_ctx *ctx)
+{
+ ctx->rec = mail_cache_transaction_lookup_rec(ctx->view->transaction,
+ ctx->seq,
+ &ctx->trans_next_idx);
+ if (ctx->rec == NULL)
+ return FALSE;
+
+ /* in-memory records use the global field indexes, not the
+    file-specific ones */
+ ctx->inmemory_field_idx = TRUE;
+ ctx->remap_counter = ctx->view->cache->remap_counter;
+ ctx->pos = sizeof(*ctx->rec);
+ ctx->rec_size = ctx->rec->size;
+ return TRUE;
+}
+
+/* Advance the iterator to the next record: follow the current record's
+   prev_offset chain, and at its end check the uncommitted transaction's
+   in-memory appends and its appends already written to the cache file.
+   Returns 1 if a record was found, 0 at the end, -1 on error. */
+static int
+mail_cache_lookup_iter_next_record(struct mail_cache_lookup_iterate_ctx *ctx)
+{
+ struct mail_cache_view *view = ctx->view;
+
+ if (ctx->failed)
+ return -1;
+
+ if (ctx->rec != NULL)
+ ctx->offset = ctx->rec->prev_offset;
+ if (ctx->offset == 0) {
+ /* end of this record list. check newly appended data. */
+ if (view->trans_seq1 > ctx->seq ||
+ view->trans_seq2 < ctx->seq)
+ return 0;
+ /* check data still in memory. this works for recent mails
+ even with INDEX=MEMORY */
+ if (!ctx->memory_appends_checked) {
+ if (mail_cache_lookup_iter_transaction(ctx))
+ return 1;
+ ctx->memory_appends_checked = TRUE;
+ }
+ if (MAIL_CACHE_IS_UNUSABLE(view->cache) || ctx->stop)
+ return 0;
+
+ /* check data already written to cache file */
+ if (ctx->disk_appends_checked ||
+ mail_cache_lookup_offset(view->cache, view->trans_view,
+ ctx->seq, &ctx->offset) <= 0)
+ return 0;
+
+ ctx->disk_appends_checked = TRUE;
+ ctx->remap_counter = view->cache->remap_counter;
+ /* restart loop tracking for the new record chain */
+ i_zero(&view->loop_track);
+ }
+
+ if (ctx->stop)
+ return 0;
+
+ /* look up the next record */
+ if (mail_cache_get_record(view->cache, ctx->offset, &ctx->rec) < 0)
+ return -1;
+ if (mail_cache_track_loops(&view->loop_track, ctx->offset,
+ ctx->rec->size)) {
+ mail_cache_set_corrupted(view->cache,
+ "record list is circular");
+ return -1;
+ }
+ ctx->inmemory_field_idx = FALSE;
+ ctx->remap_counter = view->cache->remap_counter;
+
+ ctx->pos = sizeof(*ctx->rec);
+ ctx->rec_size = ctx->rec->size;
+ return 1;
+}
+
+/* Translate the field number stored at ctx->pos in the current record
+   into the global field index. For in-memory records the number is
+   already global; for file records it's a file-specific index that may
+   require re-reading the fields header (which can remap the file and
+   invalidate ctx->rec). Returns 0 on success, -1 on error. */
+static int
+mail_cache_lookup_rec_get_field(struct mail_cache_lookup_iterate_ctx *ctx,
+ unsigned int *field_idx_r)
+{
+ struct mail_cache *cache = ctx->view->cache;
+ uint32_t file_field;
+
+ file_field = *((const uint32_t *)CONST_PTR_OFFSET(ctx->rec, ctx->pos));
+ if (ctx->inmemory_field_idx) {
+ *field_idx_r = file_field;
+ return 0;
+ }
+
+ if (file_field >= cache->file_fields_count) {
+ /* new field, have to re-read fields header to figure
+ out its size. don't do this if we're purging. */
+ if (!cache->locked) {
+ if (mail_cache_header_fields_read(cache) < 0)
+ return -1;
+ }
+ if (file_field >= cache->file_fields_count) {
+ mail_cache_set_corrupted(cache,
+ "field index too large (%u >= %u)",
+ file_field, cache->file_fields_count);
+ return -1;
+ }
+
+ /* field reading might have re-mmaped the file and
+ caused rec pointer to break. need to get it again. */
+ if (mail_cache_get_record(cache, ctx->offset, &ctx->rec) < 0)
+ return -1;
+ ctx->remap_counter = cache->remap_counter;
+ }
+
+ *field_idx_r = cache->file_field_map[file_field];
+ return 0;
+}
+
+/* Return the next cached field for the iterated mail in *field_r.
+   Returns 1 if a field was returned, 0 at the end of the fields,
+   -1 on corruption or I/O error. */
+int mail_cache_lookup_iter_next(struct mail_cache_lookup_iterate_ctx *ctx,
+ struct mail_cache_iterate_field *field_r)
+{
+ struct mail_cache *cache = ctx->view->cache;
+ unsigned int field_idx;
+ unsigned int data_size;
+ int ret;
+
+ i_assert(ctx->remap_counter == cache->remap_counter);
+
+ if (ctx->pos + sizeof(uint32_t) > ctx->rec_size) {
+ /* current record is exhausted - it must end exactly at
+    rec_size or it's broken */
+ if (ctx->pos != ctx->rec_size) {
+ mail_cache_set_corrupted(cache,
+ "record has invalid size");
+ return -1;
+ }
+
+ if ((ret = mail_cache_lookup_iter_next_record(ctx)) <= 0)
+ return ret;
+ }
+
+ /* return the next field */
+ if (mail_cache_lookup_rec_get_field(ctx, &field_idx) < 0)
+ return -1;
+ ctx->pos += sizeof(uint32_t);
+
+ data_size = cache->fields[field_idx].field.field_size;
+ if (data_size == UINT_MAX &&
+ ctx->pos + sizeof(uint32_t) <= ctx->rec->size) {
+ /* variable size field. get its size from the file. */
+ data_size = *((const uint32_t *)
+ CONST_PTR_OFFSET(ctx->rec, ctx->pos));
+ ctx->pos += sizeof(uint32_t);
+ }
+
+ if (ctx->rec->size - ctx->pos < data_size) {
+ mail_cache_set_corrupted(cache,
+ "record continues outside its allocated size");
+ return -1;
+ }
+
+ field_r->field_idx = field_idx;
+ field_r->data = CONST_PTR_OFFSET(ctx->rec, ctx->pos);
+ field_r->size = data_size;
+ field_r->offset = ctx->offset + ctx->pos;
+
+ /* each record begins from 32bit aligned position */
+ ctx->pos += (data_size + sizeof(uint32_t)-1) & ~(sizeof(uint32_t)-1);
+ return 1;
+}
+
+/* Populate view->cached_exists_buf for seq: each field index that has
+   cached data is marked with the current cached_exists_value. The value
+   is a counter that wraps at 255; only on wrap does the whole buffer
+   need clearing. Returns 0 on success, -1 on error. */
+static int mail_cache_seq(struct mail_cache_view *view, uint32_t seq)
+{
+ struct mail_cache_lookup_iterate_ctx iter;
+ struct mail_cache_iterate_field field;
+ int ret;
+
+ view->cached_exists_value = (view->cached_exists_value + 1) & UINT8_MAX;
+ if (view->cached_exists_value == 0) {
+ /* wrapped, we'll have to clear the buffer */
+ buffer_set_used_size(view->cached_exists_buf, 0);
+ view->cached_exists_value++;
+ }
+ view->cached_exists_seq = seq;
+
+ mail_cache_lookup_iter_init(view, seq, &iter);
+ while ((ret = mail_cache_lookup_iter_next(&iter, &field)) > 0) {
+ buffer_write(view->cached_exists_buf, field.field_idx,
+ &view->cached_exists_value, 1);
+ }
+ return ret;
+}
+
+/* Returns 1 if the field is cached for seq, 0 if not, -1 on error.
+   The per-view exists buffer is (re)built whenever a different seq is
+   queried. */
+int mail_cache_field_exists(struct mail_cache_view *view, uint32_t seq,
+ unsigned int field)
+{
+ const uint8_t *data;
+
+ i_assert(seq > 0);
+
+ /* NOTE: view might point to a non-committed transaction that has
+ fields that don't yet exist in the cache file. So don't add any
+ fast-paths checking whether the field exists in the file. */
+
+ /* FIXME: we should discard the cache if view has been synced */
+ if (view->cached_exists_seq != seq) {
+ if (mail_cache_seq(view, seq) < 0)
+ return -1;
+ }
+
+ data = view->cached_exists_buf->data;
+ return (field < view->cached_exists_buf->used &&
+ data[field] == view->cached_exists_value) ? 1 : 0;
+}
+
+/* Returns TRUE if seq has any cache offset at all (i.e. at least one
+   cached field in the file). */
+bool mail_cache_field_exists_any(struct mail_cache_view *view, uint32_t seq)
+{
+ uint32_t reset_id;
+
+ return mail_cache_lookup_cur_offset(view->view, seq, &reset_id) != 0;
+}
+
+/* Return the current caching decision for a registered field. */
+enum mail_cache_decision_type
+mail_cache_field_get_decision(struct mail_cache *cache, unsigned int field_idx)
+{
+ i_assert(field_idx < cache->fields_count);
+
+ return cache->fields[field_idx].field.decision;
+}
+
+/* OR together every occurrence of a bitmask field across the record
+   chain into dest_buf. Returns 1 if the field was found at least once,
+   0 if not found, -1 on error. */
+static int
+mail_cache_lookup_bitmask(struct mail_cache_lookup_iterate_ctx *iter,
+ unsigned int field_idx, unsigned int field_size,
+ buffer_t *dest_buf)
+{
+ struct mail_cache_iterate_field field;
+ const unsigned char *src;
+ unsigned char *dest;
+ unsigned int i;
+ bool found = FALSE;
+ int ret;
+
+ /* make sure all bits are cleared first */
+ buffer_write_zero(dest_buf, 0, field_size);
+
+ while ((ret = mail_cache_lookup_iter_next(iter, &field)) > 0) {
+ if (field.field_idx != field_idx)
+ continue;
+
+ /* merge all bits */
+ src = field.data;
+ dest = buffer_get_space_unsafe(dest_buf, 0, field.size);
+ for (i = 0; i < field.size; i++)
+ dest[i] |= src[i];
+ found = TRUE;
+ }
+ return ret < 0 ? -1 : (found ? 1 : 0);
+}
+
+/* Look up a single cached field for seq and append its value to
+   dest_buf. Bitmask fields are merged from all records; other fields
+   return the first occurrence found. Returns 1 if found, 0 if not
+   cached, -1 on error. Also updates the field's caching decision. */
+int mail_cache_lookup_field(struct mail_cache_view *view, buffer_t *dest_buf,
+ uint32_t seq, unsigned int field_idx)
+{
+ struct mail_cache_lookup_iterate_ctx iter;
+ struct mail_cache_iterate_field field;
+ int ret;
+
+ ret = mail_cache_field_exists(view, seq, field_idx);
+ mail_cache_decision_state_update(view, seq, field_idx);
+ if (ret <= 0)
+ return ret;
+
+ /* the field should exist */
+ mail_cache_lookup_iter_init(view, seq, &iter);
+ if (view->cache->fields[field_idx].field.type == MAIL_CACHE_FIELD_BITMASK) {
+ ret = mail_cache_lookup_bitmask(&iter, field_idx,
+ view->cache->fields[field_idx].field.field_size,
+ dest_buf);
+ } else {
+ /* return the first one that's found. if there are multiple
+ they're all identical. */
+ while ((ret = mail_cache_lookup_iter_next(&iter, &field)) > 0) {
+ if (field.field_idx == field_idx) {
+ buffer_append(dest_buf, field.data, field.size);
+ break;
+ }
+ }
+ }
+ /* NOTE: view->cache->fields may have been reallocated by
+ mail_cache_lookup_*(). */
+ return ret;
+}
+
+/* One cached header blob, shared by all of its physical lines. */
+struct header_lookup_data {
+ uint32_t data_size;
+ const unsigned char *data;
+};
+
+/* One physical header line, pointing into its shared data blob. */
+struct header_lookup_line {
+ uint32_t line_num;
+ struct header_lookup_data *data;
+};
+
+/* State for collecting and ordering header lines during a lookup. */
+struct header_lookup_context {
+ struct mail_cache_view *view;
+ pool_t pool;
+ ARRAY(struct header_lookup_line) lines;
+};
+
+/* Per-field lookup state used in mail_cache_lookup_headers_real() */
+enum {
+ HDR_FIELD_STATE_DONTWANT = 0,
+ HDR_FIELD_STATE_WANT,
+ HDR_FIELD_STATE_SEEN
+};
+
+/* Parse one cached header field ({ line_nums[], 0, "headers" }) and
+   queue a header_lookup_line per line number, all sharing one
+   pool-allocated copy of the header text. */
+static void header_lines_save(struct header_lookup_context *ctx,
+ const struct mail_cache_iterate_field *field)
+{
+ const uint32_t *lines = field->data;
+ uint32_t data_size = field->size;
+ struct header_lookup_line hdr_line;
+ struct header_lookup_data *hdr_data;
+ void *data_dup;
+ unsigned int i, lines_count, pos;
+
+ /* data = { line_nums[], 0, "headers" } */
+ for (i = 0; data_size >= sizeof(uint32_t); i++) {
+ data_size -= sizeof(uint32_t);
+ if (lines[i] == 0)
+ break;
+ }
+ lines_count = i;
+ /* skip past the line number list and its 0 terminator */
+ pos = (lines_count+1) * sizeof(uint32_t);
+
+ hdr_data = p_new(ctx->pool, struct header_lookup_data, 1);
+ hdr_data->data_size = data_size;
+ if (data_size > 0) {
+ hdr_data->data = data_dup =
+ p_malloc(ctx->pool, data_size);
+ memcpy(data_dup, CONST_PTR_OFFSET(field->data, pos), data_size);
+ }
+
+ for (i = 0; i < lines_count; i++) {
+ hdr_line.line_num = lines[i];
+ hdr_line.data = hdr_data;
+ array_push_back(&ctx->lines, &hdr_line);
+ }
+}
+
+/* Sort header lines by their physical line number. NOTE(review): the
+   int subtraction assumes line numbers stay within int range - fine
+   for realistic header line counts. */
+static int header_lookup_line_cmp(const struct header_lookup_line *l1,
+ const struct header_lookup_line *l2)
+{
+ return (int)l1->line_num - (int)l2->line_num;
+}
+
+/* Look up the given cached header fields for seq and append them to
+   dest in their original message order. Returns 1 if all requested
+   fields were found, 0 if any are missing from cache, -1 on error.
+   On success *pool_r holds the temporary header copies; the caller
+   (mail_cache_lookup_headers()) unrefs it. */
+static int
+mail_cache_lookup_headers_real(struct mail_cache_view *view, string_t *dest,
+ uint32_t seq, const unsigned int field_idxs[],
+ unsigned int fields_count, pool_t *pool_r)
+{
+ struct mail_cache_lookup_iterate_ctx iter;
+ struct mail_cache_iterate_field field;
+ struct header_lookup_context ctx;
+ struct header_lookup_line *lines;
+ const unsigned char *p, *start, *end;
+ uint8_t *field_state;
+ unsigned int i, count, max_field = 0;
+ size_t hdr_size;
+ uint8_t want = HDR_FIELD_STATE_WANT;
+ buffer_t *buf;
+ int ret;
+
+ *pool_r = NULL;
+
+ if (fields_count == 0)
+ return 1;
+
+ /* update the decision state regardless of whether the fields
+ actually exist or not. */
+ for (i = 0; i < fields_count; i++)
+ mail_cache_decision_state_update(view, seq, field_idxs[i]);
+
+ /* mark all the fields we want to find. */
+ buf = t_buffer_create(32);
+ for (i = 0; i < fields_count; i++) {
+ if (field_idxs[i] > max_field)
+ max_field = field_idxs[i];
+
+ buffer_write(buf, field_idxs[i], &want, 1);
+ }
+ field_state = buffer_get_modifiable_data(buf, NULL);
+
+ /* lookup the fields */
+ i_zero(&ctx);
+ ctx.view = view;
+ ctx.pool = *pool_r = pool_alloconly_create(MEMPOOL_GROWING"mail cache headers", 1024);
+ t_array_init(&ctx.lines, 32);
+
+ mail_cache_lookup_iter_init(view, seq, &iter);
+ while ((ret = mail_cache_lookup_iter_next(&iter, &field)) > 0) {
+ if (field.field_idx > max_field ||
+ field_state[field.field_idx] != HDR_FIELD_STATE_WANT) {
+ /* a) don't want it, b) duplicate */
+ } else {
+ field_state[field.field_idx] = HDR_FIELD_STATE_SEEN;
+ header_lines_save(&ctx, &field);
+ }
+
+ }
+ if (ret < 0)
+ return -1;
+
+ /* check that all fields were found */
+ for (i = 0; i <= max_field; i++) {
+ if (field_state[i] == HDR_FIELD_STATE_WANT)
+ return 0;
+ }
+
+ /* we need to return headers in the order they existed originally.
+ we can do this by sorting the messages by their line numbers. */
+ array_sort(&ctx.lines, header_lookup_line_cmp);
+ lines = array_get_modifiable(&ctx.lines, &count);
+
+ /* then start filling dest buffer from the headers */
+ for (i = 0; i < count; i++) {
+ start = lines[i].data->data;
+ end = start + lines[i].data->data_size;
+
+ /* find the end of the (multiline) header */
+ for (p = start; p != end; p++) {
+ if (*p == '\n' &&
+ (p+1 == end || (p[1] != ' ' && p[1] != '\t'))) {
+ p++;
+ break;
+ }
+ }
+ hdr_size = (size_t)(p - start);
+ buffer_append(dest, start, hdr_size);
+
+ /* if there are more lines for this header, the following lines
+ continue after this one. so skip this line. */
+ lines[i].data->data += hdr_size;
+ lines[i].data->data_size -= hdr_size;
+ }
+ return 1;
+}
+
+/* Public wrapper around mail_cache_lookup_headers_real(): runs the
+   lookup inside a temporary data stack frame unless dest itself is
+   allocated from the data stack (in which case a new frame would
+   invalidate it), and releases the temporary header pool. */
+int mail_cache_lookup_headers(struct mail_cache_view *view, string_t *dest,
+ uint32_t seq, const unsigned int field_idxs[],
+ unsigned int fields_count)
+{
+ pool_t pool = NULL;
+ int ret;
+
+ if (buffer_get_pool(dest)->datastack_pool)
+ ret = mail_cache_lookup_headers_real(view, dest, seq,
+ field_idxs, fields_count,
+ &pool);
+ else T_BEGIN {
+ ret = mail_cache_lookup_headers_real(view, dest, seq,
+ field_idxs, fields_count,
+ &pool);
+ } T_END;
+ pool_unref(&pool);
+ return ret;
+}
+
+/* Return the highest seq below below_seq that has any cached data,
+   or 0 if none. Uses view->reason_cache to avoid rescanning sequences
+   already checked with the same view state. */
+static uint32_t
+mail_cache_get_highest_seq_with_cache(struct mail_cache_view *view,
+ uint32_t below_seq, uint32_t *reset_id_r)
+{
+ struct mail_cache_missing_reason_cache *rc = &view->reason_cache;
+ uint32_t seq = below_seq-1, highest_checked_seq = 0;
+
+ /* find the newest mail that has anything in cache */
+ if (rc->log_file_head_offset == view->view->log_file_head_offset &&
+ rc->log_file_head_seq == view->view->log_file_head_seq) {
+ /* reason_cache matches the current view - we can use it */
+ highest_checked_seq = rc->highest_checked_seq;
+ } else {
+ /* view changed - rebind the cache to the current view */
+ rc->log_file_head_offset = view->view->log_file_head_offset;
+ rc->log_file_head_seq = view->view->log_file_head_seq;
+ }
+ rc->highest_checked_seq = below_seq;
+
+ /* first check anything not already in reason_cache */
+ for (; seq > highest_checked_seq; seq--) {
+ if (mail_cache_lookup_cur_offset(view->view, seq, reset_id_r) != 0) {
+ rc->highest_seq_with_cache = seq;
+ rc->reset_id = *reset_id_r;
+ return seq;
+ }
+ }
+ if (seq == 0)
+ return 0;
+ /* then return the result from cache */
+ *reset_id_r = rc->reset_id;
+ return rc->highest_seq_with_cache;
+}
+
+/* Return a human-readable (data stack allocated) explanation of why
+   seq has no cached data - for logging/debugging. */
+const char *
+mail_cache_get_missing_reason(struct mail_cache_view *view, uint32_t seq)
+{
+ uint32_t offset, reset_id;
+
+ if (mail_index_is_expunged(view->view, seq))
+ return "Mail is already expunged";
+
+ if (MAIL_CACHE_IS_UNUSABLE(view->cache))
+ return "Cache file is unusable";
+
+ offset = mail_cache_lookup_cur_offset(view->view, seq, &reset_id);
+ if (offset != 0) {
+ if (view->cache->hdr->file_seq != reset_id) {
+ return t_strdup_printf(
+ "Index reset_id=%u doesn't match cache reset_id=%u",
+ reset_id, view->cache->hdr->file_seq);
+ }
+ return t_strdup_printf(
+ "Mail has other cached fields, reset_id=%u", reset_id);
+ }
+ /* nothing cached for this mail - report the nearest mail that
+    does have cached data to narrow down the cause */
+ seq = mail_cache_get_highest_seq_with_cache(view, seq, &reset_id);
+ if (seq == 0) {
+ return t_strdup_printf("Cache file is empty, reset_id=%u",
+ view->cache->hdr->file_seq);
+ }
+
+ uint32_t uid;
+ mail_index_lookup_uid(view->view, seq, &uid);
+
+ if (view->cache->hdr->file_seq != reset_id) {
+ return t_strdup_printf(
+ "Mail not cached, highest cached seq=%u uid=%u: "
+ "Index reset_id=%u doesn't match cache reset_id=%u",
+ seq, uid, reset_id, view->cache->hdr->file_seq);
+ }
+ return t_strdup_printf(
+ "Mail not cached, highest cached seq=%u uid=%u: reset_id=%u",
+ seq, uid, reset_id);
+}
diff --git a/src/lib-index/mail-cache-private.h b/src/lib-index/mail-cache-private.h
new file mode 100644
index 0000000..c2fee17
--- /dev/null
+++ b/src/lib-index/mail-cache-private.h
@@ -0,0 +1,421 @@
+#ifndef MAIL_CACHE_PRIVATE_H
+#define MAIL_CACHE_PRIVATE_H
+
+#include "file-dotlock.h"
+#include "mail-index-private.h"
+#include "mail-cache.h"
+
+#define MAIL_CACHE_MAJOR_VERSION 1
+#define MAIL_CACHE_MINOR_VERSION 1
+
+#define MAIL_CACHE_LOCK_TIMEOUT 10
+#define MAIL_CACHE_LOCK_CHANGE_TIMEOUT 300
+
+#define MAIL_CACHE_MAX_WRITE_BUFFER (1024*256)
+
+#define MAIL_CACHE_IS_UNUSABLE(cache) \
+ ((cache)->hdr == NULL)
+
+struct mail_cache_header {
+ /* Major version is increased only when you can't have backwards
+ compatibility. If the field doesn't match MAIL_CACHE_MAJOR_VERSION,
+ don't even try to read it. */
+ uint8_t major_version;
+ /* If this isn't the same as sizeof(uoff_t), the cache file can't be
+ safely used with the current implementation. */
+ uint8_t compat_sizeof_uoff_t;
+ /* Minor version is increased when the file format changes in a
+ backwards compatible way. */
+ uint8_t minor_version;
+ uint8_t unused;
+
+ /* Unique index file ID, which must match the main index's indexid.
+ See mail_index_header.indexid. */
+ uint32_t indexid;
+ /* Cache file sequence. Increased on every purge. This must match the
+ main index's reset_id for "cache" extension or the cache offsets
+ aren't valid. When creating the first cache file, use the current
+ UNIX timestamp as the file_seq. */
+ uint32_t file_seq;
+
+ /* Number of cache records that are linked inside the cache file,
+ instead of being directly pointed from the main index. */
+ uint32_t continued_record_count;
+
+ /* Number of messages cached in this file. This does not include
+ the continuation records.
+
+ NOTE: <=v2.1 used this for hole offset, so we can't fully
+ rely on it */
+ uint32_t record_count;
+ /* Currently unused. */
+ uint32_t backwards_compat_used_file_size;
+ /* Number of already expunged messages that currently have cache
+ content in this file. */
+ uint32_t deleted_record_count;
+
+ /* Offset to the first mail_cache_header_fields. */
+ uint32_t field_header_offset;
+};
+
+struct mail_cache_header_fields {
+ /* Offset to the updated version of this header. Use
+ mail_index_offset_to_uint32() to decode it. */
+ uint32_t next_offset;
+ /* Full size of this header. */
+ uint32_t size;
+ /* Number of fields in this header. */
+ uint32_t fields_count;
+
+#if 0
+ /* Last time the field was accessed. Not updated more often than
+ once a day. This field may be overwritten later on, which in theory
+ could cause reading to see a partially updated (corrupted) value.
+ Don't fully trust this field unless it was read while cache is
+ locked. */
+ uint32_t last_used[fields_count];
+ /* (uint32_t)-1 for variable sized fields */
+ uint32_t size[fields_count];
+ /* enum mail_cache_field_type */
+ uint8_t type[fields_count];
+ /* enum mail_cache_decision_type. This field can be overwritten
+ later on to update the caching decision. */
+ uint8_t decision[fields_count];
+ /* NUL-separated list of field names */
+ char name[fields_count][];
+#endif
+};
+
+/* Macros to return offsets to the fields in mail_cache_header_fields. */
+#define MAIL_CACHE_FIELD_LAST_USED() \
+ (sizeof(uint32_t) * 3)
+#define MAIL_CACHE_FIELD_SIZE(count) \
+ (MAIL_CACHE_FIELD_LAST_USED() + sizeof(uint32_t) * (count))
+#define MAIL_CACHE_FIELD_TYPE(count) \
+ (MAIL_CACHE_FIELD_SIZE(count) + sizeof(uint32_t) * (count))
+#define MAIL_CACHE_FIELD_DECISION(count) \
+ (MAIL_CACHE_FIELD_TYPE(count) + sizeof(uint8_t) * (count))
+#define MAIL_CACHE_FIELD_NAMES(count) \
+ (MAIL_CACHE_FIELD_DECISION(count) + sizeof(uint8_t) * (count))
+
+struct mail_cache_record {
+ uint32_t prev_offset;
+ uint32_t size; /* full record size, including this header */
+ /* array of { uint32_t field; [ uint32_t size; ] { .. } } */
+};
+
+struct mail_cache_field_private {
+ struct mail_cache_field field;
+
+ /* Highest message UID whose cache field of this type have been
+ accessed within this session. This is used to track whether messages
+ are accessed in non-ascending order, which indicates an IMAP client
+ that doesn't have a local cache. That will result in the caching
+ decision to change from TEMP to YES. */
+ uint32_t uid_highwater;
+
+ /* Unused fields aren't written to cache file */
+ bool used:1;
+ /* field.decision is pending a write to cache file header. If the
+ cache header is read from disk, don't overwrite it. */
+ bool decision_dirty:1;
+};
+
+struct mail_cache {
+ struct mail_index *index;
+ struct event *event;
+ /* Registered "cache" extension ID */
+ uint32_t ext_id;
+
+ char *filepath;
+ int fd;
+
+ struct dotlock_settings dotlock_settings;
+ struct file_lock *file_lock;
+
+ /* Cache file's inode, device and size when it was last fstat()ed. */
+ ino_t st_ino;
+ dev_t st_dev;
+ uoff_t last_stat_size;
+
+ /* Used to avoid logging mmap() errors too rapidly. */
+ time_t last_mmap_error_time;
+
+ /* a) mmaping the whole file */
+ void *mmap_base;
+ /* b) using file cache */
+ struct file_cache *file_cache;
+ /* c) using small read() calls with MAIL_INDEX_OPEN_FLAG_SAVEONLY */
+ uoff_t read_offset;
+ buffer_t *read_buf;
+ /* Size of the cache file as currently mapped to memory. Used for all
+ of a), b), and c). */
+ size_t mmap_length;
+ /* mail_cache_map() increases this always. Used only for asserts. */
+ unsigned int remap_counter;
+ /* Linked list of all cache views. */
+ struct mail_cache_view *views;
+
+ /* mmap_disable=no: hdr points to data / NULL when cache is invalid.
+ mmap_disable=yes: hdr points to hdr_ro_copy. this is needed because
+ cache invalidation can zero the data any time */
+ const struct mail_cache_header *hdr;
+ struct mail_cache_header hdr_ro_copy;
+ /* hdr_copy gets updated when cache is locked and written when
+ unlocking and hdr_modified=TRUE */
+ struct mail_cache_header hdr_copy;
+ /* If non-0, the offset for the last seen mail_cache_header_fields.
+ Used as a cache to avoid reading through multiple next_offset
+ pointers. */
+ uint32_t last_field_header_offset;
+
+ /* Memory pool used for permanent field allocations. Currently this
+ means mail_cache_field.name and field_name_hash. */
+ pool_t field_pool;
+ /* Size of fields[] and field_file_map[] */
+ unsigned int fields_count;
+ /* All the registered cache fields. */
+ struct mail_cache_field_private *fields;
+ /* mail_cache_field.idx -> file-specific header index. The reverse
+ of this is file_field_map[]. */
+ uint32_t *field_file_map;
+ /* mail_cache_field.name -> mail_cache_field.idx */
+ HASH_TABLE(char *, void *) field_name_hash; /* name -> idx */
+
+ /* file-specific header index -> mail_cache_fields.idx. The reverse
+ of this is field_file_map[]. */
+ unsigned int *file_field_map;
+ /* Size of file_field_map[] */
+ unsigned int file_fields_count;
+
+ /* mail_cache_purge_later() sets these values to trigger purging on
+ the next index sync. need_purge_file_seq is set to the current
+ cache file_seq. If at sync time the file_seq differs, it means
+ the cache was already purged and another purge isn't necessary. */
+ uint32_t need_purge_file_seq;
+ /* Human-readable reason for purging. Used for debugging and events. */
+ char *need_purge_reason;
+
+ /* Cache has been opened (or it doesn't exist). */
+ bool opened:1;
+ /* Cache has been locked with mail_cache_lock(). */
+ bool locked:1;
+ /* TRUE if the last lock attempt failed. The next locking attempt will
+ be non-blocking to avoid unnecessarily waiting on a cache that has
+ been locked for a long time. Since cache isn't strictly required,
+ this could avoid unnecessarily long waits with some edge cases. */
+ bool last_lock_failed:1;
+ /* cache->hdr_copy has been modified. This must be used only while
+ cache is locked. */
+ bool hdr_modified:1;
+ /* At least one of the cache fields' last_used or cache decision has
+ changed. mail_cache_header_fields_update() will be used to overwrite
+ these to the latest mail_cache_header_fields. */
+ bool field_header_write_pending:1;
+ /* Cache is currently being purged. */
+ bool purging:1;
+ /* Access the cache file by reading as little as possible from it
+ (as opposed to mmap()ing it or using file-cache.h API to cache
+ larger parts of it). This is used with MAIL_INDEX_OPEN_FLAG_SAVEONLY
+ to avoid unnecessary cache reads. */
+ bool map_with_read:1;
+};
+
+struct mail_cache_loop_track {
+ /* we're looping if size_sum > (max_offset-min_offset) */
+ uoff_t min_offset, max_offset;
+ uoff_t size_sum;
+};
+
+/* Cached state for mail_cache_get_missing_reason(): remembers the
+   highest seq known to have cache content so repeated calls don't
+   rescan the same range. Valid only while log_file_head_seq/offset
+   still match the view. */
+struct mail_cache_missing_reason_cache {
+ uint32_t highest_checked_seq;
+ uint32_t highest_seq_with_cache;
+
+ uint32_t reset_id;
+ uint32_t log_file_head_seq;
+ uoff_t log_file_head_offset;
+};
+
+struct mail_cache_view {
+ struct mail_cache *cache;
+ struct mail_cache_view *prev, *next;
+ struct mail_index_view *view, *trans_view;
+
+ struct mail_cache_transaction_ctx *transaction;
+ /* mail_cache_add() has been called for some of the messages between
+ trans_seq1..trans_seq2 in an uncommitted transaction. Check also
+ the transaction contents when looking up cache fields for these
+ mails. */
+ uint32_t trans_seq1, trans_seq2;
+
+ /* Used to avoid infinite loops in case cache records point to each
+ others, causing a loop. FIXME: New cache files no longer support
+ overwriting existing data, so this could be removed and replaced
+ with a simple check that prev_offset is always smaller than the
+ current record's offset. */
+ struct mail_cache_loop_track loop_track;
+ /* Used for optimizing mail_cache_get_missing_reason() */
+ struct mail_cache_missing_reason_cache reason_cache;
+
+ /* if cached_exists_buf[field] == cached_exists_value, it's cached.
+ this allows us to avoid constantly clearing the whole buffer.
+ it needs to be cleared only when cached_exists_value is wrapped. */
+ buffer_t *cached_exists_buf;
+ uint8_t cached_exists_value;
+ uint32_t cached_exists_seq;
+
+ /* mail_cache_view_update_cache_decisions() has been used to disable
+ updating cache decisions. */
+ bool no_decision_updates:1;
+};
+
+/* mail_cache_lookup_iter_next() returns the next found field. */
+struct mail_cache_iterate_field {
+ /* mail_cache_field.idx */
+ unsigned int field_idx;
+ /* Size of data */
+ unsigned int size;
+ /* Cache field content in the field type-specific format */
+ const void *data;
+ /* Offset to data in cache file */
+ uoff_t offset;
+};
+
+struct mail_cache_lookup_iterate_ctx {
+ struct mail_cache_view *view;
+ /* This must match mail_cache.remap_counter or the iterator is
+ invalid. */
+ unsigned int remap_counter;
+ /* Message sequence as given to mail_cache_lookup_iter_init() */
+ uint32_t seq;
+
+ /* Pointer to current cache record being iterated. This may point
+ to the cache file or uncommitted transaction. */
+ const struct mail_cache_record *rec;
+ /* Iterator's current position in the cache record. Starts from
+ sizeof(mail_cache_record). */
+ unsigned int pos;
+ /* Copy of rec->size */
+ unsigned int rec_size;
+ /* Cache file offset to the beginning of rec, or 0 if it points to
+ an uncommitted transaction. */
+ uint32_t offset;
+
+ /* Used to loop through all changes in the uncommited transaction,
+ in case there are multiple changes to the same message. */
+ unsigned int trans_next_idx;
+
+ /* Cache has become unusable. Stop the iteration. */
+ bool stop:1;
+ /* I/O error or lock timeout occurred during iteration. Normally there
+ is no locking during iteration, but it may happen while cache is
+ being purged to wait for the purging to finish before cache can be
+ accessed again. */
+ bool failed:1;
+ /* Iteration has finished returning changes from uncommitted
+ transaction's in-memory buffer. */
+ bool memory_appends_checked:1;
+ /* Iteration has finished returning changes from uncommitted
+ transaction that were already written to cache file, but not
+ to main index. */
+ bool disk_appends_checked:1;
+ /* TRUE if the field index numbers in rec as the internal
+ mail_cache_field.idx (instead of the file-specific indexes).
+ This indicates that the rec points to uncommited transaction's
+ in-memory buffer. */
+ bool inmemory_field_idx:1;
+};
+
+/* Explicitly lock the cache file. Returns -1 if error / timed out,
+ 1 if ok, 0 if cache is broken/doesn't exist */
+int mail_cache_lock(struct mail_cache *cache);
+/* Flush pending header updates and unlock. Returns -1 if cache is / just got
+ corrupted, 0 if ok. */
+int mail_cache_flush_and_unlock(struct mail_cache *cache);
+/* Unlock the cache without any header updates. */
+void mail_cache_unlock(struct mail_cache *cache);
+
+int mail_cache_write(struct mail_cache *cache, const void *data, size_t size,
+ uoff_t offset);
+int mail_cache_append(struct mail_cache *cache, const void *data, size_t size,
+ uint32_t *offset);
+
+int mail_cache_header_fields_read(struct mail_cache *cache);
+int mail_cache_header_fields_update(struct mail_cache *cache);
+void mail_cache_header_fields_get(struct mail_cache *cache, buffer_t *dest);
+int mail_cache_header_fields_get_next_offset(struct mail_cache *cache,
+ uint32_t *offset_r);
+void mail_cache_expunge_count(struct mail_cache *cache, unsigned int count);
+
+uint32_t mail_cache_lookup_cur_offset(struct mail_index_view *view,
+ uint32_t seq, uint32_t *reset_id_r);
+int mail_cache_get_record(struct mail_cache *cache, uint32_t offset,
+ const struct mail_cache_record **rec_r);
+uint32_t mail_cache_get_first_new_seq(struct mail_index_view *view);
+
+/* Returns TRUE if offset..size area has been tracked before.
+ Returns FALSE if the area may or may not have been tracked before,
+ but we don't know for sure yet. */
+bool mail_cache_track_loops(struct mail_cache_loop_track *loop_track,
+ uoff_t offset, uoff_t size);
+
+/* Iterate through a message's cached fields. */
+void mail_cache_lookup_iter_init(struct mail_cache_view *view, uint32_t seq,
+ struct mail_cache_lookup_iterate_ctx *ctx_r);
+/* Returns 1 if field was returned, 0 if end of fields, or -1 if error.
+ Note that this may trigger re-reading and reallocating cache fields. */
+int mail_cache_lookup_iter_next(struct mail_cache_lookup_iterate_ctx *ctx,
+ struct mail_cache_iterate_field *field_r);
+const struct mail_cache_record *
+mail_cache_transaction_lookup_rec(struct mail_cache_transaction_ctx *ctx,
+ unsigned int seq,
+ unsigned int *trans_next_idx);
+bool mail_cache_transactions_have_changes(struct mail_cache *cache);
+
+/* Return data from the specified position in the cache file. Returns 1 if
+ successful, 0 if offset/size points outside the cache file, -1 if I/O
+ error. */
+int mail_cache_map(struct mail_cache *cache, size_t offset, size_t size,
+ const void **data_r);
+/* Map the whole cache file into memory. Returns 1 if ok, 0 if corrupted
+ (and deleted), -1 if I/O error. */
+int mail_cache_map_all(struct mail_cache *cache);
+void mail_cache_file_close(struct mail_cache *cache);
+int mail_cache_reopen(struct mail_cache *cache);
+int mail_cache_sync_reset_id(struct mail_cache *cache);
+
+/* Notify the decision handling code that field was looked up for seq.
+ This should be called even for fields that aren't currently in cache file.
+ This is used to update caching decisions for fields that already exist
+ in the cache file. */
+void mail_cache_decision_state_update(struct mail_cache_view *view,
+ uint32_t seq, unsigned int field);
+const char *mail_cache_decision_to_string(enum mail_cache_decision_type dec);
+struct event_passthrough *
+mail_cache_decision_changed_event(struct mail_cache *cache, struct event *event,
+ unsigned int field);
+
+struct mail_cache_purge_drop_ctx {
+ struct mail_cache *cache;
+ time_t max_yes_downgrade_time;
+ time_t max_temp_drop_time;
+};
+enum mail_cache_purge_drop_decision {
+ MAIL_CACHE_PURGE_DROP_DECISION_NONE,
+ MAIL_CACHE_PURGE_DROP_DECISION_DROP,
+ MAIL_CACHE_PURGE_DROP_DECISION_TO_TEMP,
+};
+void mail_cache_purge_drop_init(struct mail_cache *cache,
+ const struct mail_index_header *hdr,
+ struct mail_cache_purge_drop_ctx *ctx_r);
+enum mail_cache_purge_drop_decision
+mail_cache_purge_drop_test(struct mail_cache_purge_drop_ctx *ctx,
+ unsigned int field);
+
+int mail_cache_expunge_handler(struct mail_index_sync_map_ctx *sync_ctx,
+ const void *data, void **sync_context);
+
+void mail_cache_set_syscall_error(struct mail_cache *cache,
+ const char *function) ATTR_COLD;
+
+#endif
diff --git a/src/lib-index/mail-cache-purge.c b/src/lib-index/mail-cache-purge.c
new file mode 100644
index 0000000..bc86bf1
--- /dev/null
+++ b/src/lib-index/mail-cache-purge.c
@@ -0,0 +1,707 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "ostream.h"
+#include "nfs-workarounds.h"
+#include "read-full.h"
+#include "file-dotlock.h"
+#include "file-cache.h"
+#include "file-set-size.h"
+#include "mail-cache-private.h"
+
+#include <stdio.h>
+#include <sys/stat.h>
+
+/* State tracked while copying cache records into the new cache file
+   during a purge (see mail_cache_copy()). */
+struct mail_cache_copy_context {
+ struct mail_cache *cache;
+ struct event *event;
+ struct mail_cache_purge_drop_ctx drop_ctx;
+
+ /* buffer: the record currently being built; field_seen: one byte per
+    field used to detect duplicate fields within a single record */
+ buffer_t *buffer, *field_seen;
+ /* per-field position of bitmask data within buffer, so duplicates
+    can be OR-merged in place (0 = field was dropped) */
+ ARRAY(unsigned int) bitmask_pos;
+ /* maps internal field index -> field index in the new file;
+    (uint32_t)-1 means the field isn't written */
+ uint32_t *field_file_map;
+
+ /* marker value that means "seen for the current record" in field_seen */
+ uint8_t field_seen_value;
+ /* TRUE when the current message is new enough to keep TEMP fields */
+ bool new_msg;
+};
+
+/* OR a duplicate bitmask field's data into the copy of the field that
+   was already appended to ctx->buffer for the current record. */
+static void
+mail_cache_merge_bitmask(struct mail_cache_copy_context *ctx,
+ const struct mail_cache_iterate_field *field)
+{
+ unsigned char *dest;
+ unsigned int i, *pos;
+
+ pos = array_idx_get_space(&ctx->bitmask_pos, field->field_idx);
+ if (*pos == 0) {
+ /* we decided to drop this field */
+ return;
+ }
+
+ /* merge bit-by-bit into the previously written data */
+ dest = buffer_get_space_unsafe(ctx->buffer, *pos, field->size);
+ for (i = 0; i < field->size; i++)
+ dest[i] |= ((const unsigned char*)field->data)[i];
+}
+
+/* Append one iterated field to the record being built in ctx->buffer,
+   unless the field was dropped, is a duplicate, or its caching decision
+   says it shouldn't be copied for this message. */
+static void
+mail_cache_purge_field(struct mail_cache_copy_context *ctx,
+ const struct mail_cache_iterate_field *field)
+{
+ struct mail_cache_field *cache_field;
+ enum mail_cache_decision_type dec;
+ uint32_t file_field_idx, size32;
+ uint8_t *field_seen;
+
+ file_field_idx = ctx->field_file_map[field->field_idx];
+ if (file_field_idx == (uint32_t)-1)
+ return;
+
+ cache_field = &ctx->cache->fields[field->field_idx].field;
+
+ field_seen = buffer_get_space_unsafe(ctx->field_seen,
+ field->field_idx, 1);
+ if (*field_seen == ctx->field_seen_value) {
+ /* duplicate */
+ if (cache_field->type == MAIL_CACHE_FIELD_BITMASK)
+ mail_cache_merge_bitmask(ctx, field);
+ return;
+ }
+ *field_seen = ctx->field_seen_value;
+
+ /* old messages keep only YES fields; newer messages keep
+    everything except NO fields */
+ dec = cache_field->decision & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED);
+ if (ctx->new_msg) {
+ if (dec == MAIL_CACHE_DECISION_NO)
+ return;
+ } else {
+ if (dec != MAIL_CACHE_DECISION_YES)
+ return;
+ }
+
+ buffer_append(ctx->buffer, &file_field_idx, sizeof(file_field_idx));
+
+ /* variable-sized fields store their size explicitly */
+ if (cache_field->field_size == UINT_MAX) {
+ size32 = (uint32_t)field->size;
+ buffer_append(ctx->buffer, &size32, sizeof(size32));
+ }
+
+ if (cache_field->type == MAIL_CACHE_FIELD_BITMASK) {
+ /* remember the position in case we need to update it */
+ unsigned int pos = ctx->buffer->used;
+
+ array_idx_set(&ctx->bitmask_pos, field->field_idx, &pos);
+ }
+ buffer_append(ctx->buffer, field->data, field->size);
+ /* pad to 32-bit alignment */
+ if ((field->size & 3) != 0)
+ buffer_append_zero(ctx->buffer, 4 - (field->size & 3));
+}
+
+/* Pick the file_seq for the next cache file: higher than both the cache
+   extension's reset_id and the current file's file_seq, falling back to
+   ioloop_time if no extension exists. Never returns 0. */
+static uint32_t get_next_file_seq(struct mail_cache *cache)
+{
+ const struct mail_index_ext *ext;
+ struct mail_index_view *view;
+ uint32_t file_seq;
+
+ /* make sure we look up the latest reset_id */
+ if (mail_index_refresh(cache->index) < 0)
+ /* NOTE(review): -1 converts to (uint32_t)-1 here and the
+    caller uses it as an ordinary file_seq — confirm this
+    failure path is intentional. */
+ return -1;
+
+ view = mail_index_view_open(cache->index);
+ ext = mail_index_view_get_ext(view, cache->ext_id);
+ file_seq = ext != NULL ? ext->reset_id + 1 : (uint32_t)ioloop_time;
+
+ if (cache->hdr != NULL && file_seq <= cache->hdr->file_seq)
+ file_seq = cache->hdr->file_seq + 1;
+ mail_index_view_close(&view);
+
+ /* file_seq=0 is reserved; wrap to 1 */
+ return file_seq != 0 ? file_seq : 1;
+}
+
+/* Install the purge's field mapping into the cache struct (both
+   directions: field->file and file->field) and serialize the field
+   header for the new file into ctx->buffer. */
+static void
+mail_cache_purge_get_fields(struct mail_cache_copy_context *ctx,
+ unsigned int used_fields_count)
+{
+ struct mail_cache *cache = ctx->cache;
+ unsigned int i, j, idx;
+
+ /* Make mail_cache_header_fields_get() return the fields in
+ the same order as we saved them. */
+ memcpy(cache->field_file_map, ctx->field_file_map,
+ sizeof(uint32_t) * cache->fields_count);
+
+ /* reverse mapping */
+ cache->file_fields_count = used_fields_count;
+ i_free(cache->file_field_map);
+ cache->file_field_map = used_fields_count == 0 ? NULL :
+ i_new(unsigned int, used_fields_count);
+ for (i = j = 0; i < cache->fields_count; i++) {
+ idx = cache->field_file_map[i];
+ if (idx != (uint32_t)-1) {
+ /* each file index must be assigned exactly once */
+ i_assert(idx < used_fields_count &&
+ cache->file_field_map != NULL &&
+ cache->file_field_map[idx] == 0);
+ cache->file_field_map[idx] = i;
+ j++;
+ }
+ }
+ i_assert(j == used_fields_count);
+
+ /* reuse ctx->buffer for the serialized field header */
+ buffer_set_used_size(ctx->buffer, 0);
+ mail_cache_header_fields_get(cache, ctx->buffer);
+}
+
+/* Apply the drop/downgrade decision for one field, emitting debug
+   events for any change. Returns TRUE if the field is still used and
+   should be written to the new cache file. */
+static bool
+mail_cache_purge_check_field(struct mail_cache_copy_context *ctx,
+ unsigned int field)
+{
+ struct mail_cache_field_private *priv = &ctx->cache->fields[field];
+ enum mail_cache_decision_type dec = priv->field.decision;
+
+ switch (mail_cache_purge_drop_test(&ctx->drop_ctx, field)) {
+ case MAIL_CACHE_PURGE_DROP_DECISION_NONE:
+ break;
+ case MAIL_CACHE_PURGE_DROP_DECISION_DROP: {
+ const char *dec_str = mail_cache_decision_to_string(dec);
+ struct event_passthrough *e =
+ event_create_passthrough(ctx->event)->
+ set_name("mail_cache_purge_drop_field")->
+ add_str("field", priv->field.name)->
+ add_str("decision", dec_str)->
+ add_int("last_used", priv->field.last_used);
+ e_debug(e->event(), "Purge dropped field %s "
+ "(decision=%s, last_used=%"PRIdTIME_T")",
+ priv->field.name, dec_str, priv->field.last_used);
+ dec = MAIL_CACHE_DECISION_NO;
+ break;
+ }
+ case MAIL_CACHE_PURGE_DROP_DECISION_TO_TEMP: {
+ struct event_passthrough *e =
+ mail_cache_decision_changed_event(
+ ctx->cache, ctx->event, field)->
+ add_str("old_decision", "yes")->
+ add_str("new_decision", "temp");
+ e_debug(e->event(), "Purge changes field %s "
+ "cache decision yes -> temp "
+ "(last_used=%"PRIdTIME_T")",
+ priv->field.name, priv->field.last_used);
+ dec = MAIL_CACHE_DECISION_TEMP;
+ break;
+ }
+ }
+ priv->field.decision = dec;
+
+ /* drop all fields we don't want */
+ if ((dec & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED)) == MAIL_CACHE_DECISION_NO) {
+ priv->used = FALSE;
+ priv->field.last_used = 0;
+ }
+ return priv->used;
+}
+
+/* Write a purged copy of the cache into fd: header, one record per
+   non-expunged message (dropping unwanted fields), then the field
+   header. Fills per-message extension offsets into ext_offsets (0 =
+   no cached data for that message; caller owns/frees the array on
+   success, it is freed here on failure). Returns 0 on success, -1 on
+   I/O error. */
+static int
+mail_cache_copy(struct mail_cache *cache, struct mail_index_transaction *trans,
+ struct event *event, int fd, const char *reason,
+ uint32_t *file_seq_r, uoff_t *file_size_r, uint32_t *max_uid_r,
+ uint32_t *ext_first_seq_r, ARRAY_TYPE(uint32_t) *ext_offsets)
+{
+ struct mail_cache_copy_context ctx;
+ struct mail_cache_lookup_iterate_ctx iter;
+ struct mail_cache_iterate_field field;
+ struct mail_index_view *view;
+ struct mail_cache_view *cache_view;
+ const struct mail_index_header *idx_hdr;
+ struct mail_cache_header hdr;
+ struct mail_cache_record cache_rec;
+ struct ostream *output;
+ uint32_t message_count, seq, first_new_seq, ext_offset;
+ unsigned int i, used_fields_count, orig_fields_count, record_count;
+
+ i_assert(reason != NULL);
+
+ *max_uid_r = 0;
+ *ext_first_seq_r = 0;
+
+ /* get the latest info on fields */
+ if (mail_cache_header_fields_read(cache) < 0)
+ return -1;
+
+ view = mail_index_transaction_open_updated_view(trans);
+ cache_view = mail_cache_view_open(cache, view);
+ output = o_stream_create_fd_file(fd, 0, FALSE);
+
+ /* write a placeholder header now; it's rewritten at the end once
+    record_count and offsets are known */
+ i_zero(&hdr);
+ hdr.major_version = MAIL_CACHE_MAJOR_VERSION;
+ hdr.minor_version = MAIL_CACHE_MINOR_VERSION;
+ hdr.compat_sizeof_uoff_t = sizeof(uoff_t);
+ hdr.indexid = cache->index->indexid;
+ hdr.file_seq = get_next_file_seq(cache);
+ o_stream_nsend(output, &hdr, sizeof(hdr));
+
+ event_add_str(event, "reason", reason);
+ event_add_int(event, "file_seq", hdr.file_seq);
+ event_set_name(event, "mail_cache_purge_started");
+ e_debug(event, "Purging (new file_seq=%u): %s", hdr.file_seq, reason);
+
+ i_zero(&ctx);
+ ctx.cache = cache;
+ ctx.event = event;
+ ctx.buffer = buffer_create_dynamic(default_pool, 4096);
+ ctx.field_seen = buffer_create_dynamic(default_pool, 64);
+ ctx.field_seen_value = 0;
+ ctx.field_file_map = t_new(uint32_t, cache->fields_count + 1);
+ t_array_init(&ctx.bitmask_pos, 32);
+
+ /* @UNSAFE: drop unused fields and create a field mapping for
+ used fields */
+ idx_hdr = mail_index_get_header(view);
+ mail_cache_purge_drop_init(cache, idx_hdr, &ctx.drop_ctx);
+
+ orig_fields_count = cache->fields_count;
+ if (cache->file_fields_count == 0) {
+ /* creating the initial cache file. add all fields. */
+ for (i = 0; i < orig_fields_count; i++)
+ ctx.field_file_map[i] = i;
+ used_fields_count = i;
+ } else {
+ for (i = used_fields_count = 0; i < orig_fields_count; i++) {
+ if (!mail_cache_purge_check_field(&ctx, i))
+ ctx.field_file_map[i] = (uint32_t)-1;
+ else
+ ctx.field_file_map[i] = used_fields_count++;
+ }
+ }
+
+ /* get sequence of first message which doesn't need its temp fields
+ removed. */
+ first_new_seq = mail_cache_get_first_new_seq(view);
+ message_count = mail_index_view_get_messages_count(view);
+ if (!trans->reset)
+ seq = 1;
+ else {
+ /* Index is being rebuilt. Ignore old messages. */
+ seq = trans->first_new_seq;
+ }
+
+ *ext_first_seq_r = seq;
+ i_array_init(ext_offsets, message_count); record_count = 0;
+ for (; seq <= message_count; seq++) {
+ if (mail_index_transaction_is_expunged(trans, seq)) {
+ array_append_zero(ext_offsets);
+ continue;
+ }
+
+ ctx.new_msg = seq >= first_new_seq;
+ buffer_set_used_size(ctx.buffer, 0);
+
+ /* bump the "seen" marker; on uint8_t wraparound clear the
+    whole field_seen buffer so stale markers can't match */
+ ctx.field_seen_value = (ctx.field_seen_value + 1) & UINT8_MAX;
+ if (ctx.field_seen_value == 0) {
+ memset(buffer_get_modifiable_data(ctx.field_seen, NULL),
+ 0, buffer_get_size(ctx.field_seen));
+ ctx.field_seen_value++;
+ }
+ array_clear(&ctx.bitmask_pos);
+
+ /* reserve space for the record header; size is filled in
+    after the fields are appended */
+ i_zero(&cache_rec);
+ buffer_append(ctx.buffer, &cache_rec, sizeof(cache_rec));
+
+ mail_cache_lookup_iter_init(cache_view, seq, &iter);
+ while (mail_cache_lookup_iter_next(&iter, &field) > 0)
+ mail_cache_purge_field(&ctx, &field);
+
+ if (ctx.buffer->used == sizeof(cache_rec) ||
+ ctx.buffer->used > cache->index->optimization_set.cache.record_max_size) {
+ /* nothing cached */
+ ext_offset = 0;
+ } else {
+ mail_index_lookup_uid(view, seq, max_uid_r);
+ cache_rec.size = ctx.buffer->used;
+ ext_offset = output->offset;
+ buffer_write(ctx.buffer, 0, &cache_rec,
+ sizeof(cache_rec));
+ o_stream_nsend(output, ctx.buffer->data, cache_rec.size);
+ record_count++;
+ }
+
+ array_push_back(ext_offsets, &ext_offset);
+ }
+ i_assert(orig_fields_count == cache->fields_count);
+
+ /* append the field header and finalize the file header */
+ hdr.record_count = record_count;
+ hdr.field_header_offset = mail_index_uint32_to_offset(output->offset);
+ mail_cache_purge_get_fields(&ctx, used_fields_count);
+ o_stream_nsend(output, ctx.buffer->data, ctx.buffer->used);
+
+ hdr.backwards_compat_used_file_size = output->offset;
+ buffer_free(&ctx.buffer);
+ buffer_free(&ctx.field_seen);
+
+ /* rewrite the header at offset 0 with the final values */
+ *file_size_r = output->offset;
+ (void)o_stream_seek(output, 0);
+ o_stream_nsend(output, &hdr, sizeof(hdr));
+
+ mail_cache_view_close(&cache_view);
+ mail_index_view_close(&view);
+
+ if (o_stream_finish(output) < 0) {
+ mail_cache_set_syscall_error(cache, "write()");
+ o_stream_destroy(&output);
+ array_free(ext_offsets);
+ return -1;
+ }
+ o_stream_destroy(&output);
+
+ if (cache->index->set.fsync_mode == FSYNC_MODE_ALWAYS) {
+ if (fdatasync(fd) < 0) {
+ mail_cache_set_syscall_error(cache, "fdatasync()");
+ array_free(ext_offsets);
+ return -1;
+ }
+ }
+
+ *file_seq_r = hdr.file_seq;
+ return 0;
+}
+
+/* Write the purged cache from fd/temp_path into place: copy the
+   records, fstat() the result and rename() it over the old cache file,
+   then point the index extension offsets at the new records. On
+   success the cache's fd and stat identity are switched to the new
+   file. *unlock is cleared once the cache lock has been released.
+   Returns 0 on success, -1 on error (caller closes fd and unlinks
+   temp_path). */
+static int
+mail_cache_purge_write(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ int fd, const char *temp_path, const char *reason,
+ bool *unlock)
+{
+ struct event *event;
+ struct stat st;
+ uint32_t prev_file_seq, file_seq, old_offset, max_uid, ext_first_seq;
+ ARRAY_TYPE(uint32_t) ext_offsets;
+ const uint32_t *offsets;
+ uoff_t prev_file_size, file_size;
+ unsigned int i, count, prev_deleted_records;
+
+ if (cache->hdr == NULL) {
+ /* creating the initial cache file */
+ prev_file_seq = 0;
+ prev_file_size = 0;
+ prev_deleted_records = 0;
+ } else {
+ prev_file_seq = cache->hdr->file_seq;
+ prev_file_size = cache->last_stat_size;
+ prev_deleted_records = cache->hdr->deleted_record_count;
+ }
+ event = event_create(cache->event);
+ event_add_int(event, "prev_file_seq", prev_file_seq);
+ event_add_int(event, "prev_file_size", prev_file_size);
+ event_add_int(event, "prev_deleted_records", prev_deleted_records);
+
+ if (mail_cache_copy(cache, trans, event, fd, reason,
+ &file_seq, &file_size, &max_uid,
+ &ext_first_seq, &ext_offsets) < 0) {
+ /* FIX: the event was previously leaked on this and the
+    following error paths; drop our reference. */
+ event_unref(&event);
+ return -1;
+ }
+
+ if (fstat(fd, &st) < 0) {
+ mail_cache_set_syscall_error(cache, "fstat()");
+ array_free(&ext_offsets);
+ event_unref(&event);
+ return -1;
+ }
+ if (rename(temp_path, cache->filepath) < 0) {
+ mail_cache_set_syscall_error(cache, "rename()");
+ array_free(&ext_offsets);
+ event_unref(&event);
+ return -1;
+ }
+
+ event_add_int(event, "file_size", file_size);
+ event_add_int(event, "max_uid", max_uid);
+ event_set_name(event, "mail_cache_purge_finished");
+ e_debug(event, "Purging finished, file_seq changed %u -> %u, "
+ "size=%"PRIuUOFF_T" -> %"PRIuUOFF_T", max_uid=%u",
+ prev_file_seq, file_seq, prev_file_size, file_size, max_uid);
+ event_unref(&event);
+
+ /* once we're sure that the purging was successful,
+ update the offsets */
+ mail_index_ext_reset(trans, cache->ext_id, file_seq, TRUE);
+ offsets = array_get(&ext_offsets, &count);
+ for (i = 0; i < count; i++) {
+ if (offsets[i] != 0) {
+ mail_index_update_ext(trans, ext_first_seq + i,
+ cache->ext_id,
+ &offsets[i], &old_offset);
+ }
+ }
+ array_free(&ext_offsets);
+
+ if (*unlock) {
+ mail_cache_unlock(cache);
+ *unlock = FALSE;
+ }
+
+ /* switch the cache struct over to the freshly written file */
+ mail_cache_file_close(cache);
+ cache->opened = TRUE;
+ cache->fd = fd;
+ cache->st_ino = st.st_ino;
+ cache->st_dev = st.st_dev;
+ cache->field_header_write_pending = FALSE;
+ return 0;
+}
+
+/* Check whether the cache file on disk is no longer the one we decided
+   to purge. Returns 1 if it changed (someone else already recreated
+   it), 0 if it's still the same (or is missing/empty), -1 on error.
+   Retries reads on ESTALE to cope with NFS. */
+static int
+mail_cache_purge_has_file_changed(struct mail_cache *cache,
+ uint32_t purge_file_seq)
+{
+ struct mail_cache_header hdr;
+ unsigned int i;
+ int fd, ret;
+
+ for (i = 0;; i++) {
+ fd = nfs_safe_open(cache->filepath, O_RDONLY);
+ if (fd == -1) {
+ if (errno == ENOENT)
+ return 0;
+
+ mail_cache_set_syscall_error(cache, "open()");
+ return -1;
+ }
+
+ ret = read_full(fd, &hdr, sizeof(hdr));
+ i_close_fd(&fd);
+
+ if (ret >= 0) {
+ /* ret==0 means the file is shorter than the header */
+ if (ret == 0)
+ return 0;
+ if (purge_file_seq == 0) {
+ /* previously it didn't exist or it
+ was unusable and was just unlinked */
+ return 1;
+ }
+ return hdr.file_seq != purge_file_seq ? 1 : 0;
+ } else if (errno != ESTALE || i >= NFS_ESTALE_RETRY_COUNT) {
+ mail_cache_set_syscall_error(cache, "read()");
+ return -1;
+ }
+ }
+}
+
+/* Perform the purge while holding the purge lock: skip it if another
+   process already recreated the file, otherwise write a new cache file
+   to a temp path and move it into place. purge_file_seq==(uint32_t)-1
+   forces the purge regardless of the on-disk file. Returns 0 on
+   success, -1 on error. */
+static int mail_cache_purge_locked(struct mail_cache *cache,
+ uint32_t purge_file_seq,
+ struct mail_index_transaction *trans,
+ const char *reason, bool *unlock)
+{
+ const char *temp_path;
+ int fd, ret;
+
+ /* we've locked the cache purging now. if somebody else had just
+ recreated the cache, reopen the cache and return success. */
+ if (purge_file_seq != (uint32_t)-1 &&
+ (ret = mail_cache_purge_has_file_changed(cache, purge_file_seq)) != 0) {
+ if (ret < 0)
+ return -1;
+
+ /* was just purged, forget this */
+ mail_cache_purge_later_reset(cache);
+
+ if (*unlock) {
+ (void)mail_cache_unlock(cache);
+ *unlock = FALSE;
+ }
+
+ return mail_cache_reopen(cache) < 0 ? -1 : 0;
+ }
+ if (cache->fd != -1) {
+ /* make sure we have mapped it before reading. */
+ if (mail_cache_map_all(cache) <= 0)
+ return -1;
+ }
+
+ /* we want to recreate the cache. write it first to a temporary file */
+ fd = mail_index_create_tmp_file(cache->index, cache->filepath, &temp_path);
+ if (fd == -1)
+ return -1;
+ if (mail_cache_purge_write(cache, trans, fd, temp_path, reason, unlock) < 0) {
+ i_close_fd(&fd);
+ i_unlink(temp_path);
+ return -1;
+ }
+ if (cache->file_cache != NULL)
+ file_cache_set_fd(cache->file_cache, cache->fd);
+
+ /* map the new file and reload its field headers */
+ if (mail_cache_map_all(cache) <= 0)
+ return -1;
+ if (mail_cache_header_fields_read(cache) < 0)
+ return -1;
+
+ mail_cache_purge_later_reset(cache);
+ return 0;
+}
+
+/* Entry point for purging with a caller-provided transaction. Requires
+   the transaction log sync lock to be held (asserted). Takes the cache
+   file lock around the actual purge; proceeds without it if the cache
+   is broken/missing. Returns 0 on success (or no-op for in-memory /
+   read-only indexes), -1 on error. */
+static int
+mail_cache_purge_full(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ uint32_t purge_file_seq, const char *reason)
+{
+ bool unlock = FALSE;
+ int ret;
+
+ i_assert(!cache->purging);
+ i_assert(cache->index->log_sync_locked);
+
+ if (MAIL_INDEX_IS_IN_MEMORY(cache->index) || cache->index->readonly)
+ return 0;
+
+ /* purging isn't very efficient with small read()s */
+ if (cache->map_with_read) {
+ cache->map_with_read = FALSE;
+ if (cache->read_buf != NULL)
+ buffer_set_used_size(cache->read_buf, 0);
+ cache->hdr = NULL;
+ cache->mmap_length = 0;
+ }
+
+ /* .log lock already prevents other processes from purging cache at
+ the same time, but locking the cache file itself prevents other
+ processes from doing other changes to it (header changes, adding
+ more cached data). */
+ switch (mail_cache_lock(cache)) {
+ case -1:
+ /* lock timeout or some other error */
+ return -1;
+ case 0:
+ /* cache is broken or doesn't exist.
+ just start creating it. */
+ break;
+ default:
+ /* locking succeeded. */
+ unlock = TRUE;
+ }
+ cache->purging = TRUE;
+ ret = mail_cache_purge_locked(cache, purge_file_seq, trans, reason, &unlock);
+ cache->purging = FALSE;
+ if (unlock)
+ mail_cache_unlock(cache);
+ i_assert(!cache->hdr_modified);
+ if (ret < 0) {
+ /* the fields may have been updated in memory already.
+ reverse those changes by re-reading them from file. */
+ (void)mail_cache_header_fields_read(cache);
+ }
+ return ret;
+}
+
+/* Public wrapper: purge using the caller's existing transaction. The
+   caller must hold the transaction log sync lock (asserted inside
+   mail_cache_purge_full()). */
+int mail_cache_purge_with_trans(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ uint32_t purge_file_seq, const char *reason)
+{
+ return mail_cache_purge_full(cache, trans, purge_file_seq, reason);
+}
+
+/* Public wrapper: purge using a freshly opened view and transaction.
+   Acquires the transaction log sync lock unless the caller already
+   holds it. Returns 0 on success, -1 on error. */
+int mail_cache_purge(struct mail_cache *cache, uint32_t purge_file_seq,
+ const char *reason)
+{
+ struct mail_index_view *view;
+ struct mail_index_transaction *trans;
+ bool lock_log;
+ int ret;
+
+ lock_log = !cache->index->log_sync_locked;
+ if (lock_log) {
+ uint32_t file_seq;
+ uoff_t file_offset;
+
+ if (mail_transaction_log_sync_lock(cache->index->log,
+ "mail cache purge",
+ &file_seq, &file_offset) < 0)
+ return -1;
+ }
+ /* make sure we see the latest changes in index */
+ ret = mail_index_refresh(cache->index);
+
+ view = mail_index_view_open(cache->index);
+ trans = mail_index_transaction_begin(view,
+ MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
+ if (ret < 0) {
+ /* FIX: previously the transaction was left open here
+    (neither committed nor rolled back), leaking it and
+    closing the view with a transaction still attached. */
+ mail_index_transaction_rollback(&trans);
+ } else if ((ret = mail_cache_purge_full(cache, trans, purge_file_seq,
+ reason)) < 0)
+ mail_index_transaction_rollback(&trans);
+ else {
+ if (mail_index_transaction_commit(&trans) < 0)
+ ret = -1;
+ }
+ mail_index_view_close(&view);
+ if (lock_log) {
+ mail_transaction_log_sync_unlock(cache->index->log,
+ "mail cache purge");
+ }
+ return ret;
+}
+
+/* Returns TRUE if a delayed purge was requested (and this process is
+   allowed to perform it), setting *reason_r to a data-stack copy of
+   the stored reason. */
+bool mail_cache_need_purge(struct mail_cache *cache, const char **reason_r)
+{
+ if (cache->need_purge_file_seq == 0)
+ return FALSE; /* delayed purging not requested */
+ if (cache->index->readonly)
+ return FALSE; /* no purging when opened as read-only */
+ if ((cache->index->flags & MAIL_INDEX_OPEN_FLAG_SAVEONLY) != 0) {
+ /* Mail deliveries don't really need to purge, even if there
+ could be some work to do. Just delay until the next regular
+ mail access comes before doing any extra work. */
+ return FALSE;
+ }
+
+ i_assert(cache->need_purge_reason != NULL);
+ /* t_strdup() the reason in case it gets freed (or replaced)
+ before it's used */
+ *reason_r = t_strdup(cache->need_purge_reason);
+ return TRUE;
+}
+
+/* Request that the current cache file (identified by its file_seq) be
+   purged at the next convenient opportunity, remembering why. */
+void mail_cache_purge_later(struct mail_cache *cache, const char *reason)
+{
+ i_assert(cache->hdr != NULL);
+
+ cache->need_purge_file_seq = cache->hdr->file_seq;
+ i_free(cache->need_purge_reason);
+ cache->need_purge_reason = i_strdup(reason);
+}
+
+/* Cancel a previously requested delayed purge. */
+void mail_cache_purge_later_reset(struct mail_cache *cache)
+{
+ cache->need_purge_file_seq = 0;
+ i_free(cache->need_purge_reason);
+}
+
+/* Initialize the drop/downgrade thresholds from the index header's
+   day_stamp and the configured unaccessed_field_drop_secs. With
+   day_stamp==0 both thresholds stay 0, effectively disabling drops. */
+void mail_cache_purge_drop_init(struct mail_cache *cache,
+ const struct mail_index_header *hdr,
+ struct mail_cache_purge_drop_ctx *ctx_r)
+{
+ i_zero(ctx_r);
+ ctx_r->cache = cache;
+ if (hdr->day_stamp != 0) {
+ const struct mail_index_cache_optimization_settings *opt =
+ &cache->index->optimization_set.cache;
+ /* downgrade YES->TEMP after one drop period; drop entirely
+    after two */
+ ctx_r->max_yes_downgrade_time = hdr->day_stamp -
+ opt->unaccessed_field_drop_secs;
+ ctx_r->max_temp_drop_time = hdr->day_stamp -
+ 2 * opt->unaccessed_field_drop_secs;
+ }
+}
+
+/* Decide whether a field should be kept, dropped, or downgraded from
+   YES to TEMP based on its last_used time. Fields with a FORCED
+   decision are never touched. */
+enum mail_cache_purge_drop_decision
+mail_cache_purge_drop_test(struct mail_cache_purge_drop_ctx *ctx,
+ unsigned int field)
+{
+ struct mail_cache_field_private *priv = &ctx->cache->fields[field];
+ enum mail_cache_decision_type dec = priv->field.decision;
+
+ if ((dec & MAIL_CACHE_DECISION_FORCED) != 0)
+ return MAIL_CACHE_PURGE_DROP_DECISION_NONE;
+ if (dec != MAIL_CACHE_DECISION_NO &&
+ priv->field.last_used < ctx->max_temp_drop_time) {
+ /* YES or TEMP decision field hasn't been accessed for a long
+ time now. Drop it. */
+ return MAIL_CACHE_PURGE_DROP_DECISION_DROP;
+ }
+ if (dec == MAIL_CACHE_DECISION_YES &&
+ priv->field.last_used < ctx->max_yes_downgrade_time) {
+ /* YES decision field hasn't been accessed for a while
+ now. Change its decision to TEMP. */
+ return MAIL_CACHE_PURGE_DROP_DECISION_TO_TEMP;
+ }
+ return MAIL_CACHE_PURGE_DROP_DECISION_NONE;
+}
diff --git a/src/lib-index/mail-cache-sync-update.c b/src/lib-index/mail-cache-sync-update.c
new file mode 100644
index 0000000..9073187
--- /dev/null
+++ b/src/lib-index/mail-cache-sync-update.c
@@ -0,0 +1,68 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "mail-cache-private.h"
+#include "mail-index-sync-private.h"
+
+/* Per-sync state for the expunge handler: counts expunged records so
+   the cache header counters can be updated once at deinit. */
+struct mail_cache_sync_context {
+ unsigned expunge_count;
+};
+
+/* Account for count expunged records in the cache header: bump
+   deleted_record_count and decrement record_count (clamped at 0).
+   Silently does nothing if the cache can't be locked. */
+void mail_cache_expunge_count(struct mail_cache *cache, unsigned int count)
+{
+ if (mail_cache_lock(cache) > 0) {
+ cache->hdr_copy.deleted_record_count += count;
+ if (cache->hdr_copy.record_count >= count)
+ cache->hdr_copy.record_count -= count;
+ else
+ cache->hdr_copy.record_count = 0;
+ cache->hdr_modified = TRUE;
+ (void)mail_cache_flush_and_unlock(cache);
+ }
+}
+
+/* Return the sync context stored in *context, allocating it on first
+   use. */
+static struct mail_cache_sync_context *mail_cache_handler_init(void **context)
+{
+ struct mail_cache_sync_context *ctx;
+
+ if (*context == NULL)
+ *context = i_new(struct mail_cache_sync_context, 1);
+ ctx = *context;
+ return ctx;
+}
+
+/* Flush the accumulated expunge count into the cache header and free
+   the sync context. Safe to call with ctx == NULL. */
+static void mail_cache_handler_deinit(struct mail_index_sync_map_ctx *sync_ctx,
+ struct mail_cache_sync_context *ctx)
+{
+ struct mail_cache *cache = sync_ctx->view->index->cache;
+
+ if (ctx == NULL)
+ return;
+
+ mail_cache_expunge_count(cache, ctx->expunge_count);
+
+ i_free(ctx);
+}
+
+/* Index sync expunge handler for the cache extension. Called once per
+   expunged record with its cache offset, and finally with data==NULL
+   to signal end-of-sync (flush counters, free state). Records with a
+   zero cache offset have nothing cached and are skipped. */
+int mail_cache_expunge_handler(struct mail_index_sync_map_ctx *sync_ctx,
+ const void *data, void **sync_context)
+{
+ struct mail_cache_sync_context *ctx = *sync_context;
+ const uint32_t *cache_offset = data;
+
+ if (data == NULL) {
+ /* end of sync: flush and reset */
+ mail_cache_handler_deinit(sync_ctx, ctx);
+ *sync_context = NULL;
+ return 0;
+ }
+
+ if (*cache_offset == 0)
+ return 0;
+
+ ctx = mail_cache_handler_init(sync_context);
+ ctx->expunge_count++;
+ return 0;
+}
diff --git a/src/lib-index/mail-cache-transaction.c b/src/lib-index/mail-cache-transaction.c
new file mode 100644
index 0000000..0cdf089
--- /dev/null
+++ b/src/lib-index/mail-cache-transaction.c
@@ -0,0 +1,929 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "array.h"
+#include "buffer.h"
+#include "module-context.h"
+#include "file-cache.h"
+#include "file-set-size.h"
+#include "read-full.h"
+#include "write-full.h"
+#include "mail-cache-private.h"
+#include "mail-index-transaction-private.h"
+
+#include <stddef.h>
+#include <sys/stat.h>
+
+#define MAIL_CACHE_INIT_WRITE_BUFFER (1024*16)
+
+#define CACHE_TRANS_CONTEXT(obj) \
+ MODULE_CONTEXT(obj, cache_mail_index_transaction_module)
+#define CACHE_TRANS_CONTEXT_REQUIRE(obj) \
+ MODULE_CONTEXT_REQUIRE(obj, cache_mail_index_transaction_module)
+
+/* Associates a message sequence with the byte position of its pending
+   cache record inside the transaction's cache_data buffer. */
+struct mail_cache_transaction_rec {
+ uint32_t seq;
+ uint32_t cache_data_pos;
+};
+
+/* Cache-side state attached (via module context) to an index
+   transaction; collects cache record updates until commit/rollback. */
+struct mail_cache_transaction_ctx {
+ union mail_index_transaction_module_context module_ctx;
+ /* original transaction vfuncs, called after our hooks */
+ struct mail_index_transaction_vfuncs super;
+
+ struct mail_cache *cache;
+ struct mail_cache_view *view;
+ struct mail_index_transaction *trans;
+
+ /* file_seq of the cache file the written records belong to */
+ uint32_t cache_file_seq;
+ uint32_t first_new_seq;
+
+ /* pending cache records, and bookkeeping of which fields/seqs
+    they cover */
+ buffer_t *cache_data;
+ ARRAY(uint8_t) cache_field_idx_used;
+ ARRAY(struct mail_cache_transaction_rec) cache_data_seq;
+ ARRAY_TYPE(seq_range) cache_data_wanted_seqs;
+ uint32_t prev_seq, min_seq;
+ /* position of the still-unfinished record in cache_data */
+ size_t last_rec_pos;
+
+ /* records already written to the file (lost if reset_id changes) */
+ unsigned int records_written;
+
+ bool tried_purging:1;
+ bool decisions_refreshed:1;
+ bool have_noncommited_mails:1;
+ bool changes:1;
+};
+
+static MODULE_CONTEXT_DEFINE_INIT(cache_mail_index_transaction_module,
+ &mail_index_module_register);
+
+static int mail_cache_transaction_lock(struct mail_cache_transaction_ctx *ctx);
+static bool
+mail_cache_transaction_update_last_rec_size(struct mail_cache_transaction_ctx *ctx,
+ size_t *size_r);
+static int mail_cache_header_rewrite_fields(struct mail_cache *cache);
+
+/* vfunc hook: reset the cache transaction state before resetting the
+   index transaction itself. */
+static void mail_index_transaction_cache_reset(struct mail_index_transaction *t)
+{
+ struct mail_cache_transaction_ctx *ctx = CACHE_TRANS_CONTEXT_REQUIRE(t);
+ struct mail_index_transaction_vfuncs super = ctx->super;
+
+ mail_cache_transaction_reset(ctx);
+ super.reset(t);
+}
+
+/* vfunc hook: flush the cache transaction, then commit the index
+   transaction. Cache commit errors are deliberately ignored. */
+static int
+mail_index_transaction_cache_commit(struct mail_index_transaction *t,
+ struct mail_index_transaction_commit_result *result_r)
+{
+ struct mail_cache_transaction_ctx *ctx = CACHE_TRANS_CONTEXT_REQUIRE(t);
+ struct mail_index_transaction_vfuncs super = ctx->super;
+
+ /* a failed cache commit isn't important enough to fail the entire
+ index transaction, so we'll just ignore it */
+ (void)mail_cache_transaction_commit(&ctx);
+ return super.commit(t, result_r);
+}
+
+/* vfunc hook: discard the cache transaction before rolling back the
+   index transaction. */
+static void
+mail_index_transaction_cache_rollback(struct mail_index_transaction *t)
+{
+ struct mail_cache_transaction_ctx *ctx = CACHE_TRANS_CONTEXT_REQUIRE(t);
+ struct mail_index_transaction_vfuncs super = ctx->super;
+
+ mail_cache_transaction_rollback(&ctx);
+ super.rollback(t);
+}
+
+/* Return the cache transaction attached to t, creating it on first use:
+   allocates the context, opens an updated view, and hooks the index
+   transaction's reset/commit/rollback vfuncs. */
+struct mail_cache_transaction_ctx *
+mail_cache_get_transaction(struct mail_cache_view *view,
+ struct mail_index_transaction *t)
+{
+ struct mail_cache_transaction_ctx *ctx;
+
+ /* the module context lookup is only valid once the module id
+    has been registered */
+ ctx = !cache_mail_index_transaction_module.id.module_id_set ? NULL :
+ CACHE_TRANS_CONTEXT(t);
+
+ if (ctx != NULL)
+ return ctx;
+
+ ctx = i_new(struct mail_cache_transaction_ctx, 1);
+ ctx->cache = view->cache;
+ ctx->view = view;
+ ctx->trans = t;
+
+ i_assert(view->transaction == NULL);
+ view->transaction = ctx;
+ view->trans_view = mail_index_transaction_open_updated_view(t);
+
+ /* interpose our vfuncs, keeping the originals in ctx->super */
+ ctx->super = t->v;
+ t->v.reset = mail_index_transaction_cache_reset;
+ t->v.commit = mail_index_transaction_cache_commit;
+ t->v.rollback = mail_index_transaction_cache_rollback;
+
+ MODULE_CONTEXT_SET(t, cache_mail_index_transaction_module, ctx);
+ return ctx;
+}
+
+/* Forget records already flushed to the (old) cache file, e.g. because
+   the file was recreated. With reset_id_changed, already-written
+   records are lost and a warning is logged. Updates cache_file_seq and
+   resets the cache extension's reset_id in the index transaction. */
+static void
+mail_cache_transaction_forget_flushed(struct mail_cache_transaction_ctx *ctx,
+ bool reset_id_changed)
+{
+ uint32_t new_cache_file_seq = MAIL_CACHE_IS_UNUSABLE(ctx->cache) ? 0 :
+ ctx->cache->hdr->file_seq;
+ if (reset_id_changed && ctx->records_written > 0) {
+ e_warning(ctx->cache->event,
+ "Purging lost %u written cache records "
+ "(reset_id changed %u -> %u)", ctx->records_written,
+ ctx->cache_file_seq, new_cache_file_seq);
+ /* don't increase deleted_record_count in the new file */
+ ctx->records_written = 0;
+ }
+ ctx->cache_file_seq = new_cache_file_seq;
+ /* forget all cache extension updates even if reset_id doesn't change */
+ mail_index_ext_set_reset_id(ctx->trans, ctx->cache->ext_id,
+ ctx->cache_file_seq);
+}
+
+/* Drop all pending (in-memory) cache changes from the transaction and
+   resynchronize with the current cache file. */
+void mail_cache_transaction_reset(struct mail_cache_transaction_ctx *ctx)
+{
+ mail_cache_transaction_forget_flushed(ctx, FALSE);
+ if (ctx->cache_data != NULL)
+ buffer_set_used_size(ctx->cache_data, 0);
+ if (array_is_created(&ctx->cache_data_seq))
+ array_clear(&ctx->cache_data_seq);
+ ctx->prev_seq = 0;
+ ctx->last_rec_pos = 0;
+
+ ctx->changes = FALSE;
+}
+
+/* Abort the cache transaction and free all of its state. Records that
+   were already written to the cache file can't be removed, so they are
+   only accounted as deleted space in the header. */
+void mail_cache_transaction_rollback(struct mail_cache_transaction_ctx **_ctx)
+{
+ struct mail_cache_transaction_ctx *ctx = *_ctx;
+
+ *_ctx = NULL;
+
+ if (ctx->records_written > 0) {
+ /* we already wrote to the cache file. we can't (or don't want
+ to) delete that data, so just mark it as deleted space */
+ if (mail_cache_transaction_lock(ctx) > 0) {
+ ctx->cache->hdr_copy.deleted_record_count +=
+ ctx->records_written;
+ ctx->cache->hdr_modified = TRUE;
+ (void)mail_cache_flush_and_unlock(ctx->cache);
+ }
+ }
+
+ MODULE_CONTEXT_UNSET(ctx->trans, cache_mail_index_transaction_module);
+
+ /* detach from the view and release everything we allocated */
+ ctx->view->transaction = NULL;
+ ctx->view->trans_seq1 = ctx->view->trans_seq2 = 0;
+
+ mail_index_view_close(&ctx->view->trans_view);
+ buffer_free(&ctx->cache_data);
+ if (array_is_created(&ctx->cache_data_seq))
+ array_free(&ctx->cache_data_seq);
+ if (array_is_created(&ctx->cache_data_wanted_seqs))
+ array_free(&ctx->cache_data_wanted_seqs);
+ array_free(&ctx->cache_field_idx_used);
+ i_free(ctx);
+}
+
+/* Returns TRUE if any open view of this cache has a transaction with
+   pending cache changes. */
+bool mail_cache_transactions_have_changes(struct mail_cache *cache)
+{
+ const struct mail_cache_view *v = cache->views;
+
+ while (v != NULL) {
+ const struct mail_cache_transaction_ctx *t = v->transaction;
+
+ if (t != NULL && t->changes)
+ return TRUE;
+ v = v->next;
+ }
+ return FALSE;
+}
+
+/* Trigger a cache purge from within a transaction and forget records
+   already written to the old file (in-memory records survive and can
+   be written to the new file). Returns the purge result. */
+static int
+mail_cache_transaction_purge(struct mail_cache_transaction_ctx *ctx,
+ const char *reason)
+{
+ struct mail_cache *cache = ctx->cache;
+
+ ctx->tried_purging = TRUE;
+
+ uint32_t purge_file_seq =
+ MAIL_CACHE_IS_UNUSABLE(cache) ? 0 : cache->hdr->file_seq;
+
+ int ret = mail_cache_purge(cache, purge_file_seq, reason);
+ /* already written cache records must be forgotten, but records in
+ memory can still be written to the new cache file */
+ mail_cache_transaction_forget_flushed(ctx, TRUE);
+ return ret;
+}
+
+/* Lock the cache file for this transaction, creating/purging it when
+   needed. Returns 1 when locked, 0 when the cache is unusable even
+   after a purge attempt, -1 on error. May recurse once after purging. */
+static int mail_cache_transaction_lock(struct mail_cache_transaction_ctx *ctx)
+{
+ struct mail_cache *cache = ctx->cache;
+ const uoff_t cache_max_size =
+ cache->index->optimization_set.cache.max_size;
+ int ret;
+
+ if ((ret = mail_cache_lock(cache)) <= 0) {
+ if (ret < 0)
+ return -1;
+
+ /* cache is missing/unusable - try creating it via purge */
+ if (!ctx->tried_purging) {
+ if (mail_cache_transaction_purge(ctx, "creating cache") < 0)
+ return -1;
+ return mail_cache_transaction_lock(ctx);
+ } else {
+ return 0;
+ }
+ }
+ i_assert(!MAIL_CACHE_IS_UNUSABLE(cache));
+
+ if (!ctx->tried_purging && ctx->cache_data != NULL &&
+ cache->last_stat_size + ctx->cache_data->used > cache_max_size) {
+ /* Looks like cache file is becoming too large. Try to purge
+ it to free up some space. */
+ if (cache->hdr->continued_record_count > 0 ||
+ cache->hdr->deleted_record_count > 0) {
+ mail_cache_unlock(cache);
+ (void)mail_cache_transaction_purge(ctx, "cache is too large");
+ return mail_cache_transaction_lock(ctx);
+ }
+ }
+
+ if (ctx->cache_file_seq == 0)
+ ctx->cache_file_seq = cache->hdr->file_seq;
+ else if (ctx->cache_file_seq != cache->hdr->file_seq) {
+ /* already written cache records must be forgotten, but records
+ in memory can still be written to the new cache file */
+ mail_cache_transaction_forget_flushed(ctx, TRUE);
+ i_assert(ctx->cache_file_seq == cache->hdr->file_seq);
+ }
+ return 1;
+}
+
+/* Look up an uncommitted cache record for seq from the transaction's
+   in-memory data, starting at *trans_next_idx (which is advanced so
+   repeated calls return successive records). Falls back to the still
+   unfinished record when seq is the one currently being built. Returns
+   NULL when there are no more records for seq. */
+const struct mail_cache_record *
+mail_cache_transaction_lookup_rec(struct mail_cache_transaction_ctx *ctx,
+ unsigned int seq,
+ unsigned int *trans_next_idx)
+{
+ const struct mail_cache_transaction_rec *recs;
+ unsigned int i, count;
+
+ recs = array_get(&ctx->cache_data_seq, &count);
+ for (i = *trans_next_idx; i < count; i++) {
+ if (recs[i].seq == seq) {
+ *trans_next_idx = i + 1;
+ return CONST_PTR_OFFSET(ctx->cache_data->data,
+ recs[i].cache_data_pos);
+ }
+ }
+ /* set index past the end so the unfinished record below is
+    returned at most once per caller */
+ *trans_next_idx = i + 1;
+ if (seq == ctx->prev_seq && i == count) {
+ /* update the unfinished record's (temporary) size and
+ return it */
+ size_t size;
+ if (!mail_cache_transaction_update_last_rec_size(ctx, &size))
+ return NULL;
+ return CONST_PTR_OFFSET(ctx->cache_data->data,
+ ctx->last_rec_pos);
+ }
+ return NULL;
+}
+
+/* Record the flushed cache records' file offsets in the index's cache
+   extension. Uses the caller's transaction when committing or when
+   some mails aren't committed yet; otherwise commits a separate
+   internal transaction immediately so other processes see the offsets
+   sooner. */
+static void
+mail_cache_transaction_update_index(struct mail_cache_transaction_ctx *ctx,
+ uint32_t write_offset, bool committing)
+{
+ struct mail_cache *cache = ctx->cache;
+ struct mail_index_transaction *trans;
+ const struct mail_cache_record *rec = ctx->cache_data->data;
+ const struct mail_cache_transaction_rec *recs;
+ uint32_t i, seq_count;
+
+ if (committing) {
+ /* The transaction is being committed now. Use it. */
+ trans = ctx->trans;
+ } else if (ctx->have_noncommited_mails) {
+ /* Some of the mails haven't been committed yet. We must use
+ the provided transaction to update the cache records. */
+ trans = ctx->trans;
+ } else {
+ /* We can commit these changes immediately. This way even if
+ the provided transaction runs for a very long time, we
+ still once in a while commit the cache changes so they
+ become visible to other processes as well. */
+ trans = mail_index_transaction_begin(ctx->view->trans_view,
+ MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
+ }
+
+ mail_index_ext_using_reset_id(trans, ctx->cache->ext_id,
+ ctx->cache_file_seq);
+
+ /* write the cache_offsets to index file. records' prev_offset
+ is updated to point to old cache record when index is being
+ synced. */
+ recs = array_get(&ctx->cache_data_seq, &seq_count);
+ for (i = 0; i < seq_count; i++) {
+ mail_index_update_ext(trans, recs[i].seq, cache->ext_id,
+ &write_offset, NULL);
+
+ /* advance to the next record in the flat buffer */
+ write_offset += rec->size;
+ rec = CONST_PTR_OFFSET(rec, rec->size);
+ ctx->records_written++;
+ }
+ if (trans != ctx->trans) {
+ i_assert(cache->index->log_sync_locked);
+ if (mail_index_transaction_commit(&trans) < 0) {
+ /* failed, but can't really do anything */
+ } else {
+ ctx->records_written = 0;
+ }
+ }
+}
+
+/* Link each in-memory record to the mail's previous cache record (if any)
+   by filling in rec->prev_offset, assuming the buffer will be appended at
+   @write_offset. Also updates record counters in hdr_copy. Returns 0 on
+   success, -1 if the cache was detected corrupted. */
+static int
+mail_cache_link_records(struct mail_cache_transaction_ctx *ctx,
+ uint32_t write_offset)
+{
+ struct mail_index_map *map;
+ struct mail_cache_record *rec;
+ const struct mail_cache_transaction_rec *recs;
+ const uint32_t *prev_offsetp;
+ ARRAY_TYPE(uint32_t) seq_offsets;
+ uint32_t i, seq_count, reset_id, prev_offset, *offsetp;
+ const void *data;
+
+ i_assert(ctx->min_seq != 0);
+
+ /* seq_offsets caches the latest written offset per (seq - min_seq),
+ so multiple records for the same mail chain to each other */
+ i_array_init(&seq_offsets, 64);
+ recs = array_get(&ctx->cache_data_seq, &seq_count);
+ rec = buffer_get_modifiable_data(ctx->cache_data, NULL);
+ for (i = 0; i < seq_count; i++) {
+ offsetp = array_idx_get_space(&seq_offsets,
+ recs[i].seq - ctx->min_seq);
+ if (*offsetp != 0)
+ prev_offset = *offsetp;
+ else {
+ /* first record for this seq in the transaction - look
+ up the mail's existing offset from the index */
+ mail_index_lookup_ext_full(ctx->view->trans_view, recs[i].seq,
+ ctx->cache->ext_id, &map,
+ &data, NULL);
+ prev_offsetp = data;
+
+ if (prev_offsetp == NULL || *prev_offsetp == 0)
+ prev_offset = 0;
+ else if (mail_index_ext_get_reset_id(ctx->view->trans_view, map,
+ ctx->cache->ext_id,
+ &reset_id) &&
+ reset_id == ctx->cache_file_seq)
+ prev_offset = *prev_offsetp;
+ else
+ prev_offset = 0;
+ if (prev_offset >= write_offset) {
+ mail_cache_set_corrupted(ctx->cache,
+ "Cache record offset points outside existing file");
+ array_free(&seq_offsets);
+ return -1;
+ }
+ }
+
+ if (prev_offset != 0) {
+ /* link this record to previous one */
+ rec->prev_offset = prev_offset;
+ ctx->cache->hdr_copy.continued_record_count++;
+ } else {
+ ctx->cache->hdr_copy.record_count++;
+ }
+ *offsetp = write_offset;
+
+ write_offset += rec->size;
+ rec = PTR_OFFSET(rec, rec->size);
+ }
+ array_free(&seq_offsets);
+ ctx->cache->hdr_modified = TRUE;
+ return 0;
+}
+
+/* Mark all cache fields used by this transaction as used in the in-memory
+   field list. Returns TRUE if any used field is still missing from the
+   cache file's field mapping (field_file_map entry is (uint32_t)-1). */
+static bool
+mail_cache_transaction_set_used(struct mail_cache_transaction_ctx *ctx)
+{
+ const uint8_t *cache_fields_used;
+ unsigned int field_idx, count;
+ bool missing_file_fields = FALSE;
+
+ cache_fields_used = array_get(&ctx->cache_field_idx_used, &count);
+ i_assert(count <= ctx->cache->fields_count);
+ for (field_idx = 0; field_idx < count; field_idx++) {
+ if (cache_fields_used[field_idx] != 0) {
+ ctx->cache->fields[field_idx].used = TRUE;
+ if (ctx->cache->field_file_map[field_idx] == (uint32_t)-1)
+ missing_file_fields = TRUE;
+ }
+ }
+ return missing_file_fields;
+}
+
+/* Prepare the buffered records for writing: make sure all used fields
+   exist in the cache file's headers (rewriting them if needed), then
+   translate each buffered field's in-memory field_idx into the cache
+   file's field index. Returns 0 on success, -1 on error. */
+static int
+mail_cache_transaction_update_fields(struct mail_cache_transaction_ctx *ctx)
+{
+ unsigned char *p;
+ const unsigned char *end, *rec_end;
+ uint32_t field_idx, data_size;
+
+ if (mail_cache_transaction_set_used(ctx)) {
+ /* add missing fields to cache */
+ if (mail_cache_header_rewrite_fields(ctx->cache) < 0)
+ return -1;
+ /* make sure they were actually added */
+ if (mail_cache_transaction_set_used(ctx)) {
+ mail_index_set_error(ctx->cache->index,
+ "Cache file %s: Unexpectedly lost newly added field",
+ ctx->cache->filepath);
+ return -1;
+ }
+ }
+
+ /* Go through all the added cache records and replace the in-memory
+ field_idx with the cache file-specific field index. Update only
+ up to last_rec_pos, because that's how far flushing is done. The
+ fields after that keep the in-memory field_idx until the next
+ flush. */
+ p = buffer_get_modifiable_data(ctx->cache_data, NULL);
+ end = CONST_PTR_OFFSET(ctx->cache_data->data, ctx->last_rec_pos);
+ rec_end = p;
+ while (p < end) {
+ if (p >= rec_end) {
+ /* next cache record */
+ i_assert(p == rec_end);
+ const struct mail_cache_record *rec =
+ (const struct mail_cache_record *)p;
+ /* note that the last rec->size==0 */
+ rec_end = CONST_PTR_OFFSET(p, rec->size);
+ p += sizeof(*rec);
+ }
+ /* replace field_idx */
+ uint32_t *file_fieldp = (uint32_t *)p;
+ field_idx = *file_fieldp;
+ *file_fieldp = ctx->cache->field_file_map[field_idx];
+ i_assert(*file_fieldp != (uint32_t)-1);
+ p += sizeof(field_idx);
+
+ /* Skip to next cache field. Next is <data size> if the field
+ is not fixed size. */
+ data_size = ctx->cache->fields[field_idx].field.field_size;
+ if (data_size == UINT_MAX) {
+ memcpy(&data_size, p, sizeof(data_size));
+ p += sizeof(data_size);
+ }
+ /* data & 32bit padding */
+ p += data_size;
+ if ((data_size & 3) != 0)
+ p += 4 - (data_size & 3);
+ }
+ i_assert(p == end);
+ return 0;
+}
+
+/* Drop all data covered by the previous flush attempt from the write
+   buffer, keeping only the unfinished record's tail (bytes after
+   last_rec_pos) so the currently cached mail stays accessible. */
+static void
+mail_cache_transaction_drop_last_flush(struct mail_cache_transaction_ctx *ctx)
+{
+ buffer_copy(ctx->cache_data, 0,
+ ctx->cache_data, ctx->last_rec_pos, SIZE_MAX);
+ buffer_set_used_size(ctx->cache_data,
+ ctx->cache_data->used - ctx->last_rec_pos);
+ ctx->last_rec_pos = 0;
+ ctx->min_seq = 0;
+
+ array_clear(&ctx->cache_data_seq);
+ array_clear(&ctx->cache_data_wanted_seqs);
+}
+
+/* Flush the buffered records (up to last_rec_pos) to the cache file and
+   update their offsets into the index. Locks the transaction log first
+   when necessary to preserve the log-before-cache lock ordering and
+   avoid deadlocks. Returns 0 on success, -1 on failure. */
+static int
+mail_cache_transaction_flush(struct mail_cache_transaction_ctx *ctx,
+ bool committing)
+{
+ struct stat st;
+ uint32_t write_offset = 0;
+ int ret = 0;
+
+ i_assert(!ctx->cache->locked);
+
+ if (array_count(&ctx->cache_data_seq) == 0) {
+ /* we had done some changes, but they were aborted. */
+ i_assert(ctx->last_rec_pos == 0);
+ ctx->min_seq = 0;
+ return 0;
+ }
+
+ /* If we're going to be committing a transaction, the log must be
+ locked before we lock cache or we can deadlock. */
+ bool lock_log = !ctx->cache->index->log_sync_locked &&
+ !committing && !ctx->have_noncommited_mails;
+ if (lock_log) {
+ uint32_t file_seq;
+ uoff_t file_offset;
+
+ if (mail_transaction_log_sync_lock(ctx->cache->index->log,
+ "mail cache transaction flush",
+ &file_seq, &file_offset) < 0)
+ return -1;
+ }
+
+ if (mail_cache_transaction_lock(ctx) <= 0) {
+ if (lock_log) {
+ mail_transaction_log_sync_unlock(ctx->cache->index->log,
+ "mail cache transaction flush: cache lock failed");
+ }
+ return -1;
+ }
+
+ i_assert(ctx->cache_data != NULL);
+ i_assert(ctx->last_rec_pos <= ctx->cache_data->used);
+
+ if (mail_cache_transaction_update_fields(ctx) < 0) {
+ if (lock_log) {
+ mail_transaction_log_sync_unlock(ctx->cache->index->log,
+ "mail cache transaction flush: field update failed");
+ }
+ mail_cache_unlock(ctx->cache);
+ return -1;
+ }
+
+ /* we need to get the final write offset for linking records */
+ if (fstat(ctx->cache->fd, &st) < 0) {
+ if (!ESTALE_FSTAT(errno))
+ mail_cache_set_syscall_error(ctx->cache, "fstat()");
+ ret = -1;
+ } else if ((uoff_t)st.st_size + ctx->last_rec_pos > ctx->cache->index->optimization_set.cache.max_size) {
+ mail_cache_set_corrupted(ctx->cache, "Cache file too large");
+ ret = -1;
+ } else {
+ write_offset = st.st_size;
+ if (mail_cache_link_records(ctx, write_offset) < 0)
+ ret = -1;
+ }
+
+ /* write to cache file */
+ if (ret < 0 ||
+ mail_cache_append(ctx->cache, ctx->cache_data->data,
+ ctx->last_rec_pos, &write_offset) < 0)
+ ret = -1;
+ else {
+ /* update records' cache offsets to index */
+ mail_cache_transaction_update_index(ctx, write_offset,
+ committing);
+ }
+ if (mail_cache_flush_and_unlock(ctx->cache) < 0)
+ ret = -1;
+
+ if (lock_log) {
+ mail_transaction_log_sync_unlock(ctx->cache->index->log,
+ "mail cache transaction flush");
+ }
+ return ret;
+}
+
+/* Free at least @space_needed bytes from the start of the write buffer by
+   deleting the oldest buffered records. Unwanted sequences are always
+   dropped; wanted ones are dropped (and removed from the wanted set) only
+   until enough space is freed. Used only for in-memory indexes. */
+static void
+mail_cache_transaction_drop_unwanted(struct mail_cache_transaction_ctx *ctx,
+ size_t space_needed)
+{
+ struct mail_cache_transaction_rec *recs;
+ unsigned int i, count;
+
+ recs = array_get_modifiable(&ctx->cache_data_seq, &count);
+ /* find out how many records to delete. delete all unwanted sequences,
+ and if that's not enough delete some more. */
+ for (i = 0; i < count; i++) {
+ if (seq_range_exists(&ctx->cache_data_wanted_seqs, recs[i].seq)) {
+ if (recs[i].cache_data_pos >= space_needed)
+ break;
+ /* we're going to forcibly delete it - remove it also
+ from the array since it's no longer useful there */
+ seq_range_array_remove(&ctx->cache_data_wanted_seqs,
+ recs[i].seq);
+ }
+ }
+ unsigned int deleted_count = i;
+ size_t deleted_space = i < count ?
+ recs[i].cache_data_pos : ctx->last_rec_pos;
+ /* shift the surviving records' positions down and drop the deleted
+ prefix from both the seq array and the data buffer */
+ for (; i < count; i++)
+ recs[i].cache_data_pos -= deleted_space;
+ ctx->last_rec_pos -= deleted_space;
+ array_delete(&ctx->cache_data_seq, 0, deleted_count);
+ buffer_delete(ctx->cache_data, 0, deleted_space);
+}
+
+/* Update the unfinished (last) record's size field to cover everything
+   appended after last_rec_pos. Returns FALSE if the record is still empty
+   (header only, no fields), TRUE otherwise with the size in *size_r. */
+static bool
+mail_cache_transaction_update_last_rec_size(struct mail_cache_transaction_ctx *ctx,
+ size_t *size_r)
+{
+ struct mail_cache_record *rec;
+ void *data;
+ size_t size;
+
+ data = buffer_get_modifiable_data(ctx->cache_data, &size);
+ rec = PTR_OFFSET(data, ctx->last_rec_pos);
+ rec->size = size - ctx->last_rec_pos;
+ if (rec->size == sizeof(*rec))
+ return FALSE;
+ i_assert(rec->size > sizeof(*rec));
+ *size_r = rec->size;
+ return TRUE;
+}
+
+/* Finalize the unfinished record for ctx->prev_seq: fix its size, record
+   its (seq, position) in cache_data_seq and advance last_rec_pos. If the
+   record is empty or exceeds record_max_size, it's discarded instead. */
+static void
+mail_cache_transaction_update_last_rec(struct mail_cache_transaction_ctx *ctx)
+{
+ struct mail_cache_transaction_rec *trans_rec;
+ size_t size;
+
+ if (!mail_cache_transaction_update_last_rec_size(ctx, &size) ||
+ size > ctx->cache->index->optimization_set.cache.record_max_size) {
+ /* drop the oversized/empty record by truncating the buffer
+ back to the record's start */
+ buffer_set_used_size(ctx->cache_data, ctx->last_rec_pos);
+ return;
+ }
+
+ if (ctx->min_seq > ctx->prev_seq || ctx->min_seq == 0)
+ ctx->min_seq = ctx->prev_seq;
+ trans_rec = array_append_space(&ctx->cache_data_seq);
+ trans_rec->seq = ctx->prev_seq;
+ trans_rec->cache_data_pos = ctx->last_rec_pos;
+ ctx->last_rec_pos = ctx->cache_data->used;
+}
+
+/* Start a new record: finalize the previous seq's record (if any),
+   lazily allocate the transaction buffers on first use, and append an
+   empty record header to be filled in by subsequent mail_cache_add()
+   calls. The caller sets ctx->prev_seq afterwards. */
+static void
+mail_cache_transaction_switch_seq(struct mail_cache_transaction_ctx *ctx)
+{
+ struct mail_cache_record new_rec;
+
+ if (ctx->prev_seq != 0) {
+ /* update previously added cache record's size */
+ mail_cache_transaction_update_last_rec(ctx);
+ } else if (ctx->cache_data == NULL) {
+ ctx->cache_data =
+ buffer_create_dynamic(default_pool,
+ MAIL_CACHE_INIT_WRITE_BUFFER);
+ i_array_init(&ctx->cache_data_seq, 64);
+ i_array_init(&ctx->cache_data_wanted_seqs, 32);
+ i_array_init(&ctx->cache_field_idx_used, 64);
+ }
+
+ i_zero(&new_rec);
+ buffer_append(ctx->cache_data, &new_rec, sizeof(new_rec));
+
+ ctx->prev_seq = 0;
+ ctx->changes = TRUE;
+}
+
+/* Commit the cache transaction: flush all pending records to the cache
+   file, then free the transaction context. Returns 0 on success, -1 on
+   failure. *_ctx is always invalidated. */
+int mail_cache_transaction_commit(struct mail_cache_transaction_ctx **_ctx)
+{
+ struct mail_cache_transaction_ctx *ctx = *_ctx;
+ int ret = 0;
+
+ if (ctx->changes) {
+ if (ctx->prev_seq != 0)
+ mail_cache_transaction_update_last_rec(ctx);
+ if (mail_cache_transaction_flush(ctx, TRUE) < 0)
+ ret = -1;
+ else {
+ /* successfully wrote everything */
+ ctx->records_written = 0;
+ }
+ /* Here would be a good place to do fdatasync() to make sure
+ everything is written before offsets are updated to index.
+ However it slows down I/O needlessly and we're pretty good
+ at catching and fixing cache corruption, so we no longer do
+ it. */
+ }
+ mail_cache_transaction_rollback(_ctx);
+ return ret;
+}
+
+/* Append a new field header block to the cache file and link it into the
+   header chain by updating the previous header's next_offset. The cache
+   must be locked. Returns 0 on success, -1 on failure. */
+static int
+mail_cache_header_fields_write(struct mail_cache *cache, const buffer_t *buffer)
+{
+ uint32_t offset, hdr_offset;
+
+ i_assert(cache->locked);
+
+ offset = 0;
+ if (mail_cache_append(cache, buffer->data, buffer->used, &offset) < 0)
+ return -1;
+
+ if (cache->index->set.fsync_mode == FSYNC_MODE_ALWAYS) {
+ if (fdatasync(cache->fd) < 0) {
+ mail_cache_set_syscall_error(cache, "fdatasync()");
+ return -1;
+ }
+ }
+ /* find offset to the previous header's "next_offset" field */
+ if (mail_cache_header_fields_get_next_offset(cache, &hdr_offset) < 0)
+ return -1;
+
+ /* update the next_offset offset, so our new header will be found */
+ offset = mail_index_uint32_to_offset(offset);
+ if (mail_cache_write(cache, &offset, sizeof(offset), hdr_offset) < 0)
+ return -1;
+
+ if (hdr_offset == offsetof(struct mail_cache_header,
+ field_header_offset)) {
+ /* we're adding the first field. hdr_copy needs to be kept
+ in sync so unlocking won't overwrite it. */
+ cache->hdr_copy.field_header_offset = hdr_offset;
+ cache->hdr_ro_copy.field_header_offset = hdr_offset;
+ }
+ return 0;
+}
+
+/* Rewrite the cache file's field headers: re-read the current ones to
+   avoid losing fields, write an updated header block, then re-read it so
+   field_file_map reflects the new state. Returns 0 on success, -1 on
+   failure. */
+static int mail_cache_header_rewrite_fields(struct mail_cache *cache)
+{
+ int ret;
+
+ /* re-read header to make sure we don't lose any fields. */
+ if (mail_cache_header_fields_read(cache) < 0)
+ return -1;
+
+ T_BEGIN {
+ buffer_t *buffer;
+
+ buffer = t_buffer_create(256);
+ mail_cache_header_fields_get(cache, buffer);
+ ret = mail_cache_header_fields_write(cache, buffer);
+ } T_END;
+
+ if (ret == 0) {
+ /* we wrote all the headers, so there are no pending changes */
+ cache->field_header_write_pending = FALSE;
+ ret = mail_cache_header_fields_read(cache);
+ }
+ return ret;
+}
+
+/* Make sure the latest caching decisions have been read from the cache
+   file's header. Done at most once per transaction; failures are ignored
+   (best-effort refresh). */
+static void
+mail_cache_transaction_refresh_decisions(struct mail_cache_transaction_ctx *ctx)
+{
+ if (ctx->decisions_refreshed)
+ return;
+
+ /* Read latest caching decisions from the cache file's header once
+ per transaction. */
+ if (!ctx->cache->opened)
+ (void)mail_cache_open_and_verify(ctx->cache);
+ else
+ (void)mail_cache_header_fields_read(ctx->cache);
+ ctx->decisions_refreshed = TRUE;
+}
+
+/* Add a field's value for the given mail to the cache transaction. The
+   data is buffered in memory; once the buffer grows past
+   MAIL_CACHE_MAX_WRITE_BUFFER it's flushed to the cache file (or, for
+   in-memory indexes, old buffered data is dropped). Values are padded to
+   32-bit alignment; variable-size fields are prefixed by their size. */
+void mail_cache_add(struct mail_cache_transaction_ctx *ctx, uint32_t seq,
+ unsigned int field_idx, const void *data, size_t data_size)
+{
+ uint32_t data_size32;
+ unsigned int fixed_size;
+ size_t full_size, record_size;
+
+ i_assert(field_idx < ctx->cache->fields_count);
+ i_assert(data_size < (uint32_t)-1);
+
+ /* a forced "no" decision means this field is never cached */
+ if (ctx->cache->fields[field_idx].field.decision ==
+ (MAIL_CACHE_DECISION_NO | MAIL_CACHE_DECISION_FORCED))
+ return;
+
+ if (seq >= ctx->trans->first_new_seq)
+ ctx->have_noncommited_mails = TRUE;
+
+ /* If the cache file exists, make sure the caching decisions have been
+ read. */
+ mail_cache_transaction_refresh_decisions(ctx);
+
+ mail_cache_decision_add(ctx->view, seq, field_idx);
+
+ fixed_size = ctx->cache->fields[field_idx].field.field_size;
+ i_assert(fixed_size == UINT_MAX || fixed_size == data_size);
+
+ data_size32 = (uint32_t)data_size;
+ /* field_idx + 32bit-padded data (+ size prefix for variable-size
+ fields) */
+ full_size = sizeof(field_idx) + ((data_size + 3) & ~3U);
+ if (fixed_size == UINT_MAX)
+ full_size += sizeof(data_size32);
+
+ if (ctx->prev_seq != seq) {
+ mail_cache_transaction_switch_seq(ctx);
+ ctx->prev_seq = seq;
+ seq_range_array_add(&ctx->cache_data_wanted_seqs, seq);
+
+ /* remember roughly what we have modified, so cache lookups can
+ look into transactions to see changes. */
+ if (seq < ctx->view->trans_seq1 || ctx->view->trans_seq1 == 0)
+ ctx->view->trans_seq1 = seq;
+ if (seq > ctx->view->trans_seq2)
+ ctx->view->trans_seq2 = seq;
+ }
+
+ if (mail_cache_transaction_update_last_rec_size(ctx, &record_size) &&
+ record_size + full_size >
+ ctx->cache->index->optimization_set.cache.record_max_size) {
+ /* Adding this field would exceed the cache record's maximum
+ size. If we don't add this, it's possible that other fields
+ could still be added. */
+ return;
+ }
+
+ /* Remember that this field has been used within the transaction. Later
+ on we fill mail_cache_field_private.used with it. We can't rely on
+ setting it here, because cache purging may run and clear it. */
+ uint8_t field_idx_set = 1;
+ array_idx_set(&ctx->cache_field_idx_used, field_idx, &field_idx_set);
+
+ /* Remember that this value exists for the mail, in case we try to look
+ it up. Note that this gets forgotten whenever changing the mail. */
+ buffer_write(ctx->view->cached_exists_buf, field_idx,
+ &ctx->view->cached_exists_value, 1);
+
+ if (ctx->cache_data->used + full_size > MAIL_CACHE_MAX_WRITE_BUFFER &&
+ ctx->last_rec_pos > 0) {
+ /* time to flush our buffer. */
+ if (MAIL_INDEX_IS_IN_MEMORY(ctx->cache->index)) {
+ /* just drop the old data to free up memory */
+ size_t space_needed = ctx->cache_data->used +
+ full_size - MAIL_CACHE_MAX_WRITE_BUFFER;
+ mail_cache_transaction_drop_unwanted(ctx, space_needed);
+ } else {
+ if (mail_cache_transaction_flush(ctx, FALSE) < 0) {
+ /* If this is a syscall failure, the already
+ flushed changes could still be finished by
+ writing the offsets to .log file. If this is
+ a corruption/lost cache, the offsets will
+ point to a nonexistent file or be ignored.
+ Either way, we don't really need to handle
+ this failure in any special way. */
+ }
+ /* Regardless of whether the flush succeeded, drop all
+ data that it would have written. This way the flush
+ is attempted only once, but it could still be
+ possible to write new data later. Also don't reset
+ the transaction entirely so that the last partially
+ cached mail can still be accessed from memory. */
+ mail_cache_transaction_drop_last_flush(ctx);
+ }
+ }
+
+ buffer_append(ctx->cache_data, &field_idx, sizeof(field_idx));
+ if (fixed_size == UINT_MAX) {
+ buffer_append(ctx->cache_data, &data_size32,
+ sizeof(data_size32));
+ }
+
+ buffer_append(ctx->cache_data, data, data_size);
+ if ((data_size & 3) != 0)
+ buffer_append_zero(ctx->cache_data, 4 - (data_size & 3));
+}
+
+/* Return TRUE if the field should be added to the cache for this mail,
+   according to the current caching decision: never for NO, only for
+   sufficiently new mails for TEMP, and only if the value isn't already
+   cached. */
+bool mail_cache_field_want_add(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq, unsigned int field_idx)
+{
+ enum mail_cache_decision_type decision;
+
+ mail_cache_transaction_refresh_decisions(ctx);
+
+ decision = mail_cache_field_get_decision(ctx->view->cache, field_idx);
+ decision &= ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED);
+ switch (decision) {
+ case MAIL_CACHE_DECISION_NO:
+ return FALSE;
+ case MAIL_CACHE_DECISION_TEMP:
+ /* add it only if it's newer than what we would drop when
+ purging */
+ if (ctx->first_new_seq == 0) {
+ ctx->first_new_seq =
+ mail_cache_get_first_new_seq(ctx->view->view);
+ }
+ if (seq < ctx->first_new_seq)
+ return FALSE;
+ break;
+ default:
+ break;
+ }
+
+ return mail_cache_field_exists(ctx->view, seq, field_idx) == 0;
+}
+
+/* Return TRUE if the field is allowed to be added for this mail - i.e.
+   the decision isn't a forced NO and the value isn't already cached.
+   (Unlike _want_add(), TEMP decisions don't restrict by mail age.) */
+bool mail_cache_field_can_add(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq, unsigned int field_idx)
+{
+ enum mail_cache_decision_type decision;
+
+ mail_cache_transaction_refresh_decisions(ctx);
+
+ decision = mail_cache_field_get_decision(ctx->view->cache, field_idx);
+ if (decision == (MAIL_CACHE_DECISION_FORCED | MAIL_CACHE_DECISION_NO))
+ return FALSE;
+
+ return mail_cache_field_exists(ctx->view, seq, field_idx) == 0;
+}
+
+/* Notify the transaction that the mail is no longer being accessed, so
+   its buffered cache data may be dropped if memory is needed. */
+void mail_cache_close_mail(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq)
+{
+ if (array_is_created(&ctx->cache_data_wanted_seqs))
+ seq_range_array_remove(&ctx->cache_data_wanted_seqs, seq);
+}
diff --git a/src/lib-index/mail-cache.c b/src/lib-index/mail-cache.c
new file mode 100644
index 0000000..bd3c939
--- /dev/null
+++ b/src/lib-index/mail-cache.c
@@ -0,0 +1,1005 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "buffer.h"
+#include "hash.h"
+#include "llist.h"
+#include "nfs-workarounds.h"
+#include "file-cache.h"
+#include "mmap-util.h"
+#include "read-full.h"
+#include "write-full.h"
+#include "mail-cache-private.h"
+#include "ioloop.h"
+
+#include <unistd.h>
+
+/* Minimum number of bytes to pread() when reading the start of the cache
+   file, so the cache header and the field headers following it can
+   usually be fetched with a single read. */
+#define MAIL_CACHE_MIN_HEADER_READ_SIZE 4096
+
+/* Event category attached to mail-cache events for filtering/logging. */
+static struct event_category event_category_mail_cache = {
+ .name = "mail-cache",
+};
+
+/* Log a syscall failure for the cache file (errno-based) via the index's
+   error handling. @function is the failing syscall's name, e.g. "fstat()". */
+void mail_cache_set_syscall_error(struct mail_cache *cache,
+ const char *function)
+{
+ mail_index_file_set_syscall_error(cache->index, cache->filepath,
+ function);
+}
+
+/* Delete the cache file (unless read-only or in-memory) and mark the
+   in-memory state unusable by clearing the header pointer. */
+static void mail_cache_unlink(struct mail_cache *cache)
+{
+ if (!cache->index->readonly && !MAIL_INDEX_IS_IN_MEMORY(cache->index))
+ i_unlink_if_exists(cache->filepath);
+ /* mark the cache as unusable */
+ cache->hdr = NULL;
+}
+
+/* Handle cache file corruption: delete the file, then log and emit a
+   "mail_cache_corrupted" event with the printf-formatted reason. */
+void mail_cache_set_corrupted(struct mail_cache *cache, const char *fmt, ...)
+{
+ va_list va;
+
+ mail_cache_unlink(cache);
+
+ va_start(va, fmt);
+ T_BEGIN {
+ const char *reason = t_strdup_vprintf(fmt, va);
+ const char *errstr = t_strdup_printf(
+ "Deleting corrupted cache: %s", reason);
+ e_error(event_create_passthrough(cache->event)->
+ set_name("mail_cache_corrupted")->
+ add_str("reason", reason)->event(), "%s", errstr);
+ mail_index_set_error_nolog(cache->index, errstr);
+ } T_END;
+ va_end(va);
+}
+
+/* Handle corruption of a single mail's cache record: clear the mail's
+   cache offset in the index (so the record is no longer referenced),
+   log a "mail_cache_record_corrupted" event and bump the expunge
+   counter. Commit failures are silently ignored (retried later). */
+void mail_cache_set_seq_corrupted_reason(struct mail_cache_view *cache_view,
+ uint32_t seq, const char *reason)
+{
+ uint32_t uid, empty = 0;
+ struct mail_cache *cache = cache_view->cache;
+ struct mail_index_view *view = cache_view->view;
+
+ /* drop cache pointer */
+ struct mail_index_transaction *t =
+ mail_index_transaction_begin(view, MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
+ mail_index_update_ext(t, seq, cache->ext_id, &empty, NULL);
+
+ if (mail_index_transaction_commit(&t) < 0) {
+ /* I/O error (e.g. out of disk space). Ignore this for now,
+ maybe it works again later. */
+ return;
+ }
+
+ mail_index_lookup_uid(cache_view->view, seq, &uid);
+ const char *errstr = t_strdup_printf(
+ "Deleting corrupted cache record uid=%u: %s", uid, reason);
+ e_error(event_create_passthrough(cache->event)->
+ set_name("mail_cache_record_corrupted")->
+ add_int("uid", uid)->
+ add_str("reason", reason)->event(), "%s", errstr);
+ mail_cache_expunge_count(cache, 1);
+}
+
+/* Close the cache file and release all associated state: munmap the
+   mapping, detach the file-cache fd, clear the read buffer, free the
+   lock and close the fd. Leaves the cache in the "not opened" state. */
+void mail_cache_file_close(struct mail_cache *cache)
+{
+ if (cache->mmap_base != NULL) {
+ if (munmap(cache->mmap_base, cache->mmap_length) < 0)
+ mail_cache_set_syscall_error(cache, "munmap()");
+ }
+
+ if (cache->file_cache != NULL)
+ file_cache_set_fd(cache->file_cache, -1);
+ if (cache->read_buf != NULL)
+ buffer_set_used_size(cache->read_buf, 0);
+
+ cache->mmap_base = NULL;
+ cache->hdr = NULL;
+ cache->mmap_length = 0;
+ cache->last_field_header_offset = 0;
+
+ file_lock_free(&cache->file_lock);
+ cache->locked = FALSE;
+
+ if (cache->fd != -1) {
+ if (close(cache->fd) < 0)
+ mail_cache_set_syscall_error(cache, "close()");
+ cache->fd = -1;
+ }
+ cache->opened = FALSE;
+}
+
+/* After (re)opening the cache fd, attach it to the optional file_cache
+   and refresh the cached stat information (size, inode, device) used by
+   mail_cache_need_reopen(). */
+static void mail_cache_init_file_cache(struct mail_cache *cache)
+{
+ struct stat st;
+
+ if (cache->file_cache != NULL)
+ file_cache_set_fd(cache->file_cache, cache->fd);
+
+ if (fstat(cache->fd, &st) == 0) {
+ if (cache->file_cache != NULL)
+ (void)file_cache_set_size(cache->file_cache, st.st_size);
+ } else if (!ESTALE_FSTAT(errno)) {
+ mail_cache_set_syscall_error(cache, "fstat()");
+ }
+
+ /* NOTE(review): if fstat() failed above, st is uninitialized here
+ and reading st.st_size/st_ino/st_dev is undefined behavior.
+ Consider zero-initializing st or updating these only on
+ fstat() success. */
+ cache->last_stat_size = st.st_size;
+ cache->st_ino = st.st_ino;
+ cache->st_dev = st.st_dev;
+}
+
+/* Try to open and map the cache file. Returns 1 on success, 0 if there
+   is no cache (in-memory index or the file doesn't exist / is unusable),
+   -1 on error. Marks the cache as "opened" in all cases. */
+static int mail_cache_try_open(struct mail_cache *cache)
+{
+ int ret;
+
+ i_assert(!cache->opened);
+ cache->opened = TRUE;
+
+ if (MAIL_INDEX_IS_IN_MEMORY(cache->index))
+ return 0;
+
+ i_assert(cache->fd == -1);
+ cache->fd = nfs_safe_open(cache->filepath,
+ cache->index->readonly ? O_RDONLY : O_RDWR);
+ if (cache->fd == -1) {
+ mail_cache_file_close(cache);
+ if (errno == ENOENT) {
+ /* no cache file - nothing to purge either */
+ mail_cache_purge_later_reset(cache);
+ return 0;
+ }
+
+ mail_cache_set_syscall_error(cache, "open()");
+ return -1;
+ }
+
+ mail_cache_init_file_cache(cache);
+
+ if ((ret = mail_cache_map_all(cache)) <= 0) {
+ mail_cache_file_close(cache);
+ return ret;
+ }
+ return 1;
+}
+
+/* Check whether the cache file on disk has been replaced (e.g. by
+   purging) and our open fd/mapping is stale. Returns TRUE if the cache
+   should be reopened. Includes NFS attribute-cache flushing and an
+   ESTALE check for inode reuse when NFS flushing is enabled. */
+static bool mail_cache_need_reopen(struct mail_cache *cache)
+{
+ struct stat st;
+
+ if (MAIL_INDEX_IS_IN_MEMORY(cache->index)) {
+ /* disabled */
+ return FALSE;
+ }
+
+ if (cache->fd == -1)
+ return TRUE;
+
+ /* see if the file has changed */
+ if ((cache->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
+ i_assert(!cache->locked);
+ nfs_flush_file_handle_cache(cache->filepath);
+ }
+ if (nfs_safe_stat(cache->filepath, &st) < 0) {
+ /* if cache was already marked as corrupted, don't log errors
+ about nonexistent cache file */
+ if (cache->hdr != NULL || errno != ENOENT)
+ mail_cache_set_syscall_error(cache, "stat()");
+ return TRUE;
+ }
+ cache->last_stat_size = st.st_size;
+
+ if (st.st_ino != cache->st_ino ||
+ !CMP_DEV_T(st.st_dev, cache->st_dev)) {
+ /* file changed */
+ return TRUE;
+ }
+
+ if ((cache->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
+ /* if the old file has been deleted, the new file may have
+ the same inode as the old one. we'll catch this here by
+ checking if fstat() fails with ESTALE */
+ if (fstat(cache->fd, &st) < 0) {
+ if (ESTALE_FSTAT(errno))
+ return TRUE;
+ mail_cache_set_syscall_error(cache, "fstat()");
+ return FALSE;
+ }
+ }
+ return FALSE;
+}
+
+/* Close and reopen the cache file, e.g. after it has been replaced by
+   purging. Returns the result of mail_cache_open_and_verify(). */
+int mail_cache_reopen(struct mail_cache *cache)
+{
+ mail_cache_file_close(cache);
+ return mail_cache_open_and_verify(cache);
+}
+
+/* Decide whether the cache file should be purged soon, based on the
+   header: old minor version, too high a percentage of continued records,
+   or too many deleted records (and file size above purge_min_size).
+   Schedules the purge via mail_cache_purge_later(). */
+static void mail_cache_update_need_purge(struct mail_cache *cache)
+{
+ const struct mail_index_cache_optimization_settings *set =
+ &cache->index->optimization_set.cache;
+ const struct mail_cache_header *hdr = cache->hdr;
+ struct stat st;
+ unsigned int msg_count;
+ unsigned int records_count, cont_percentage, delete_percentage;
+ const char *want_purge_reason = NULL;
+
+ if (hdr->minor_version == 0) {
+ /* purge to get ourself into the new header version */
+ mail_cache_purge_later(cache, "Minor version too old");
+ return;
+ }
+
+ msg_count = cache->index->map->rec_map->records_count;
+ if (msg_count == 0)
+ records_count = 1;
+ else if (hdr->record_count == 0 || hdr->record_count > msg_count*2) {
+ /* probably not the real record_count, but hole offset that
+ Dovecot <=v2.1 versions used to use in this position.
+ we already checked that minor_version>0, but this could
+ happen if old Dovecot was used to access mailbox after
+ it had been updated. */
+ records_count = I_MAX(msg_count, 1);
+ } else {
+ records_count = hdr->record_count;
+ }
+
+ cont_percentage = hdr->continued_record_count * 100 / records_count;
+ if (cont_percentage >= set->purge_continued_percentage) {
+ /* too many continued rows, purge */
+ want_purge_reason = t_strdup_printf(
+ "Too many continued records (%u/%u)",
+ hdr->continued_record_count, records_count);
+ }
+
+ /* deleted percentage check may override the continued-records
+ reason; either one triggers the purge */
+ delete_percentage = hdr->deleted_record_count * 100 /
+ (records_count + hdr->deleted_record_count);
+ if (delete_percentage >= set->purge_delete_percentage) {
+ /* too many deleted records, purge */
+ want_purge_reason = t_strdup_printf(
+ "Too many deleted records (%u/%u)",
+ hdr->deleted_record_count, records_count);
+ }
+
+ if (want_purge_reason != NULL) {
+ if (fstat(cache->fd, &st) < 0) {
+ if (!ESTALE_FSTAT(errno))
+ mail_cache_set_syscall_error(cache, "fstat()");
+ return;
+ }
+ /* don't bother purging tiny files */
+ if ((uoff_t)st.st_size >= set->purge_min_size)
+ mail_cache_purge_later(cache, want_purge_reason);
+ }
+
+}
+
+/* Validate the cache file header: size, major version, uoff_t width,
+   indexid match and nonzero file_seq. On failure the cache is marked
+   corrupted/unlinked and FALSE is returned. */
+static bool mail_cache_verify_header(struct mail_cache *cache,
+ const struct mail_cache_header *hdr)
+{
+ /* check that the header is still ok */
+ if (cache->mmap_length < sizeof(struct mail_cache_header)) {
+ mail_cache_set_corrupted(cache, "File too small");
+ return FALSE;
+ }
+
+ if (hdr->major_version != MAIL_CACHE_MAJOR_VERSION) {
+ /* version changed - upgrade silently */
+ mail_cache_set_corrupted(cache, "Unsupported major version (%u)",
+ hdr->major_version);
+ return FALSE;
+ }
+ if (hdr->compat_sizeof_uoff_t != sizeof(uoff_t)) {
+ /* architecture change - handle silently(?) */
+ mail_cache_set_corrupted(cache, "Unsupported uoff_t size (%u)",
+ hdr->compat_sizeof_uoff_t);
+ return FALSE;
+ }
+
+ if (hdr->indexid != cache->index->indexid) {
+ /* index id changed - handle silently */
+ mail_cache_unlink(cache);
+ return FALSE;
+ }
+ if (hdr->file_seq == 0) {
+ mail_cache_set_corrupted(cache, "file_seq is 0");
+ return FALSE;
+ }
+ return TRUE;
+}
+
+/* Finish a map operation: verify the header when mapping from offset 0,
+   update cache->hdr (directly or via hdr_ro_copy depending on copy_hdr)
+   and re-evaluate the purge need. Returns 1 if the requested range is
+   fully mapped, 0 if it extends past EOF, -1 on corruption (with
+   *corrupted_r set). */
+static int
+mail_cache_map_finish(struct mail_cache *cache, uoff_t offset, size_t size,
+ const void *hdr_data, bool copy_hdr, bool *corrupted_r)
+{
+ const struct mail_cache_header *hdr = hdr_data;
+
+ *corrupted_r = FALSE;
+
+ if (offset == 0) {
+ /* verify the header validity only with offset=0. this way
+ we won't waste time re-verifying it all the time */
+ if (!mail_cache_verify_header(cache, hdr)) {
+ if (!MAIL_CACHE_IS_UNUSABLE(cache) &&
+ cache->hdr->file_seq != 0)
+ mail_cache_purge_later(cache, "Invalid header");
+ *corrupted_r = TRUE;
+ return -1;
+ }
+ }
+ if (hdr_data != NULL) {
+ if (!copy_hdr)
+ cache->hdr = hdr;
+ else {
+ /* hdr points into a buffer that may be reused -
+ keep a stable read-only copy */
+ memcpy(&cache->hdr_ro_copy, hdr,
+ sizeof(cache->hdr_ro_copy));
+ cache->hdr = &cache->hdr_ro_copy;
+ }
+ mail_cache_update_need_purge(cache);
+ } else {
+ i_assert(cache->hdr != NULL);
+ }
+ i_assert(cache->hdr->file_seq != 0);
+
+ if (offset + size > cache->mmap_length)
+ return 0;
+ return 1;
+}
+
+/* Map a range of the cache file using pread() into read_buf instead of
+   mmap (used e.g. over NFS). Reuses the buffer when the requested range
+   is already covered; reads the header + field headers together on the
+   first read. Return values as in mail_cache_map_finish(). */
+static int
+mail_cache_map_with_read(struct mail_cache *cache, size_t offset, size_t size,
+ const void **data_r, bool *corrupted_r)
+{
+ const void *hdr_data;
+ void *data;
+ ssize_t ret;
+
+ if (cache->read_buf == NULL) {
+ cache->read_buf =
+ buffer_create_dynamic(default_pool, size);
+ } else if (cache->read_offset <= offset &&
+ cache->read_offset + cache->read_buf->used >= offset+size) {
+ /* already mapped */
+ *data_r = CONST_PTR_OFFSET(cache->read_buf->data,
+ offset - cache->read_offset);
+ hdr_data = offset == 0 ? *data_r : NULL;
+ return mail_cache_map_finish(cache, offset, size, hdr_data,
+ TRUE, corrupted_r);
+ } else {
+ buffer_set_used_size(cache->read_buf, 0);
+ }
+ if (offset == 0 && size < MAIL_CACHE_MIN_HEADER_READ_SIZE) {
+ /* we can usually read the fields header after the cache
+ header. we need them both, so try to read them all with one
+ pread() call. */
+ size = MAIL_CACHE_MIN_HEADER_READ_SIZE;
+ }
+
+ data = buffer_append_space_unsafe(cache->read_buf, size);
+ ret = pread(cache->fd, data, size, offset);
+ if (ret < 0) {
+ if (errno != ESTALE)
+ mail_cache_set_syscall_error(cache, "read()");
+
+ buffer_set_used_size(cache->read_buf, 0);
+ cache->hdr = NULL;
+ cache->mmap_length = 0;
+ return -1;
+ }
+ /* shrink to the bytes actually read (may be short near EOF) */
+ buffer_set_used_size(cache->read_buf, ret);
+
+ cache->read_offset = offset;
+ cache->mmap_length = offset + cache->read_buf->used;
+
+ *data_r = data;
+ hdr_data = offset == 0 ? *data_r : NULL;
+ return mail_cache_map_finish(cache, offset,
+ cache->read_buf->used, hdr_data,
+ TRUE, corrupted_r);
+}
+
+/* Map (offset, size) of the cache file and return a pointer to it in
+   *data_r. Depending on configuration this uses pread() (map_with_read),
+   a file_cache, or a read-only mmap of the whole file. size==0 means
+   "at least the header". Returns 1 if fully mapped, 0 if the range is
+   past EOF, -1 on error (with *corrupted_r telling whether the file was
+   detected corrupted). */
+static int
+mail_cache_map_full(struct mail_cache *cache, size_t offset, size_t size,
+ const void **data_r, bool *corrupted_r)
+{
+ struct stat st;
+ const void *data;
+ ssize_t ret;
+ size_t orig_size = size;
+
+ *corrupted_r = FALSE;
+
+ if (size == 0)
+ size = sizeof(struct mail_cache_header);
+
+ /* verify offset + size before trying to allocate a huge amount of
+ memory due to them. note that we may be prefetching more than we
+ actually need, so don't fail too early. */
+ if ((size > cache->mmap_length || offset + size > cache->mmap_length) &&
+ (offset > 0 || size > sizeof(struct mail_cache_header))) {
+ if (fstat(cache->fd, &st) < 0) {
+ e_error(cache->index->event,
+ "fstat(%s) failed: %m", cache->filepath);
+ return -1;
+ }
+ cache->last_stat_size = st.st_size;
+ if (offset >= (uoff_t)st.st_size) {
+ *data_r = NULL;
+ return 0;
+ }
+ /* clamp the mapping to EOF */
+ if (size > (uoff_t)st.st_size - offset)
+ size = st.st_size - offset;
+ }
+
+ cache->remap_counter++;
+ if (cache->map_with_read)
+ return mail_cache_map_with_read(cache, offset, size, data_r,
+ corrupted_r);
+
+ if (cache->file_cache != NULL) {
+ ret = file_cache_read(cache->file_cache, offset, size);
+ if (ret < 0) {
+ /* In case of ESTALE we'll simply fail without error
+ messages. The caller will then just have to
+ fallback to generating the value itself.
+
+ We can't simply reopen the cache file, because
+ using it requires also having updated file
+ offsets. */
+ if (errno != ESTALE)
+ mail_cache_set_syscall_error(cache, "read()");
+ cache->hdr = NULL;
+ return -1;
+ }
+
+ data = file_cache_get_map(cache->file_cache,
+ &cache->mmap_length);
+ *data_r = offset > cache->mmap_length ? NULL :
+ CONST_PTR_OFFSET(data, offset);
+ return mail_cache_map_finish(cache, offset, size,
+ offset == 0 ? data : NULL, TRUE,
+ corrupted_r);
+ }
+
+ if (offset < cache->mmap_length &&
+ size <= cache->mmap_length - offset) {
+ /* already mapped */
+ i_assert(cache->mmap_base != NULL);
+ *data_r = CONST_PTR_OFFSET(cache->mmap_base, offset);
+ if (orig_size > cache->mmap_length - offset) {
+ /* requested offset/size points outside file */
+ return 0;
+ }
+ return 1;
+ }
+
+ if (cache->mmap_base != NULL) {
+ if (munmap(cache->mmap_base, cache->mmap_length) < 0)
+ mail_cache_set_syscall_error(cache, "munmap()");
+ } else {
+ if (cache->fd == -1) {
+ /* unusable, waiting for purging or
+ index is in memory */
+ i_assert(cache->need_purge_file_seq != 0 ||
+ MAIL_INDEX_IS_IN_MEMORY(cache->index));
+ return -1;
+ }
+ }
+
+ /* map the whole file */
+ cache->hdr = NULL;
+ cache->mmap_length = 0;
+ if (cache->read_buf != NULL)
+ buffer_set_used_size(cache->read_buf, 0);
+
+ cache->mmap_base = mmap_ro_file(cache->fd, &cache->mmap_length);
+ if (cache->mmap_base == MAP_FAILED) {
+ cache->mmap_base = NULL;
+ /* rate-limit mmap error logging to once per second */
+ if (ioloop_time != cache->last_mmap_error_time) {
+ cache->last_mmap_error_time = ioloop_time;
+ mail_cache_set_syscall_error(cache, t_strdup_printf(
+ "mmap(size=%zu)", cache->mmap_length));
+ }
+ cache->mmap_length = 0;
+ return -1;
+ }
+ *data_r = offset > cache->mmap_length ? NULL :
+ CONST_PTR_OFFSET(cache->mmap_base, offset);
+ return mail_cache_map_finish(cache, offset, orig_size,
+ cache->mmap_base, FALSE, corrupted_r);
+}
+
+/* Map a non-header range of the cache file. The header (offset 0) must be
+   mapped via mail_cache_map_all() instead, which handles corruption.
+   Returns 1 if mapped, 0 if the range is outside the file, -1 on error. */
+int mail_cache_map(struct mail_cache *cache, size_t offset, size_t size,
+		   const void **data_r)
+{
+	i_assert(offset != 0);
+
+	bool corrupted;
+	int ret = mail_cache_map_full(cache, offset, size, data_r, &corrupted);
+	/* corruption is presumably flagged only when mapping includes the
+	   header at offset 0 - TODO confirm from mail_cache_map_finish() */
+	i_assert(!corrupted);
+	return ret;
+}
+
+/* Map the whole cache file, starting from the header.
+   Returns 1 if ok, 0 if the cache was found to be corrupted, -1 on
+   I/O error. */
+int mail_cache_map_all(struct mail_cache *cache)
+{
+	const void *data;
+	bool corrupted;
+
+	int ret = mail_cache_map_full(cache, 0, 0, &data, &corrupted);
+	/* 0 is returned only for out-of-range offsets, which can't happen
+	   with offset=0 */
+	i_assert(ret != 0);
+	if (corrupted) {
+		i_assert(ret == -1);
+		return 0;
+	}
+	return ret < 0 ? -1 : 1;
+}
+
+/* Open the cache file and verify that it's usable, including reading its
+   header fields. Returns 1 if ok, 0 if the cache doesn't exist or it was
+   corrupted (and was closed), -1 on I/O error. */
+int mail_cache_open_and_verify(struct mail_cache *cache)
+{
+	int ret;
+
+	if (cache->opened) {
+		/* already open - reuse it unless it became unusable */
+		if (!MAIL_CACHE_IS_UNUSABLE(cache))
+			return 1;
+		mail_cache_file_close(cache);
+	}
+	if ((ret = mail_cache_try_open(cache)) < 0) {
+		/* I/O error */
+		mail_cache_file_close(cache);
+		return -1;
+	}
+
+	if (ret > 0) {
+		if (mail_cache_header_fields_read(cache) < 0) {
+			/* corrupted */
+			ret = 0;
+		}
+	}
+	if (ret == 0) {
+		/* cache was corrupted and should have been deleted already. */
+		mail_cache_file_close(cache);
+	}
+	return ret;
+}
+
+/* Allocate a mail_cache for the given index using an explicit cache file
+   path. The cache file itself is not opened here (fd stays -1); opening
+   happens lazily later. Registers the "cache" index extension and its
+   expunge handler. */
+struct mail_cache *
+mail_cache_open_or_create_path(struct mail_index *index, const char *path)
+{
+	struct mail_cache *cache;
+
+	cache = i_new(struct mail_cache, 1);
+	cache->index = index;
+	cache->fd = -1;
+	cache->filepath = i_strdup(path);
+	cache->field_pool = pool_alloconly_create("Cache fields", 2048);
+	/* field names are case-insensitive */
+	hash_table_create(&cache->field_name_hash, cache->field_pool, 0,
+			  strcase_hash, strcasecmp);
+
+	cache->event = event_create(index->event);
+	event_add_category(cache->event, &event_category_mail_cache);
+
+	cache->dotlock_settings.use_excl_lock =
+		(index->flags & MAIL_INDEX_OPEN_FLAG_DOTLOCK_USE_EXCL) != 0;
+	cache->dotlock_settings.nfs_flush =
+		(index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0;
+	cache->dotlock_settings.timeout =
+		I_MIN(MAIL_CACHE_LOCK_TIMEOUT, index->set.max_lock_timeout_secs);
+	cache->dotlock_settings.stale_timeout = MAIL_CACHE_LOCK_CHANGE_TIMEOUT;
+
+	/* with mmap disabled, reads go through the file_cache layer */
+	if (!MAIL_INDEX_IS_IN_MEMORY(index) &&
+	    (index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) != 0)
+		cache->file_cache = file_cache_new_path(-1, cache->filepath);
+	/* save-only mode reads with pread() into a buffer instead of
+	   keeping long-lived maps */
+	cache->map_with_read =
+		(cache->index->flags & MAIL_INDEX_OPEN_FLAG_SAVEONLY) != 0;
+
+	cache->ext_id =
+		mail_index_ext_register(index, "cache", 0,
+					sizeof(uint32_t), sizeof(uint32_t));
+	mail_index_register_expunge_handler(index, cache->ext_id,
+					    mail_cache_expunge_handler);
+	return cache;
+}
+
+/* Allocate a mail_cache using the default path:
+   <index filepath> + ".cache" */
+struct mail_cache *mail_cache_open_or_create(struct mail_index *index)
+{
+	const char *path = t_strconcat(index->filepath,
+				       MAIL_CACHE_FILE_SUFFIX, NULL);
+	return mail_cache_open_or_create_path(index, path);
+}
+
+/* Free the cache and everything it owns. All views must have been closed
+   already. Sets *_cache to NULL. */
+void mail_cache_free(struct mail_cache **_cache)
+{
+	struct mail_cache *cache = *_cache;
+
+	*_cache = NULL;
+
+	i_assert(cache->views == NULL);
+
+	if (cache->file_cache != NULL)
+		file_cache_free(&cache->file_cache);
+
+	mail_index_unregister_expunge_handler(cache->index, cache->ext_id);
+	mail_cache_file_close(cache);
+
+	buffer_free(&cache->read_buf);
+	hash_table_destroy(&cache->field_name_hash);
+	pool_unref(&cache->field_pool);
+	event_unref(&cache->event);
+	i_free(cache->need_purge_reason);
+	i_free(cache->field_file_map);
+	i_free(cache->file_field_map);
+	i_free(cache->fields);
+	i_free(cache->filepath);
+	i_free(cache);
+}
+
+/* Take a write lock on the cache file, using either fcntl/flock-style
+   locking or dotlocks depending on the configured lock method.
+   Returns 1 if locked, 0 if locking timed out, -1 on error. */
+static int mail_cache_lock_file(struct mail_cache *cache)
+{
+	unsigned int timeout_secs;
+	bool nonblock = FALSE;
+	int ret;
+
+	if (cache->last_lock_failed) {
+		/* previous locking failed. don't waste time waiting on it
+		   again, just try once to see if it's available now. */
+		nonblock = TRUE;
+	}
+
+	i_assert(cache->file_lock == NULL);
+	if (cache->index->set.lock_method != FILE_LOCK_METHOD_DOTLOCK) {
+		timeout_secs = I_MIN(MAIL_CACHE_LOCK_TIMEOUT,
+				     cache->index->set.max_lock_timeout_secs);
+
+		ret = mail_index_lock_fd(cache->index, cache->filepath,
+					 cache->fd, F_WRLCK,
+					 nonblock ? 0 : timeout_secs,
+					 &cache->file_lock);
+	} else {
+		struct dotlock *dotlock;
+		enum dotlock_create_flags flags =
+			nonblock ? DOTLOCK_CREATE_FLAG_NONBLOCK : 0;
+
+		ret = file_dotlock_create(&cache->dotlock_settings,
+					  cache->filepath, flags, &dotlock);
+		if (ret > 0)
+			cache->file_lock = file_lock_from_dotlock(&dotlock);
+		else if (ret < 0) {
+			mail_cache_set_syscall_error(cache,
+						     "file_dotlock_create()");
+		}
+	}
+	cache->last_lock_failed = ret <= 0;
+
+	/* don't bother warning if locking failed due to a timeout. since cache
+	   updating isn't all that important we're using a very short timeout
+	   so it can be triggered sometimes on heavy load */
+	if (ret <= 0)
+		return ret;
+
+	/* locked - make sure we're not reading stale data (e.g. NFS) */
+	mail_index_flush_read_cache(cache->index, cache->filepath, cache->fd,
+				    TRUE);
+	return 1;
+}
+
+/* Release the cache file lock, if one is held. Safe to call when
+   not locked. */
+static void mail_cache_unlock_file(struct mail_cache *cache)
+{
+	if (cache->file_lock != NULL)
+		file_unlock(&cache->file_lock);
+}
+
+/* Look up the cache extension's reset_id from a fresh index view and
+   return it in *reset_id_r. Returns TRUE if the currently opened cache
+   file's file_seq matches it, i.e. the cache file and index agree. */
+static bool
+mail_cache_verify_reset_id(struct mail_cache *cache, uint32_t *reset_id_r)
+{
+	const struct mail_index_ext *ext;
+	struct mail_index_view *iview;
+	uint32_t reset_id;
+
+	iview = mail_index_view_open(cache->index);
+	ext = mail_index_view_get_ext(iview, cache->ext_id);
+	/* ext == NULL: cache extension doesn't exist in the index yet */
+	reset_id = ext == NULL ? 0 : ext->reset_id;
+	mail_index_view_close(&iview);
+
+	*reset_id_r = reset_id;
+	return cache->hdr->file_seq == reset_id;
+}
+
+/* Wait for any in-progress cache purging (by locking the transaction log
+   and the cache file) and then re-check whether the cache file_seq matches
+   the index's reset_id. On return the cache file lock state is restored to
+   what it was on entry, except when returning failure.
+   Returns 1 if reset_id now matches, 0 if it can't be synced (cache needs
+   to be recreated, or .log is already locked by us), -1 on error. */
+static int
+mail_cache_sync_wait_index(struct mail_cache *cache, uint32_t *reset_id_r)
+{
+	const char *lock_reason = "cache reset_id sync";
+	uint32_t file_seq;
+	uoff_t file_offset;
+	bool cache_locked = cache->file_lock != NULL;
+	int ret;
+
+	/* can't wait on the .log lock if we're already holding it */
+	if (cache->index->log_sync_locked)
+		return 0;
+
+	/* Wait for .log file lock, so we can be sure that there is no cache
+	   purging going on. (Because it first recreates the cache file,
+	   unlocks it and only then writes the changes to the index and
+	   releases the .log lock.) To prevent deadlocks, cache file must be
+	   locked after the .log, not before. */
+	if (cache_locked)
+		mail_cache_unlock_file(cache);
+	if (mail_transaction_log_sync_lock(cache->index->log, lock_reason,
+					   &file_seq, &file_offset) < 0)
+		return -1;
+	/* Lock the cache file as well so we'll get a guaranteed result on
+	   whether the reset_id can be synced or if it's already desynced and
+	   the cache just needs to be recreated. */
+	ret = -1;
+	while (mail_cache_lock_file(cache) > 0) {
+		/* Locked the current fd, but it may have already been
+		   recreated. Reopen and retry if needed. */
+		if (!mail_cache_need_reopen(cache)) {
+			ret = 1;
+			break;
+		}
+		if ((ret = mail_cache_reopen(cache)) <= 0)
+			break;
+	}
+
+	if (ret <= 0)
+		;
+	else if (mail_index_refresh(cache->index) < 0)
+		ret = -1;
+	else
+		ret = mail_cache_verify_reset_id(cache, reset_id_r) ? 1 : 0;
+	mail_transaction_log_sync_unlock(cache->index->log, lock_reason);
+	/* keep the cache locked only on success and only if the caller had
+	   it locked when we were called */
+	if (ret <= 0 || !cache_locked)
+		mail_cache_unlock_file(cache);
+	return ret;
+}
+
+/* Make sure the opened cache file's file_seq matches the index's reset_id,
+   refreshing the index and waiting for a potential cache purge if needed.
+   Returns 1 if they match, 0 if not (cache is corrupted/lost and needs
+   recreating), -1 on error. */
+int mail_cache_sync_reset_id(struct mail_cache *cache)
+{
+	uint32_t reset_id;
+	int ret;
+
+	/* verify that the index reset_id matches the cache's file_seq */
+	if (mail_cache_verify_reset_id(cache, &reset_id))
+		return 1;
+
+	/* Mismatch. See if we can get it synced. */
+	if (cache->index->mapping) {
+		/* Syncing is already locked, and we're in the middle of
+		   mapping the index. The cache is unusable. */
+		i_assert(cache->index->log_sync_locked);
+		mail_cache_set_corrupted(cache, "reset_id mismatch during sync");
+		return 0;
+	}
+
+	/* See if reset_id changes after refreshing the index. */
+	if (mail_index_refresh(cache->index) < 0)
+		return -1;
+	if (mail_cache_verify_reset_id(cache, &reset_id))
+		return 1;
+
+	/* Use locking to wait for a potential cache purging to finish.
+	   If that didn't work either, the cache is corrupted or lost. */
+	ret = mail_cache_sync_wait_index(cache, &reset_id);
+	if (ret == 0 && cache->fd != -1 && reset_id != 0) {
+		mail_cache_set_corrupted(cache,
+			"reset_id mismatch even after locking "
+			"(file_seq=%u != reset_id=%u)",
+			cache->hdr == NULL ? 0 : cache->hdr->file_seq,
+			reset_id);
+	}
+	return ret;
+}
+
+/* Lock the latest cache file for updating, remapping it and copying the
+   current header to hdr_copy. Must be paired with mail_cache_unlock() or
+   mail_cache_flush_and_unlock().
+   Returns 1 if locked, 0 if cache is unusable (in-memory index, readonly
+   index, or cache missing/corrupted), -1 on error. */
+int mail_cache_lock(struct mail_cache *cache)
+{
+	int ret;
+
+	i_assert(!cache->locked);
+	/* the only reason why we might be in here while mapping the index is
+	   if we're coming from mail_cache_expunge_count() while syncing the
+	   index. */
+	i_assert(!cache->index->mapping || cache->index->log_sync_locked);
+
+	if (MAIL_INDEX_IS_IN_MEMORY(cache->index) ||
+	    cache->index->readonly)
+		return 0;
+
+	/* Make sure at least some cache file is opened. Usually it's the
+	   latest one, so delay until it's locked to check whether a newer
+	   cache file exists. */
+	if ((ret = mail_cache_open_and_verify(cache)) < 0)
+		return -1;
+	if (ret == 0) {
+		/* Cache doesn't exist or it was just found to be corrupted and
+		   was unlinked. Cache purging will create it back. */
+		return 0;
+	}
+
+	for (;;) {
+		if (mail_cache_lock_file(cache) <= 0)
+			return -1;
+		if (!mail_cache_need_reopen(cache)) {
+			/* locked the latest file */
+			break;
+		}
+		if ((ret = mail_cache_reopen(cache)) <= 0) {
+			i_assert(cache->file_lock == NULL);
+			return ret;
+		}
+		i_assert(cache->file_lock == NULL);
+		/* okay, so it was just purged. try again. */
+	}
+
+	if ((ret = mail_cache_sync_reset_id(cache)) <= 0) {
+		mail_cache_unlock_file(cache);
+		return ret;
+	}
+	i_assert(cache->file_lock != NULL);
+
+	/* successfully locked - make sure our header is up to date */
+	cache->locked = TRUE;
+	cache->hdr_modified = FALSE;
+
+	/* drop any cached copy of the header; another process may have
+	   rewritten it before we got the lock */
+	if (cache->file_cache != NULL) {
+		file_cache_invalidate(cache->file_cache, 0,
+				      sizeof(struct mail_cache_header));
+	}
+	if (cache->read_buf != NULL)
+		buffer_set_used_size(cache->read_buf, 0);
+	if ((ret = mail_cache_map_all(cache)) <= 0) {
+		mail_cache_unlock(cache);
+		return ret;
+	}
+	cache->hdr_copy = *cache->hdr;
+	return 1;
+}
+
+/* Write out any pending field header and modified main header changes,
+   then unlock the cache. Returns 0 if ok, -1 on error (cache was still
+   unlocked). */
+int mail_cache_flush_and_unlock(struct mail_cache *cache)
+{
+	int ret = 0;
+
+	i_assert(cache->locked);
+
+	if (cache->field_header_write_pending)
+		ret = mail_cache_header_fields_update(cache);
+
+	/* Cache may become unusable during for various reasons, e.g.
+	   mail_cache_map(). Also the above mail_cache_header_fields_update()
+	   call can make it unusable, so check this after it. */
+	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
+		mail_cache_unlock(cache);
+		return -1;
+	}
+
+	if (cache->hdr_modified) {
+		cache->hdr_modified = FALSE;
+		if (mail_cache_write(cache, &cache->hdr_copy,
+				     sizeof(cache->hdr_copy), 0) < 0)
+			ret = -1;
+		cache->hdr_ro_copy = cache->hdr_copy;
+		mail_cache_update_need_purge(cache);
+	}
+
+	mail_cache_unlock(cache);
+	return ret;
+}
+
+/* Unlock the cache, fdatasync()ing first if fsync_mode requires it.
+   Any unwritten hdr_copy modifications are discarded. */
+void mail_cache_unlock(struct mail_cache *cache)
+{
+	i_assert(cache->locked);
+
+	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
+		/* we found it to be broken during the lock. just clean up. */
+		cache->hdr_modified = FALSE;
+	} else if (cache->index->set.fsync_mode == FSYNC_MODE_ALWAYS) {
+		if (fdatasync(cache->fd) < 0)
+			mail_cache_set_syscall_error(cache, "fdatasync()");
+	}
+
+	cache->locked = FALSE;
+	mail_cache_unlock_file(cache);
+}
+
+/* Write data to the locked cache file at the given offset, mirroring the
+   write into the file_cache layer and invalidating read_buf so later reads
+   see the new data. Returns 0 if ok, -1 on error. */
+int mail_cache_write(struct mail_cache *cache, const void *data, size_t size,
+		     uoff_t offset)
+{
+	i_assert(cache->locked);
+
+	if (pwrite_full(cache->fd, data, size, offset) < 0) {
+		mail_cache_set_syscall_error(cache, "pwrite_full()");
+		return -1;
+	}
+
+	if (cache->file_cache != NULL)
+		file_cache_write(cache->file_cache, data, size, offset);
+	if (cache->read_buf != NULL)
+		buffer_set_used_size(cache->read_buf, 0);
+	return 0;
+}
+
+/* Append data to the cache file. *offset == 0 means "append at EOF", in
+   which case it's replaced with the actual append offset. The configured
+   maximum cache size is enforced both before and after resolving the
+   offset. Returns 0 if ok, -1 on error. */
+int mail_cache_append(struct mail_cache *cache, const void *data, size_t size,
+		      uint32_t *offset)
+{
+	struct stat st;
+
+	if (*offset == 0) {
+		if (fstat(cache->fd, &st) < 0) {
+			if (!ESTALE_FSTAT(errno))
+				mail_cache_set_syscall_error(cache, "fstat()");
+			return -1;
+		}
+		cache->last_stat_size = st.st_size;
+		if ((uoff_t)st.st_size > cache->index->optimization_set.cache.max_size) {
+			mail_cache_set_corrupted(cache, "Cache file too large");
+			return -1;
+		}
+		*offset = st.st_size;
+	}
+	/* overflow-safe check that offset+size stays within max_size */
+	if (*offset >= cache->index->optimization_set.cache.max_size ||
+	    cache->index->optimization_set.cache.max_size - *offset < size) {
+		mail_cache_set_corrupted(cache, "Cache file too large");
+		return -1;
+	}
+	if (mail_cache_write(cache, data, size, *offset) < 0)
+		return -1;
+	return 0;
+}
+
+/* Returns TRUE if the cache is open and usable. */
+bool mail_cache_exists(struct mail_cache *cache)
+{
+	return !MAIL_CACHE_IS_UNUSABLE(cache);
+}
+
+/* Open a cache view on top of the given index view. The view is added to
+   the cache's linked list of open views and must be closed with
+   mail_cache_view_close(). */
+struct mail_cache_view *
+mail_cache_view_open(struct mail_cache *cache, struct mail_index_view *iview)
+{
+	struct mail_cache_view *view;
+
+	view = i_new(struct mail_cache_view, 1);
+	view->cache = cache;
+	view->view = iview;
+	/* +10 leaves room for fields registered after the view was opened */
+	view->cached_exists_buf =
+		buffer_create_dynamic(default_pool,
+				      cache->file_fields_count + 10);
+	DLLIST_PREPEND(&cache->views, view);
+	return view;
+}
+
+/* Close a cache view, flushing any pending field header updates (unless a
+   purge is in progress). The view's transaction view must already be
+   closed. Sets *_view to NULL. */
+void mail_cache_view_close(struct mail_cache_view **_view)
+{
+	struct mail_cache_view *view = *_view;
+
+	i_assert(view->trans_view == NULL);
+
+	*_view = NULL;
+	if (view->cache->field_header_write_pending &&
+	    !view->cache->purging)
+		(void)mail_cache_header_fields_update(view->cache);
+
+	DLLIST_REMOVE(&view->cache->views, view);
+	buffer_free(&view->cached_exists_buf);
+	i_free(view);
+}
+
+/* Enable/disable automatic caching decision updates for lookups done via
+   this view (disabled e.g. while precaching data). */
+void mail_cache_view_update_cache_decisions(struct mail_cache_view *view,
+					    bool update)
+{
+	view->no_decision_updates = !update;
+}
+
+/* Returns the sequence of the first "new" mail, i.e. the first mail whose
+   UID >= day_first_uid[7] (presumably the first UID of the most recent
+   tracked day - TODO confirm against mail-index.h). If there are no new
+   mails, returns messages_count+1. */
+uint32_t mail_cache_get_first_new_seq(struct mail_index_view *view)
+{
+	const struct mail_index_header *idx_hdr;
+	uint32_t first_new_seq, message_count;
+
+	idx_hdr = mail_index_get_header(view);
+	/* day_first_uid not tracked yet - treat all mails as new */
+	if (idx_hdr->day_first_uid[7] == 0)
+		return 1;
+
+	if (!mail_index_lookup_seq_range(view, idx_hdr->day_first_uid[7],
+					 (uint32_t)-1, &first_new_seq,
+					 &message_count)) {
+		/* all messages are too old */
+		return idx_hdr->messages_count+1;
+	}
+	return first_new_seq;
+}
diff --git a/src/lib-index/mail-cache.h b/src/lib-index/mail-cache.h
new file mode 100644
index 0000000..09fde29
--- /dev/null
+++ b/src/lib-index/mail-cache.h
@@ -0,0 +1,193 @@
+#ifndef MAIL_CACHE_H
+#define MAIL_CACHE_H
+
+#include "mail-index.h"
+
+#define MAIL_CACHE_FILE_SUFFIX ".cache"
+
+struct mail_cache;
+struct mail_cache_view;
+struct mail_cache_transaction_ctx;
+
+enum mail_cache_decision_type {
+ /* Not needed currently */
+ MAIL_CACHE_DECISION_NO = 0x00,
+ /* Needed only for new mails. Drop when purging. */
+ MAIL_CACHE_DECISION_TEMP = 0x01,
+ /* Needed. */
+ MAIL_CACHE_DECISION_YES = 0x02,
+
+ /* This decision has been forced manually, don't change it. */
+ MAIL_CACHE_DECISION_FORCED = 0x80
+};
+
+enum mail_cache_field_type {
+ /* Fixed size cache field. The size is specified only in the cache
+ field header, not separately for each record. */
+ MAIL_CACHE_FIELD_FIXED_SIZE,
+ /* Variable sized binary data. */
+ MAIL_CACHE_FIELD_VARIABLE_SIZE,
+ /* Variable sized string. There is no difference internally to how
+ MAIL_CACHE_FIELD_VARIABLE_SIZE is handled, but it helps at least
+ "doveadm dump" to know whether to hex-encode the output. */
+ MAIL_CACHE_FIELD_STRING,
+ /* A fixed size bitmask field. It's possible to add new bits by
+ updating this field. All the added fields are ORed together. */
+ MAIL_CACHE_FIELD_BITMASK,
+ /* Variable sized message header. The data begins with a 0-terminated
+ uint32_t line_numbers[]. The line number exists only for each
+ header, header continuation lines in multiline headers don't get
+ listed. After the line numbers comes the list of headers, including
+ the "header-name: " prefix for each line, LFs and the TABs or spaces
+ for continued lines. */
+ MAIL_CACHE_FIELD_HEADER,
+
+ MAIL_CACHE_FIELD_COUNT
+};
+
+struct mail_cache_field {
+	/* Unique name for the cache field. The field name doesn't matter
+	   internally. */
+	const char *name;
+	/* Field index number. Used to optimize accessing the cache field. */
+	unsigned int idx;
+
+	/* Type of the field */
+	enum mail_cache_field_type type;
+	/* Size of the field, if it's a fixed size type. */
+	unsigned int field_size;
+	/* Current caching decision */
+	enum mail_cache_decision_type decision;
+	/* Timestamp when the cache field was last intentionally read (e.g.
+	   by an IMAP client). Saving new mails doesn't update this field.
+	   This is used to track when an unaccessed field should be dropped. */
+	time_t last_used;
+};
+
+struct mail_cache *mail_cache_open_or_create(struct mail_index *index);
+struct mail_cache *
+mail_cache_open_or_create_path(struct mail_index *index, const char *path);
+void mail_cache_free(struct mail_cache **cache);
+
+/* Register fields. fields[].idx is updated to contain field index.
+ If field already exists and its caching decision is NO, the decision is
+ updated to the input field's decision. */
+void mail_cache_register_fields(struct mail_cache *cache,
+ struct mail_cache_field *fields,
+ unsigned int fields_count);
+/* Returns registered field index, or UINT_MAX if not found. */
+unsigned int
+mail_cache_register_lookup(struct mail_cache *cache, const char *name);
+/* Returns specified field */
+const struct mail_cache_field *
+mail_cache_register_get_field(struct mail_cache *cache, unsigned int field_idx);
+/* Returns a list of all registered fields */
+struct mail_cache_field *
+mail_cache_register_get_list(struct mail_cache *cache, pool_t pool,
+ unsigned int *count_r);
+
+/* Returns TRUE if cache should be purged. */
+bool mail_cache_need_purge(struct mail_cache *cache, const char **reason_r);
+/* Set cache file to be purged later. */
+void mail_cache_purge_later(struct mail_cache *cache, const char *reason);
+/* Don't try to purge the cache file later after all. */
+void mail_cache_purge_later_reset(struct mail_cache *cache);
+/* Purge cache file. Offsets are updated to given transaction.
+ The transaction log must already be exclusively locked.
+
+ The cache purging is done only if the current cache file's file_seq
+ matches purge_file_seq. The idea is that purging isn't done if
+ another process had just purged it. 0 means the cache file is created
+ only if it didn't already exist. (uint32_t)-1 means that purging is
+ done always regardless of file_seq. */
+int mail_cache_purge_with_trans(struct mail_cache *cache,
+ struct mail_index_transaction *trans,
+ uint32_t purge_file_seq, const char *reason);
+int mail_cache_purge(struct mail_cache *cache, uint32_t purge_file_seq,
+ const char *reason);
+/* Returns TRUE if there is at least something in the cache. */
+bool mail_cache_exists(struct mail_cache *cache);
+/* Open and read cache header. Returns 1 if ok, 0 if cache doesn't exist or it
+ was corrupted and just got deleted, -1 if I/O error. */
+int mail_cache_open_and_verify(struct mail_cache *cache);
+
+struct mail_cache_view *
+mail_cache_view_open(struct mail_cache *cache, struct mail_index_view *iview);
+void mail_cache_view_close(struct mail_cache_view **view);
+
+/* Normally cache decisions are updated on lookup/add. Use this function to
+ enable/disable this (useful for precaching data). */
+void mail_cache_view_update_cache_decisions(struct mail_cache_view *view,
+ bool update);
+
+/* Copy caching decisions. This is expected to be called only for a newly
+ created empty mailbox. */
+int mail_cache_decisions_copy(struct mail_cache *src, struct mail_cache *dst);
+
+/* Get index transaction specific cache transaction. */
+struct mail_cache_transaction_ctx *
+mail_cache_get_transaction(struct mail_cache_view *view,
+ struct mail_index_transaction *t);
+
+void mail_cache_transaction_reset(struct mail_cache_transaction_ctx *ctx);
+int mail_cache_transaction_commit(struct mail_cache_transaction_ctx **ctx);
+void mail_cache_transaction_rollback(struct mail_cache_transaction_ctx **ctx);
+
+/* Add new field to given record. Updates are not allowed. Fixed size fields
+ must be exactly the expected size. */
+void mail_cache_add(struct mail_cache_transaction_ctx *ctx, uint32_t seq,
+ unsigned int field_idx, const void *data, size_t data_size);
+/* Returns TRUE if field is wanted to be added and it doesn't already exist.
+ If current caching decisions say not to cache this field, FALSE is returned.
+ If seq is 0, the existence isn't checked. */
+bool mail_cache_field_want_add(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq, unsigned int field_idx);
+/* Like mail_cache_field_want_add(), but in caching decisions FALSE is
+ returned only if the decision is a forced no. */
+bool mail_cache_field_can_add(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq, unsigned int field_idx);
+/* Notify cache that the mail is now closed. Any records added with
+ mail_cache_add() are unlikely to be required again. This mainly tells
+ INDEX=MEMORY that it can free up the memory used by the mail. */
+void mail_cache_close_mail(struct mail_cache_transaction_ctx *ctx,
+ uint32_t seq);
+
+/* Returns 1 if field exists, 0 if not, -1 if error. */
+int mail_cache_field_exists(struct mail_cache_view *view, uint32_t seq,
+ unsigned int field_idx);
+/* Returns TRUE if something is cached for the message, FALSE if not. */
+bool mail_cache_field_exists_any(struct mail_cache_view *view, uint32_t seq);
+/* Returns current caching decision for given field. */
+enum mail_cache_decision_type
+mail_cache_field_get_decision(struct mail_cache *cache, unsigned int field_idx);
+/* Notify the decision handling code when field is committed to cache.
+ If this is the first time the field is added to cache, its caching decision
+ is updated to TEMP. */
+void mail_cache_decision_add(struct mail_cache_view *view, uint32_t seq,
+ unsigned int field);
+
+/* Set data_r and size_r to point to wanted field in cache file.
+ Returns 1 if field was found, 0 if not, -1 if error. */
+int mail_cache_lookup_field(struct mail_cache_view *view, buffer_t *dest_buf,
+ uint32_t seq, unsigned int field_idx);
+
+/* Return specified cached headers. Returns 1 if all fields were found,
+ 0 if not, -1 if error. dest is updated only if all fields were found. */
+int mail_cache_lookup_headers(struct mail_cache_view *view, string_t *dest,
+ uint32_t seq, const unsigned int field_idxs[],
+ unsigned int fields_count);
+
+/* "Error in index cache file %s: ...". */
+void mail_cache_set_corrupted(struct mail_cache *cache, const char *fmt, ...)
+ ATTR_FORMAT(2, 3) ATTR_COLD;
+void mail_cache_set_seq_corrupted_reason(struct mail_cache_view *cache_view,
+ uint32_t seq, const char *reason)
+ ATTR_COLD;
+
+/* Returns human-readable reason for why a cached field is missing for
+ the specified mail. This is mainly for debugging purposes, so the exact
+ field doesn't matter here. */
+const char *
+mail_cache_get_missing_reason(struct mail_cache_view *view, uint32_t seq);
+
+#endif
diff --git a/src/lib-index/mail-index-alloc-cache.c b/src/lib-index/mail-index-alloc-cache.c
new file mode 100644
index 0000000..fe52754
--- /dev/null
+++ b/src/lib-index/mail-index-alloc-cache.c
@@ -0,0 +1,315 @@
+/* Copyright (c) 2010-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "module-context.h"
+#include "eacces-error.h"
+#include "mail-index-private.h"
+#include "mail-index-alloc-cache.h"
+
+#define MAIL_INDEX_ALLOC_CACHE_CONTEXT(obj) \
+ MODULE_CONTEXT(obj, mail_index_alloc_cache_index_module)
+
+/* How many seconds to keep index opened for reuse after it's been closed */
+#define INDEX_CACHE_TIMEOUT 10
+/* How many closed indexes to keep */
+#define INDEX_CACHE_MAX 3
+
+struct mail_index_alloc_cache_list {
+ union mail_index_module_context module_ctx;
+ struct mail_index_alloc_cache_list *next;
+
+ struct mail_index *index;
+ char *mailbox_path;
+ int refcount;
+ bool referenced;
+
+ dev_t index_dir_dev;
+ ino_t index_dir_ino;
+
+ time_t destroy_time;
+};
+
+static MODULE_CONTEXT_DEFINE_INIT(mail_index_alloc_cache_index_module,
+ &mail_index_module_register);
+static struct mail_index_alloc_cache_list *indexes = NULL;
+static unsigned int indexes_cache_references_count = 0;
+static struct timeout *to_index = NULL;
+
+/* Add a newly allocated index to the cache list with refcount=1, recording
+   the index directory's dev/ino (from *st) so later lookups can match it
+   even through symlinks/renames. */
+static struct mail_index_alloc_cache_list *
+mail_index_alloc_cache_add(struct mail_index *index,
+			   const char *mailbox_path, struct stat *st)
+{
+	struct mail_index_alloc_cache_list *list;
+
+	list = i_new(struct mail_index_alloc_cache_list, 1);
+	list->refcount = 1;
+	list->index = index;
+
+	list->mailbox_path = i_strdup(mailbox_path);
+	list->index_dir_dev = st->st_dev;
+	list->index_dir_ino = st->st_ino;
+
+	list->next = indexes;
+	indexes = list;
+
+	MODULE_CONTEXT_SET(index, mail_index_alloc_cache_index_module, list);
+	return list;
+}
+
+/* Drop the cache's own "keep open" reference on the index: close the index
+   and decrease the global referenced-indexes counter. */
+static void
+mail_index_alloc_cache_list_unref(struct mail_index_alloc_cache_list *list)
+{
+	i_assert(list->referenced);
+	i_assert(indexes_cache_references_count > 0);
+
+	indexes_cache_references_count--;
+	mail_index_close(list->index);
+	list->referenced = FALSE;
+}
+
+/* Free a cache list entry and its index. The caller must have already
+   removed the entry from the indexes list and dropped all user refs. */
+static void
+mail_index_alloc_cache_list_free(struct mail_index_alloc_cache_list *list)
+{
+	i_assert(list->refcount == 0);
+
+	if (list->referenced)
+		mail_index_alloc_cache_list_unref(list);
+	mail_index_free(&list->index);
+	i_free(list->mailbox_path);
+	i_free(list);
+}
+
+/* Find a cached index matching the given mailbox/index_dir, while also
+   dropping expired unreferenced entries along the way (entries past their
+   destroy_time, or beyond INDEX_CACHE_MAX of them). Matching is done by
+   index_dir dev/ino when available, otherwise by mailbox path. Returns the
+   match or NULL. */
+static struct mail_index_alloc_cache_list *
+mail_index_alloc_cache_find_and_expire(const char *mailbox_path,
+				       const char *index_dir,
+				       const struct stat *index_st)
+{
+	struct mail_index_alloc_cache_list **indexp, *rec, *match;
+	unsigned int destroy_count;
+	struct stat st;
+
+	destroy_count = 0; match = NULL;
+	for (indexp = &indexes; *indexp != NULL;) {
+		rec = *indexp;
+
+		if (match != NULL) {
+			/* already found the index. we're just going through
+			   the rest of them to drop 0 refcounts */
+		} else if (rec->refcount == 0 && rec->index->open_count == 0) {
+			/* index is already closed. don't even try to
+			   reuse it. */
+		} else if (index_dir != NULL && rec->index_dir_ino != 0) {
+			if (index_st->st_ino == rec->index_dir_ino &&
+			    CMP_DEV_T(index_st->st_dev, rec->index_dir_dev)) {
+				/* make sure the directory still exists.
+				   it might have been renamed and we're trying
+				   to access it via its new path now. */
+				if (stat(rec->index->dir, &st) < 0 ||
+				    st.st_ino != index_st->st_ino ||
+				    !CMP_DEV_T(st.st_dev, index_st->st_dev))
+					rec->destroy_time = 0;
+				else
+					match = rec;
+			}
+		} else if (mailbox_path != NULL && rec->mailbox_path != NULL &&
+			   index_dir == NULL && rec->index_dir_ino == 0) {
+			/* in-memory indexes are matched by mailbox path */
+			if (strcmp(mailbox_path, rec->mailbox_path) == 0)
+				match = rec;
+		}
+
+		if (rec->refcount == 0 && rec != match) {
+			if (rec->destroy_time <= ioloop_time ||
+			    destroy_count >= INDEX_CACHE_MAX) {
+				*indexp = rec->next;
+				mail_index_alloc_cache_list_free(rec);
+				continue;
+			} else {
+				destroy_count++;
+			}
+		}
+
+		indexp = &(*indexp)->next;
+	}
+	return match;
+}
+
+/* Return a mail_index for the given mailbox/index_dir, reusing a cached
+   one when possible, otherwise allocating a new one and adding it to the
+   cache. The returned index must be released with
+   mail_index_alloc_cache_unref(). */
+struct mail_index *
+mail_index_alloc_cache_get(struct event *parent_event, const char *mailbox_path,
+			   const char *index_dir, const char *prefix)
+{
+	struct mail_index_alloc_cache_list *match;
+	struct stat st;
+
+	/* compare index_dir inodes so we don't break even with symlinks.
+	   if index_dir doesn't exist yet or if using in-memory indexes, just
+	   compare mailbox paths */
+	i_zero(&st);
+	if (index_dir == NULL) {
+		/* in-memory indexes */
+	} else if (stat(index_dir, &st) < 0) {
+		if (errno == ENOENT) {
+			/* it'll be created later */
+		} else if (errno == EACCES) {
+			e_error(parent_event, "%s",
+				eacces_error_get("stat", index_dir));
+		} else {
+			e_error(parent_event, "stat(%s) failed: %m", index_dir);
+		}
+	}
+
+	match = mail_index_alloc_cache_find_and_expire(mailbox_path,
+						       index_dir, &st);
+	if (match == NULL) {
+		struct mail_index *index =
+			mail_index_alloc(parent_event, index_dir, prefix);
+		match = mail_index_alloc_cache_add(index, mailbox_path, &st);
+	} else {
+		match->refcount++;
+	}
+	i_assert(match->index != NULL);
+	return match->index;
+}
+
+/* Find an already-cached index by index directory (matched via dev/ino).
+   Returns NULL if not found or if the directory can't be stat()ed.
+   Doesn't add a reference. */
+struct mail_index *
+mail_index_alloc_cache_find(const char *index_dir)
+{
+	struct mail_index_alloc_cache_list *rec;
+	struct stat st;
+
+	if (stat(index_dir, &st) < 0) {
+		if (errno != ENOENT)
+			i_error("stat(%s) failed: %m", index_dir);
+		return NULL;
+	}
+
+	for (rec = indexes; rec != NULL; rec = rec->next) {
+		if (st.st_ino == rec->index_dir_ino &&
+		    CMP_DEV_T(st.st_dev, rec->index_dir_dev))
+			return rec->index;
+	}
+	return NULL;
+}
+
+/* Free unreferenced cache entries. With min_destroy_count > 0 entries are
+   freed regardless of destroy_time (up to that many), and referenced-only
+   indexes we alone keep open are closed; with 0 only expired entries go.
+   The cleanup timeout is removed once no refcount==0 entries remain.
+   Returns TRUE if anything was destroyed or closed. */
+static bool destroy_unrefed(unsigned int min_destroy_count)
+{
+	struct mail_index_alloc_cache_list **list, *rec;
+	bool destroyed = FALSE;
+	bool seen_ref0 = FALSE;
+
+	for (list = &indexes; *list != NULL;) {
+		rec = *list;
+
+		if (rec->refcount == 0 &&
+		    (min_destroy_count > 0 || rec->destroy_time <= ioloop_time)) {
+			*list = rec->next;
+			destroyed = TRUE;
+			mail_index_alloc_cache_list_free(rec);
+			if (min_destroy_count > 0)
+				min_destroy_count--;
+		} else {
+			if (rec->refcount == 0)
+				seen_ref0 = TRUE;
+			if (min_destroy_count > 0 &&
+			    rec->index->open_count == 1 &&
+			    rec->referenced) {
+				/* we're the only one keeping this index open.
+				   we might be here, because the caller is
+				   deleting this mailbox and wants its indexes
+				   to be closed. so close it. */
+				destroyed = TRUE;
+				mail_index_alloc_cache_list_unref(rec);
+			}
+			list = &(*list)->next;
+		}
+	}
+
+	if (!seen_ref0 && to_index != NULL)
+		timeout_remove(&to_index);
+	return destroyed;
+}
+
+/* Periodic timeout callback: drop cache entries whose destroy_time has
+   passed. */
+static void ATTR_NULL(1)
+index_removal_timeout(void *context ATTR_UNUSED)
+{
+	destroy_unrefed(0);
+}
+
+/* Release a reference obtained from mail_index_alloc_cache_get(). The
+   entry is kept cached for INDEX_CACHE_TIMEOUT seconds (cleaned up by a
+   timeout on the root ioloop), unless the index was already closed, in
+   which case it's freed immediately. Sets *_index to NULL. */
+void mail_index_alloc_cache_unref(struct mail_index **_index)
+{
+	struct mail_index *index = *_index;
+	struct mail_index_alloc_cache_list *list, **listp;
+
+	*_index = NULL;
+	list = NULL;
+	for (listp = &indexes; *listp != NULL; listp = &(*listp)->next) {
+		if ((*listp)->index == index) {
+			list = *listp;
+			break;
+		}
+	}
+
+	i_assert(list != NULL);
+	i_assert(list->refcount > 0);
+
+	list->refcount--;
+	list->destroy_time = ioloop_time + INDEX_CACHE_TIMEOUT;
+
+	if (list->refcount == 0 && index->open_count == 0) {
+		/* index was already closed. don't even try to cache it. */
+		*listp = list->next;
+		mail_index_alloc_cache_list_free(list);
+	} else if (to_index == NULL) {
+		/* Add to root ioloop in case we got here from an inner
+		   ioloop which gets destroyed too early. */
+		to_index = timeout_add_to(io_loop_get_root(),
+					  INDEX_CACHE_TIMEOUT*1000/2,
+					  index_removal_timeout, NULL);
+	}
+}
+
+/* Immediately destroy all unreferenced cached indexes. */
+void mail_index_alloc_cache_destroy_unrefed(void)
+{
+	destroy_unrefed(UINT_MAX);
+}
+
+/* Hook called when an index is opened: if the index directory didn't exist
+   when the cache entry was created (dev/ino unknown), record its stat now
+   so future lookups can match it. */
+void mail_index_alloc_cache_index_opened(struct mail_index *index)
+{
+	struct mail_index_alloc_cache_list *list =
+		MAIL_INDEX_ALLOC_CACHE_CONTEXT(index);
+	struct stat st;
+
+	if (list != NULL && list->index_dir_ino == 0 &&
+	    !MAIL_INDEX_IS_IN_MEMORY(index)) {
+		/* newly created index directory. update its stat. */
+		if (stat(index->dir, &st) == 0) {
+			list->index_dir_ino = st.st_ino;
+			list->index_dir_dev = st.st_dev;
+		}
+	}
+}
+
+/* Hook called when the last user is closing an index: keep it open for
+   reuse by taking an extra open_count reference, evicting older cached
+   references first if the INDEX_CACHE_MAX budget is exceeded. */
+void mail_index_alloc_cache_index_closing(struct mail_index *index)
+{
+	i_assert(index->open_count > 0);
+	/* only act on the final close, and only for cache-managed indexes */
+	if (index->open_count > 1 || list == NULL)
+		return;
+
+	if (list->referenced) {
+		/* we're closing our referenced index */
+		return;
+	}
+	while (indexes_cache_references_count > INDEX_CACHE_MAX) {
+		if (!destroy_unrefed(1)) {
+			/* our cache is full already, don't keep more */
+			return;
+		}
+	}
+	/* keep the index referenced for caching */
+	indexes_cache_references_count++;
+	list->referenced = TRUE;
+	index->open_count++;
+}
diff --git a/src/lib-index/mail-index-alloc-cache.h b/src/lib-index/mail-index-alloc-cache.h
new file mode 100644
index 0000000..5f28563
--- /dev/null
+++ b/src/lib-index/mail-index-alloc-cache.h
@@ -0,0 +1,20 @@
#ifndef MAIL_INDEX_ALLOC_CACHE_H
#define MAIL_INDEX_ALLOC_CACHE_H

/* Cache of allocated struct mail_index objects so the same index files,
   when opened repeatedly, can share a single mail_index allocation. */

/* If using in-memory indexes, give index_dir=NULL. */
struct mail_index * ATTR_NULL(1, 2)
mail_index_alloc_cache_get(struct event *parent_event, const char *mailbox_path,
			   const char *index_dir, const char *prefix);
/* Release a reference obtained from mail_index_alloc_cache_get(). */
void mail_index_alloc_cache_unref(struct mail_index **index);

/* Find an existing already opened index from a given index directory. */
struct mail_index *
mail_index_alloc_cache_find(const char *index_dir);

/* Destroy all cached indexes that are no longer referenced. */
void mail_index_alloc_cache_destroy_unrefed(void);

/* internal: hooks invoked by the index open/close code paths */
void mail_index_alloc_cache_index_opened(struct mail_index *index);
void mail_index_alloc_cache_index_closing(struct mail_index *index);

#endif
diff --git a/src/lib-index/mail-index-dummy-view.c b/src/lib-index/mail-index-dummy-view.c
new file mode 100644
index 0000000..ea69377
--- /dev/null
+++ b/src/lib-index/mail-index-dummy-view.c
@@ -0,0 +1,47 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "mail-index-private.h"
+#include "mail-index-view-private.h"
+
+static void dummy_view_close(struct mail_index_view *view ATTR_UNUSED)
+{
+ i_assert(view->refcount == 0);
+
+ array_free(&view->module_contexts);
+ i_free(view);
+}
+
/* v.get_message_count() implementation for dummy views. Returns an
   obviously bogus count, (uint32_t)-3 — presumably a poison value so that
   accidental use of a dummy view's message count stands out; TODO confirm
   intent. */
static uint32_t
dummy_view_get_message_count(struct mail_index_view *view ATTR_UNUSED)
{
	return (uint32_t)-3;
}
+
/* vfuncs table for dummy views: only the first two operations (close and
   get_message_count) are implemented. Every other slot is NULL, so invoking
   any other view operation on a dummy view would dereference NULL. */
static struct mail_index_view_vfuncs dummy_view_vfuncs = {
	dummy_view_close,
	dummy_view_get_message_count,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL
};
+
/* Open a minimal "dummy" view for the index, backed only by
   dummy_view_vfuncs. The caller owns the single reference and releases it
   via the view's close vfunc. */
struct mail_index_view *mail_index_dummy_view_open(struct mail_index *index)
{
	struct mail_index_view *view;

	view = i_new(struct mail_index_view, 1);
	view->refcount = 1;
	view->v = dummy_view_vfuncs;
	view->index = index;
	/* module_contexts must exist for code that attaches per-module state
	   to views; the size is only an initial allocation hint */
	i_array_init(&view->module_contexts,
		     I_MIN(5, mail_index_module_register.id));
	return view;
}
diff --git a/src/lib-index/mail-index-fsck.c b/src/lib-index/mail-index-fsck.c
new file mode 100644
index 0000000..6636edf
--- /dev/null
+++ b/src/lib-index/mail-index-fsck.c
@@ -0,0 +1,495 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "array.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+
/* Log a "Fixed index file <path>: ..." error describing something fsck
   repaired. ATTR_FORMAT(2, 3) enables printf-style format checking of the
   variadic arguments. */
static void mail_index_fsck_error(struct mail_index *index,
				  const char *fmt, ...) ATTR_FORMAT(2, 3);
static void mail_index_fsck_error(struct mail_index *index,
				  const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	mail_index_set_error(index, "Fixed index file %s: %s",
			     index->filepath, t_strdup_vprintf(fmt, va));
	va_end(va);
}
+
/* Log a "fixed" message when the fsck'd header value differs (per <oper>)
   from the value in the on-disk map. Relies on local variables named
   "index", "map" and "hdr" at the call site; the field must be printable
   with %u. */
#define CHECK(field, oper) \
	if (hdr->field oper map->hdr.field) { \
		mail_index_fsck_error(index, #field" %u -> %u", \
				      map->hdr.field, hdr->field); \
	}
+
/* Clamp the header's transaction log position (sequence plus head/tail
   offsets) so it points inside the currently open log head file, then log
   what was changed. */
static void
mail_index_fsck_log_pos(struct mail_index *index, struct mail_index_map *map,
			struct mail_index_header *hdr)
{
	unsigned int hdr_size = index->log->head->hdr.hdr_size;
	uint32_t file_seq;
	uoff_t file_offset;

	mail_transaction_log_get_head(index->log, &file_seq, &file_offset);
	if (hdr->log_file_seq < file_seq) {
		/* index's log_file_seq is too old. move it to log head. */
		hdr->log_file_head_offset = hdr->log_file_tail_offset =
			sizeof(struct mail_transaction_log_header);
	} else if (hdr->log_file_seq == file_seq) {
		/* index's log_file_seq matches the current log. make sure the
		   offsets are valid: head must lie within
		   [hdr_size, file_offset], and tail within
		   [hdr_size, head] (0 is allowed as "unset"). */
		if (hdr->log_file_head_offset > file_offset)
			hdr->log_file_head_offset = file_offset;
		else if (hdr->log_file_head_offset < hdr_size)
			hdr->log_file_head_offset = hdr_size;

		if (hdr->log_file_tail_offset > hdr->log_file_head_offset)
			hdr->log_file_tail_offset = hdr->log_file_head_offset;
		else if (hdr->log_file_tail_offset != 0 &&
			 hdr->log_file_tail_offset < hdr_size)
			hdr->log_file_tail_offset = hdr_size;
	} else {
		/* index's log_file_seq is newer than exists. move it to
		   end of the current log head. */
		hdr->log_file_head_offset = hdr->log_file_tail_offset =
			file_offset;
	}
	hdr->log_file_seq = file_seq;

	CHECK(log_file_seq, !=);
	if (hdr->log_file_seq == map->hdr.log_file_seq) {
		/* don't bother complaining about these if file changed too */
		CHECK(log_file_head_offset, !=);
		CHECK(log_file_tail_offset, !=);
	}
}
+
/* Fix basic header fields: give the mailbox a uidvalidity if it clearly
   has messages but none, and fix the log position if a log file is open. */
static void
mail_index_fsck_header(struct mail_index *index, struct mail_index_map *map,
		       struct mail_index_header *hdr)
{
	/* mail_index_map_check_header() has already checked that the index
	   isn't completely broken. */
	if (hdr->uid_validity == 0 && hdr->next_uid != 1)
		hdr->uid_validity = ioloop_time;

	if (index->log->head != NULL)
		mail_index_fsck_log_pos(index, map, hdr);
}
+
+static bool
+array_has_name(const ARRAY_TYPE(const_string) *names, const char *name)
+{
+ const char *arr_name;
+
+ array_foreach_elem(names, arr_name) {
+ if (strcmp(arr_name, name) == 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+
/* Estimate how many keywords are in use by scanning every record's keyword
   bitmask and tracking the highest set bit (+1). Used when the keyword
   extension header itself is too broken to trust its keywords_count. */
static unsigned int
mail_index_fsck_find_keyword_count(struct mail_index_map *map,
				   const struct mail_index_ext_header *ext_hdr)
{
	const struct mail_index_record *rec;
	const uint8_t *kw;
	unsigned int r, i, j, cur, max = 0, kw_pos, kw_size;

	kw_pos = ext_hdr->record_offset;
	kw_size = ext_hdr->record_size;

	rec = map->rec_map->records;
	for (r = 0; r < map->rec_map->records_count; r++) {
		/* the record's keyword bitmask lives kw_pos bytes in */
		kw = CONST_PTR_OFFSET(rec, kw_pos);
		for (i = cur = 0; i < kw_size; i++) {
			if (kw[i] != 0) {
				for (j = 0; j < 8; j++) {
					if ((kw[i] & (1 << j)) != 0)
						cur = i * 8 + j + 1;
				}
			}
		}
		if (cur > max) {
			max = cur;
			/* all bits usable are set; can't grow further */
			if (max == kw_size*8)
				return max;
		}
		rec = CONST_PTR_OFFSET(rec, map->hdr.record_size);
	}
	return max;
}
+
/* Returns whether the keyword name starting at buffer[pos] is valid: it
   must be NUL-terminated within the first size bytes and contain no
   control characters (checked with the high bit masked off, so e.g.
   0x80..0x9f are also rejected). */
static bool
keyword_name_is_valid(const char *buffer, unsigned int pos, unsigned int size)
{
	unsigned int i;

	for (i = pos; i < size; i++) {
		unsigned char c = (unsigned char)buffer[i];

		if (c == '\0') {
			/* terminator found inside the area -> valid */
			return true;
		}
		if ((c & 0x7f) < 32) {
			/* control characters aren't valid */
			return false;
		}
	}
	/* ran out of buffer without finding the NUL terminator */
	return false;
}
+
/* Validate and, if needed, rebuild the keywords extension header in the
   map's header copy buffer: recover keywords_count when the header is
   unparseable, replace invalid/duplicate keyword names with unique
   "unknown-<n>" names, and rewrite the extension header in place. If the
   rewritten header grows, the following header data is shifted and both
   hdr->header_size and *offset_p are advanced by the difference. */
static void
mail_index_fsck_keywords(struct mail_index *index, struct mail_index_map *map,
			 struct mail_index_header *hdr,
			 const struct mail_index_ext_header *ext_hdr,
			 unsigned int ext_offset, unsigned int *offset_p)
{
	const struct mail_index_keyword_header *kw_hdr;
	struct mail_index_keyword_header *new_kw_hdr;
	const struct mail_index_keyword_header_rec *kw_rec;
	struct mail_index_keyword_header_rec new_kw_rec;
	const char *name, *name_buffer, **name_array;
	unsigned int i, j, name_pos, name_size, rec_pos, hdr_offset, diff;
	unsigned int changed_count, keywords_count, name_base_pos;
	ARRAY_TYPE(const_string) names;
	buffer_t *dest;
	bool changed = FALSE;

	/* layout: kw_hdr, then keywords_count records, then the name area */
	hdr_offset = ext_offset +
		mail_index_map_ext_hdr_offset(sizeof(MAIL_INDEX_EXT_KEYWORDS)-1);
	kw_hdr = MAIL_INDEX_MAP_HDR_OFFSET(map, hdr_offset);
	keywords_count = kw_hdr->keywords_count;

	kw_rec = (const void *)(kw_hdr + 1);
	name_buffer = (const char *)(kw_rec + keywords_count);

	name_pos = (size_t)(name_buffer - (const char *)kw_hdr);
	if (name_pos > ext_hdr->hdr_size) {
		/* the header is completely broken */
		keywords_count =
			mail_index_fsck_find_keyword_count(map, ext_hdr);
		mail_index_fsck_error(index, "Assuming keywords_count = %u",
				      keywords_count);
		kw_rec = NULL;
		name_size = 0;
		changed = TRUE;
	} else {
		name_size = ext_hdr->hdr_size - name_pos;
	}

	/* create keyword name array. invalid keywords are added as
	   empty strings */
	t_array_init(&names, keywords_count);
	for (i = 0; i < keywords_count; i++) {
		if (name_size == 0 ||
		    !keyword_name_is_valid(name_buffer, kw_rec[i].name_offset,
					   name_size))
			name = "";
		else
			name = name_buffer + kw_rec[i].name_offset;

		if (*name != '\0' && array_has_name(&names, name)) {
			/* duplicate */
			name = "";
		}
		array_push_back(&names, &name);
	}

	/* give new names to invalid keywords */
	changed_count = 0;
	name_array = array_front_modifiable(&names);
	for (i = j = 0; i < keywords_count; i++) {
		/* keep generating unknown-<j> until one is unused */
		while (name_array[i][0] == '\0') {
			name = t_strdup_printf("unknown-%d", j++);
			if (!array_has_name(&names, name)) {
				name_array[i] = name;
				changed = TRUE;
				changed_count++;
			}
		}
	}

	if (!changed) {
		/* nothing was broken */
		return;
	}

	mail_index_fsck_error(index, "Renamed %u keywords to unknown-*",
			      changed_count);

	/* rebuild the extension header contents into a temporary buffer */
	dest = buffer_create_dynamic(default_pool,
				     I_MAX(ext_hdr->hdr_size, 128));
	new_kw_hdr = buffer_append_space_unsafe(dest, sizeof(*new_kw_hdr));
	new_kw_hdr->keywords_count = keywords_count;

	/* add keyword records so we can start appending names directly */
	rec_pos = dest->used;
	i_zero(&new_kw_rec);
	(void)buffer_append_space_unsafe(dest, keywords_count * sizeof(*kw_rec));

	/* write the actual records and names */
	name_base_pos = dest->used;
	for (i = 0; i < keywords_count; i++) {
		new_kw_rec.name_offset = dest->used - name_base_pos;
		buffer_write(dest, rec_pos, &new_kw_rec, sizeof(new_kw_rec));
		rec_pos += sizeof(*kw_rec);

		buffer_append(dest, name_array[i], strlen(name_array[i]) + 1);
	}

	/* keep the header size at least the same size as before */
	if (dest->used < ext_hdr->hdr_size)
		buffer_append_zero(dest, ext_hdr->hdr_size - dest->used);

	if (dest->used > ext_hdr->hdr_size) {
		/* need to resize the header */
		struct mail_index_ext_header new_ext_hdr;

		/* shift the rest of the header copy forward by diff bytes */
		diff = dest->used - ext_hdr->hdr_size;
		buffer_copy(map->hdr_copy_buf, hdr_offset + diff,
			    map->hdr_copy_buf, hdr_offset, SIZE_MAX);
		hdr->header_size += diff;
		*offset_p += diff;

		new_ext_hdr = *ext_hdr;
		new_ext_hdr.hdr_size += diff;
		buffer_write(map->hdr_copy_buf, ext_offset,
			     &new_ext_hdr, sizeof(new_ext_hdr));
	}

	i_assert(hdr_offset + dest->used <= map->hdr_copy_buf->used);
	buffer_write(map->hdr_copy_buf, hdr_offset, dest->data, dest->used);

	/* keywords changed unexpectedly, so all views are broken now */
	index->inconsistency_id++;

	buffer_free(&dest);
}
+
/* Walk all extension headers in the header copy buffer. Extensions that
   run past the header, fail validation or are duplicates get dropped by
   shifting the following data back over them; the keywords extension
   additionally gets its own fsck pass. */
static void
mail_index_fsck_extensions(struct mail_index *index, struct mail_index_map *map,
			   struct mail_index_header *hdr)
{
	const struct mail_index_ext_header *ext_hdr;
	ARRAY_TYPE(const_string) names;
	const char *name, *error;
	unsigned int offset, next_offset, i;

	t_array_init(&names, 64);
	offset = MAIL_INDEX_HEADER_SIZE_ALIGN(hdr->base_header_size);
	for (i = 0; offset < hdr->header_size; i++) {
		/* mail_index_map_ext_get_next() uses map->hdr, so make sure
		   it's up-to-date */
		map->hdr = *hdr;

		next_offset = offset;
		if (mail_index_map_ext_get_next(map, &next_offset,
						&ext_hdr, &name) < 0) {
			/* the extension continued outside header, drop it */
			mail_index_fsck_error(index,
				"Dropped extension #%d (%s) "
				"with invalid header size",
				i, name);
			hdr->header_size = offset;
			buffer_set_used_size(map->hdr_copy_buf, hdr->header_size);
			break;
		}
		if (mail_index_map_ext_hdr_check(hdr, ext_hdr, name,
						 &error) < 0) {
			mail_index_fsck_error(index,
				"Dropped broken extension #%d (%s)", i, name);
		} else if (array_has_name(&names, name)) {
			mail_index_fsck_error(index,
				"Dropped duplicate extension %s", name);
		} else {
			/* extension is good — keep it. name may change if
			   header buffer is changed */
			name = t_strdup(name);

			if (strcmp(name, MAIL_INDEX_EXT_KEYWORDS) == 0) {
				mail_index_fsck_keywords(index, map, hdr,
							 ext_hdr, offset,
							 &next_offset);
			}
			array_push_back(&names, &name);
			offset = next_offset;
			continue;
		}

		/* drop the field: shift following data down over it.
		   offset intentionally stays the same for the next round. */
		hdr->header_size -= next_offset - offset;
		buffer_copy(map->hdr_copy_buf, offset,
			    map->hdr_copy_buf, next_offset, SIZE_MAX);
		buffer_set_used_size(map->hdr_copy_buf, hdr->header_size);
	}
}
+
/* Rebuild all per-message header counters from the records themselves,
   dropping records whose UID is zero or not strictly ascending, and fix
   the next_uid / lowwater / first_recent_uid fields accordingly. */
static void
mail_index_fsck_records(struct mail_index *index, struct mail_index_map *map,
			struct mail_index_header *hdr)
{
	struct mail_index_record *rec, *next_rec;
	uint32_t i, last_uid;
	bool logged_unordered_uids = FALSE, logged_zero_uids = FALSE;
	bool records_dropped = FALSE;

	/* counters are recomputed from scratch below */
	hdr->messages_count = 0;
	hdr->seen_messages_count = 0;
	hdr->deleted_messages_count = 0;

	hdr->first_unseen_uid_lowwater = 0;
	hdr->first_deleted_uid_lowwater = 0;

	rec = map->rec_map->records; last_uid = 0;
	for (i = 0; i < map->rec_map->records_count; ) {
		next_rec = PTR_OFFSET(rec, hdr->record_size);
		if (rec->uid <= last_uid) {
			/* log an error once, and skip this record */
			if (rec->uid == 0) {
				if (!logged_zero_uids) {
					mail_index_fsck_error(index,
						"Record UIDs have zeroes");
					logged_zero_uids = TRUE;
				}
			} else {
				if (!logged_unordered_uids) {
					mail_index_fsck_error(index,
						"Record UIDs unordered");
					logged_unordered_uids = TRUE;
				}
			}
			/* not the fastest way when we're skipping lots of
			   records, but this should happen rarely so don't
			   bother optimizing. */
			memmove(rec, next_rec, hdr->record_size *
				(map->rec_map->records_count - i - 1));
			map->rec_map->records_count--;
			records_dropped = TRUE;
			/* i and rec deliberately not advanced: the next
			   record has been moved into this slot */
			continue;
		}

		hdr->messages_count++;
		if ((rec->flags & MAIL_SEEN) != 0)
			hdr->seen_messages_count++;
		if ((rec->flags & MAIL_DELETED) != 0)
			hdr->deleted_messages_count++;

		/* lowwaters get the first matching UID seen in order */
		if ((rec->flags & MAIL_SEEN) == 0 &&
		    hdr->first_unseen_uid_lowwater == 0)
			hdr->first_unseen_uid_lowwater = rec->uid;
		if ((rec->flags & MAIL_DELETED) != 0 &&
		    hdr->first_deleted_uid_lowwater == 0)
			hdr->first_deleted_uid_lowwater = rec->uid;

		last_uid = rec->uid;
		rec = next_rec;
		i++;
	}

	if (records_dropped) {
		/* all existing views are broken now */
		index->inconsistency_id++;
	}

	if (hdr->next_uid <= last_uid) {
		mail_index_fsck_error(index, "next_uid %u -> %u",
				      hdr->next_uid, last_uid+1);
		hdr->next_uid = last_uid+1;
	}

	/* unset lowwaters default to next_uid; first_recent_uid must be
	   in [1, next_uid] */
	if (hdr->first_unseen_uid_lowwater == 0)
		hdr->first_unseen_uid_lowwater = hdr->next_uid;
	if (hdr->first_deleted_uid_lowwater == 0)
		hdr->first_deleted_uid_lowwater = hdr->next_uid;
	if (hdr->first_recent_uid > hdr->next_uid)
		hdr->first_recent_uid = hdr->next_uid;
	if (hdr->first_recent_uid == 0)
		hdr->first_recent_uid = 1;

	CHECK(uid_validity, !=);
	CHECK(messages_count, !=);
	CHECK(seen_messages_count, !=);
	CHECK(deleted_messages_count, !=);

	CHECK(first_unseen_uid_lowwater, <);
	CHECK(first_deleted_uid_lowwater, <);
	CHECK(first_recent_uid, !=);
}
+
/* Run all fsck passes (header, extensions, records) on a local copy of
   the map header, then install the fixed header with the FSCKD flag set. */
static void
mail_index_fsck_map(struct mail_index *index, struct mail_index_map *map)
{
	struct mail_index_header hdr;

	if (index->log->head != NULL) {
		/* Remember the log head position. If we go back in the index's
		   head offset, ignore errors in the log up to this offset. */
		mail_transaction_log_get_head(index->log,
					      &index->fsck_log_head_file_seq,
					      &index->fsck_log_head_file_offset);
	}
	hdr = map->hdr;

	mail_index_fsck_header(index, map, &hdr);
	mail_index_fsck_extensions(index, map, &hdr);
	mail_index_fsck_records(index, map, &hdr);

	hdr.flags |= MAIL_INDEX_HDR_FLAG_FSCKD;
	map->hdr = hdr;
	i_assert(map->hdr_copy_buf->used == map->hdr.header_size);
}
+
/* Fsck the index: fix its in-memory map on a private clone and write it
   out, taking the transaction log sync lock unless the caller already
   holds it. Returns 0 on success, -1 if creating or locking the log
   fails. Sets index->fscked so callers can detect the repair. */
int mail_index_fsck(struct mail_index *index)
{
	bool orig_locked = index->log_sync_locked;
	struct mail_index_map *map;
	uint32_t file_seq;
	uoff_t file_offset;

	i_warning("fscking index file %s", index->filepath);

	index->fscked = TRUE;

	if (index->log->head == NULL) {
		/* we're trying to open the index files, but there wasn't
		   any .log file. */
		if (mail_transaction_log_create(index->log, FALSE) < 0)
			return -1;
	}

	if (!orig_locked) {
		if (mail_transaction_log_sync_lock(index->log, "fscking",
						   &file_seq, &file_offset) < 0)
			return -1;
	}

	/* work on a clone so existing references to the old map stay valid */
	map = mail_index_map_clone(index->map);
	mail_index_unmap(&index->map);
	index->map = map;

	/* data stack frame for the t_array/t_strdup allocations in fsck */
	T_BEGIN {
		mail_index_fsck_map(index, map);
	} T_END;

	mail_index_write(index, FALSE, "fscking");

	if (!orig_locked)
		mail_transaction_log_sync_unlock(index->log, "fscking");
	return 0;
}
+
/* Like mail_index_fsck(), but the caller must already hold the log sync
   lock. With the lock held the failure paths (log creation/locking) are
   presumably unreachable, so the result is asserted to be 0 —
   TODO confirm. */
void mail_index_fsck_locked(struct mail_index *index)
{
	int ret;

	i_assert(index->log_sync_locked);
	ret = mail_index_fsck(index);
	i_assert(ret == 0);
}
+
+bool mail_index_reset_fscked(struct mail_index *index)
+{
+ bool ret = index->fscked;
+
+ index->fscked = FALSE;
+ return ret;
+}
diff --git a/src/lib-index/mail-index-lock.c b/src/lib-index/mail-index-lock.c
new file mode 100644
index 0000000..cdf62e4
--- /dev/null
+++ b/src/lib-index/mail-index-lock.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+/*
+ Locking should never fail or timeout. Exclusive locks must be kept as short
+ time as possible. Shared locks can be long living, so if we can't get
+ exclusive lock directly, we'll recreate the index. That means the shared
+ lock holders can keep using the old file.
+
+ lock_id is used to figure out if acquired lock is still valid. When index
+ file is reopened, the lock_id can become invalid. It doesn't matter however,
+ as no-one's going to modify the old file anymore.
+
+ lock_id also tells us if we're referring to a shared or an exclusive lock.
+ This allows us to drop back to shared locking once all exclusive locks
+ are dropped. Shared locks have even numbers, exclusive locks have odd numbers.
+ The number is increased by two every time the lock is dropped or index file
+ is reopened.
+*/
+
+#include "lib.h"
+#include "nfs-workarounds.h"
+#include "mail-index-private.h"
+
+#define MAIL_INDEX_SHARED_LOCK_TIMEOUT 120
+
/* Lock the given fd with the index's configured lock method. In-memory
   indexes (fd == -1) trivially succeed. Returns file_wait_lock()'s result
   (1 = locked, 0 = timed out, -1 = error, per its convention — errors are
   also logged here). */
int mail_index_lock_fd(struct mail_index *index, const char *path, int fd,
		       int lock_type, unsigned int timeout_secs,
		       struct file_lock **lock_r)
{
	const char *error;
	int ret;

	if (fd == -1) {
		i_assert(MAIL_INDEX_IS_IN_MEMORY(index));
		return 1;
	}

	struct file_lock_settings lock_set = {
		.lock_method = index->set.lock_method,
	};
	ret = file_wait_lock(fd, path, lock_type, &lock_set, timeout_secs,
			     lock_r, &error);
	if (ret < 0)
		e_error(index->event, "%s", error);
	return ret;
}
+
/* Flush the NFS attribute/data cache for the file, if NFS flushing is
   enabled for this index. Uses the cheaper "locked" flush when we hold a
   lock whose method lets NFS piggyback cache invalidation on the lock. */
void mail_index_flush_read_cache(struct mail_index *index, const char *path,
				 int fd, bool locked)
{
	if ((index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) == 0)
		return;

	/* Assume flock() is emulated with fcntl(), because that's how most
	   OSes work nowadays. */
	if (locked &&
	    (index->set.lock_method == FILE_LOCK_METHOD_FCNTL ||
	     index->set.lock_method == FILE_LOCK_METHOD_FLOCK)) {
		nfs_flush_read_cache_locked(path, fd);
	} else {
		nfs_flush_read_cache_unlocked(path, fd);
	}
}
diff --git a/src/lib-index/mail-index-map-hdr.c b/src/lib-index/mail-index-map-hdr.c
new file mode 100644
index 0000000..3287a28
--- /dev/null
+++ b/src/lib-index/mail-index-map-hdr.c
@@ -0,0 +1,359 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "mail-index-private.h"
+
/* Parse and register all extension headers found after the base header.
   Returns 0 on success, -1 if any extension is truncated, broken or
   duplicated (the caller treats the map as corrupted). */
int mail_index_map_parse_extensions(struct mail_index_map *map)
{
	struct mail_index *index = map->index;
	const struct mail_index_ext_header *ext_hdr;
	unsigned int i, old_count, offset;
	const char *name, *error;
	uint32_t ext_id, ext_map_idx, ext_offset;

	/* extension headers always start from 64bit offsets, so if base header
	   doesn't happen to be 64bit aligned we'll skip some bytes */
	offset = MAIL_INDEX_HEADER_SIZE_ALIGN(map->hdr.base_header_size);
	if (offset >= map->hdr.header_size && map->extension_pool == NULL) {
		/* nothing to do, skip allocations and all */
		return 0;
	}

	old_count = array_count(&index->extensions);
	mail_index_map_init_extbufs(map, old_count + 5);

	/* pre-fill the index-ext -> map-ext mapping with "not present" */
	ext_id = (uint32_t)-1;
	for (i = 0; i < old_count; i++)
		array_push_back(&map->ext_id_map, &ext_id);

	for (i = 0; offset < map->hdr.header_size; i++) {
		ext_offset = offset;

		if (mail_index_map_ext_get_next(map, &offset,
						&ext_hdr, &name) < 0) {
			mail_index_set_error(index, "Corrupted index file %s: "
				"Header extension #%d (%s) goes outside header",
				index->filepath, i, name);
			return -1;
		}

		if (mail_index_map_ext_hdr_check(&map->hdr, ext_hdr,
						 name, &error) < 0) {
			mail_index_set_error(index, "Corrupted index file %s: "
					     "Broken extension #%d (%s): %s",
					     index->filepath, i, name, error);
			return -1;
		}
		if (mail_index_map_lookup_ext(map, name, &ext_map_idx)) {
			mail_index_set_error(index, "Corrupted index file %s: "
					     "Duplicate header extension %s",
					     index->filepath, name);
			return -1;
		}

		(void)mail_index_map_register_ext(map, name, ext_offset, ext_hdr);
	}
	return 0;
}
+
/* Parse the keywords extension header and (re)build the map's
   keyword-file-index -> index-wide-keyword-index mapping. Keywords may
   only ever be appended; any removal, rename or reorder relative to the
   previously parsed state is reported as corruption. Returns 0 on
   success, -1 on corruption. */
int mail_index_map_parse_keywords(struct mail_index_map *map)
{
	struct mail_index *index = map->index;
	const struct mail_index_ext *ext;
	const struct mail_index_keyword_header *kw_hdr;
	const struct mail_index_keyword_header_rec *kw_rec;
	const char *name;
	unsigned int i, name_area_end_offset, old_count;
	uint32_t idx;

	if (!mail_index_map_lookup_ext(map, MAIL_INDEX_EXT_KEYWORDS, &idx)) {
		/* no keywords extension in this map */
		if (array_is_created(&map->keyword_idx_map))
			array_clear(&map->keyword_idx_map);
		return 0;
	}
	ext = array_idx(&map->extensions, idx);

	/* Extension header contains:
	   - struct mail_index_keyword_header
	   - struct mail_index_keyword_header_rec * keywords_count
	   - const char names[] * keywords_count

	   The mail_index_keyword_header_rec are rather unnecessary nowadays.
	   They were originally an optimization when dovecot.index header kept
	   changing constantly, but nowadays the changes are usually read from
	   the .log changes, so re-reading dovecot.index header isn't common.
	   In a later version we could even remove it.
	*/
	i_assert(ext->hdr_offset < map->hdr.header_size);
	kw_hdr = MAIL_INDEX_MAP_HDR_OFFSET(map, ext->hdr_offset);
	kw_rec = (const void *)(kw_hdr + 1);
	name = (const char *)(kw_rec + kw_hdr->keywords_count);

	old_count = !array_is_created(&map->keyword_idx_map) ? 0 :
		array_count(&map->keyword_idx_map);

	/* make sure the header is valid */
	if (kw_hdr->keywords_count < old_count) {
		mail_index_set_error(index, "Corrupted index file %s: "
				     "Keywords removed unexpectedly",
				     index->filepath);
		return -1;
	}

	if ((size_t)(name - (const char *)kw_hdr) > ext->hdr_size) {
		mail_index_set_error(index, "Corrupted index file %s: "
				     "keywords_count larger than header size",
				     index->filepath);
		return -1;
	}

	/* every record's name_offset must stay inside the name area */
	name_area_end_offset = (const char *)kw_hdr + ext->hdr_size - name;
	for (i = 0; i < kw_hdr->keywords_count; i++) {
		if (kw_rec[i].name_offset > name_area_end_offset) {
			mail_index_set_error(index, "Corrupted index file %s: "
				"name_offset points outside allocated header",
				index->filepath);
			return -1;
		}
	}
	/* NOTE(review): if the name area is empty (name_area_end_offset == 0,
	   possible when keywords_count == 0 and hdr_size exactly covers the
	   records), this reads name[-1] — a 1-byte underflow. Verify whether
	   callers guarantee a non-empty name area here. */
	if (name[name_area_end_offset-1] != '\0') {
		mail_index_set_error(index, "Corrupted index file %s: "
				     "Keyword header doesn't end with NUL",
				     index->filepath);
		return -1;
	}

	/* create file -> index mapping */
	if (!array_is_created(&map->keyword_idx_map))
		i_array_init(&map->keyword_idx_map, kw_hdr->keywords_count);

	size_t name_offset = 0;
	/* Check that existing headers are still the same. */
	for (i = 0; i < array_count(&map->keyword_idx_map); i++) {
		const char *keyword = name + kw_rec[i].name_offset;
		const unsigned int *old_idx;
		unsigned int kw_idx;

		if (kw_rec[i].name_offset != name_offset) {
			/* this shouldn't happen, but the old code didn't check
			   for this so for safety keep this as a warning. */
			e_warning(index->event,
				  "Corrupted index file %s: "
				  "Mismatching keyword name_offset",
				  index->filepath);
		}
		name_offset += strlen(keyword) + 1;

		old_idx = array_idx(&map->keyword_idx_map, i);
		if (!mail_index_keyword_lookup(index, keyword, &kw_idx) ||
		    kw_idx != *old_idx) {
			mail_index_set_error(index, "Corrupted index file %s: "
					     "Keywords changed unexpectedly",
					     index->filepath);
			return -1;
		}
	}

	/* Register the newly seen keywords */
	i = array_count(&map->keyword_idx_map);
	for (; i < kw_hdr->keywords_count; i++) {
		const char *keyword = name + kw_rec[i].name_offset;
		unsigned int kw_idx;

		if (kw_rec[i].name_offset != name_offset) {
			/* this shouldn't happen, but the old code didn't check
			   for this so for safety keep this as a warning. */
			e_warning(index->event,
				  "Corrupted index file %s: "
				  "Mismatching keyword name_offset",
				  index->filepath);
		}
		name_offset += strlen(keyword) + 1;

		if (*keyword == '\0') {
			mail_index_set_error(index, "Corrupted index file %s: "
					     "Empty keyword name in header",
					     index->filepath);
			return -1;
		}
		mail_index_keyword_lookup_or_create(index, keyword, &kw_idx);
		array_push_back(&map->keyword_idx_map, &kw_idx);
	}
	return 0;
}
+
/* Check that the on-disk header is compatible with this build and this
   index object. Returns TRUE if compatible; FALSE with *error_r set
   otherwise. Side effect: if the header's indexid differs from the
   index's, the index adopts the new indexid (logging an error when the
   old one was nonzero) and notifies the transaction log. */
bool mail_index_check_header_compat(struct mail_index *index,
				    const struct mail_index_header *hdr,
				    uoff_t file_size, const char **error_r)
{
	enum mail_index_header_compat_flags compat_flags = 0;

#ifndef WORDS_BIGENDIAN
	compat_flags |= MAIL_INDEX_COMPAT_LITTLE_ENDIAN;
#endif

	if (hdr->major_version != MAIL_INDEX_MAJOR_VERSION) {
		/* major version change */
		*error_r = t_strdup_printf("Major version changed (%u != %u)",
			hdr->major_version, MAIL_INDEX_MAJOR_VERSION);
		return FALSE;
	}
	if ((hdr->flags & MAIL_INDEX_HDR_FLAG_CORRUPTED) != 0) {
		/* we've already complained about it */
		*error_r = "Header's corrupted flag is set";
		return FALSE;
	}

	if (hdr->compat_flags != compat_flags) {
		/* architecture change */
		*error_r = "CPU architecture changed";
		return FALSE;
	}

	if (hdr->base_header_size < MAIL_INDEX_HEADER_MIN_SIZE ||
	    hdr->header_size < hdr->base_header_size) {
		*error_r = t_strdup_printf(
			"Corrupted header sizes (base %u, full %u)",
			hdr->base_header_size, hdr->header_size);
		return FALSE;
	}
	if (hdr->header_size > file_size) {
		*error_r = t_strdup_printf(
			"Header size is larger than file (%u > %"PRIuUOFF_T")",
			hdr->header_size, file_size);
		return FALSE;
	}

	if (hdr->indexid != index->indexid) {
		if (index->indexid != 0) {
			mail_index_set_error(index, "Index file %s: "
					     "indexid changed: %u -> %u",
					     index->filepath, index->indexid,
					     hdr->indexid);
		}
		index->indexid = hdr->indexid;
		mail_transaction_log_indexid_changed(index->log);
	}
	return TRUE;
}
+
/* Clear the (obsolete) MAIL_RECENT flag from every record in the map.
   Used when upgrading maps written by old index versions. */
static void mail_index_map_clear_recent_flags(struct mail_index_map *map)
{
	struct mail_index_record *rec;
	uint32_t seq;

	for (seq = 1; seq <= map->hdr.messages_count; seq++) {
		rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
		rec->flags &= ENUM_NEGATE(MAIL_RECENT);
	}
}
+
/* Sanity-check the map's header against its records, upgrading headers
   written by older minor versions in place. Returns 1 when the header is
   OK, and 0 or -1 with *error_r set when it's broken (presumably -1 marks
   the cases fsck can't repair — the record_size and uid=0 checks —
   TODO confirm against callers). */
int mail_index_map_check_header(struct mail_index_map *map,
				const char **error_r)
{
	struct mail_index *index = map->index;
	const struct mail_index_header *hdr = &map->hdr;

	/* UOFF_T_MAX: file size isn't known/checked here */
	if (!mail_index_check_header_compat(index, hdr, UOFF_T_MAX, error_r))
		return 0;

	/* following some extra checks that only take a bit of CPU */
	if (hdr->record_size < sizeof(struct mail_index_record)) {
		*error_r = t_strdup_printf(
			"record_size too small (%u < %zu)",
			hdr->record_size, sizeof(struct mail_index_record));
		return -1;
	}

	if (hdr->uid_validity == 0 && hdr->next_uid != 1) {
		*error_r = t_strdup_printf(
			"uidvalidity=0, but next_uid=%u", hdr->next_uid);
		return 0;
	}
	if (hdr->next_uid == 0) {
		*error_r = "next_uid=0";
		return 0;
	}
	if (hdr->messages_count > map->rec_map->records_count) {
		*error_r = t_strdup_printf(
			"messages_count is higher in header than record map (%u > %u)",
			hdr->messages_count, map->rec_map->records_count);
		return 0;
	}

	if (hdr->seen_messages_count > hdr->messages_count) {
		*error_r = t_strdup_printf(
			"seen_messages_count %u > messages_count %u",
			hdr->seen_messages_count, hdr->messages_count);
		return 0;
	}
	if (hdr->deleted_messages_count > hdr->messages_count) {
		*error_r = t_strdup_printf(
			"deleted_messages_count %u > messages_count %u",
			hdr->deleted_messages_count, hdr->messages_count);
		return 0;
	}
	/* upgrade from older minor versions; each case falls through so all
	   later upgrade steps are applied as well */
	switch (hdr->minor_version) {
	case 0:
		/* upgrade silently from v1.0 */
		map->hdr.unused_old_recent_messages_count = 0;
		if (hdr->first_recent_uid == 0)
			map->hdr.first_recent_uid = 1;
		if (index->need_recreate == NULL)
			index->need_recreate = i_strdup("Upgrading from index version 1.0");
		/* fall through */
	case 1:
		/* pre-v1.1.rc6: make sure the \Recent flags are gone */
		mail_index_map_clear_recent_flags(map);
		map->hdr.minor_version = MAIL_INDEX_MINOR_VERSION;
		/* fall through */
	case 2:
		/* pre-v2.2 (although should have been done in v2.1 already):
		   make sure the old unused fields are cleared */
		map->hdr.unused_old_sync_size_part1 = 0;
		map->hdr.log2_rotate_time = 0;
		map->hdr.last_temp_file_scan = 0;
	}
	if (hdr->first_recent_uid == 0) {
		*error_r = "first_recent_uid=0";
		return 0;
	}
	if (hdr->first_recent_uid > hdr->next_uid) {
		*error_r = t_strdup_printf(
			"first_recent_uid %u > next_uid %u",
			hdr->first_recent_uid, hdr->next_uid);
		return 0;
	}
	if (hdr->first_unseen_uid_lowwater > hdr->next_uid) {
		*error_r = t_strdup_printf(
			"first_unseen_uid_lowwater %u > next_uid %u",
			hdr->first_unseen_uid_lowwater, hdr->next_uid);
		return 0;
	}
	if (hdr->first_deleted_uid_lowwater > hdr->next_uid) {
		*error_r = t_strdup_printf(
			"first_deleted_uid_lowwater %u > next_uid %u",
			hdr->first_deleted_uid_lowwater, hdr->next_uid);
		return 0;
	}

	if (hdr->messages_count > 0) {
		/* last message's UID must be smaller than next_uid.
		   also make sure it's not zero. */
		const struct mail_index_record *rec;

		rec = MAIL_INDEX_REC_AT_SEQ(map, hdr->messages_count);
		if (rec->uid == 0) {
			*error_r = "last message has uid=0";
			return -1;
		}
		if (rec->uid >= hdr->next_uid) {
			*error_r = t_strdup_printf(
				"last message uid %u >= next_uid %u",
				rec->uid, hdr->next_uid);
			return 0;
		}
	}
	return 1;
}
diff --git a/src/lib-index/mail-index-map-read.c b/src/lib-index/mail-index-map-read.c
new file mode 100644
index 0000000..6999bf1
--- /dev/null
+++ b/src/lib-index/mail-index-map-read.c
@@ -0,0 +1,519 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "nfs-workarounds.h"
+#include "mmap-util.h"
+#include "read-full.h"
+#include "mail-index-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-transaction-log-private.h"
+#include "mail-index-modseq.h"
+#include "ioloop.h"
+
+/* Copy the on-disk base header into map->hdr. If the file's base header is
+   smaller than our in-memory struct (older index version), the copy is
+   zero-padded so the newer trailing fields don't contain garbage. */
+static void mail_index_map_copy_hdr(struct mail_index_map *map,
+ const struct mail_index_header *hdr)
+{
+ if (hdr->base_header_size < sizeof(map->hdr)) {
+ /* header smaller than ours, make a copy so our newer headers
+ won't have garbage in them */
+ i_zero(&map->hdr);
+ memcpy(&map->hdr, hdr, hdr->base_header_size);
+ } else {
+ map->hdr = *hdr;
+ }
+
+ /* FIXME: backwards compatibility, remove later. In case this index is
+ accessed with Dovecot v1.0, avoid recent message counter errors. */
+ map->hdr.unused_old_recent_messages_count = 0;
+}
+
+/* mmap() the whole index file into map->rec_map. Returns 1 on success,
+   0 if the file is unusable (too small, wrong major version, corrupted
+   header) and -1 on failure (file too large, mmap() error). */
+static int mail_index_mmap(struct mail_index_map *map, uoff_t file_size)
+{
+ struct mail_index *index = map->index;
+ struct mail_index_record_map *rec_map = map->rec_map;
+ const struct mail_index_header *hdr;
+ const char *error;
+
+ i_assert(rec_map->mmap_base == NULL);
+
+ buffer_free(&rec_map->buffer);
+ if (file_size > SSIZE_T_MAX) {
+ /* too large file to map into memory */
+ mail_index_set_error(index, "Index file too large: %s",
+ index->filepath);
+ return -1;
+ }
+
+ /* MAP_PRIVATE: copy-on-write, so in-memory header fixups never
+ touch the file itself */
+ rec_map->mmap_base = mmap(NULL, file_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, index->fd, 0);
+ if (rec_map->mmap_base == MAP_FAILED) {
+ rec_map->mmap_base = NULL;
+ /* rate-limit logging to one mmap() error per second */
+ if (ioloop_time != index->last_mmap_error_time) {
+ index->last_mmap_error_time = ioloop_time;
+ mail_index_set_syscall_error(index, t_strdup_printf(
+ "mmap(size=%"PRIuUOFF_T")", file_size));
+ }
+ return -1;
+ }
+ rec_map->mmap_size = file_size;
+
+ hdr = rec_map->mmap_base;
+ if (rec_map->mmap_size >
+ offsetof(struct mail_index_header, major_version) &&
+ hdr->major_version != MAIL_INDEX_MAJOR_VERSION) {
+ /* major version change - handle silently */
+ return 0;
+ }
+
+ if (rec_map->mmap_size < MAIL_INDEX_HEADER_MIN_SIZE) {
+ mail_index_set_error(index, "Corrupted index file %s: "
+ "File too small (%zu)",
+ index->filepath, rec_map->mmap_size);
+ return 0;
+ }
+
+ if (!mail_index_check_header_compat(index, hdr, rec_map->mmap_size, &error)) {
+ /* Can't use this file */
+ mail_index_set_error(index, "Corrupted index file %s: %s",
+ index->filepath, error);
+ return 0;
+ }
+
+ rec_map->mmap_used_size = hdr->header_size +
+ hdr->messages_count * hdr->record_size;
+
+ if (rec_map->mmap_used_size <= rec_map->mmap_size)
+ rec_map->records_count = hdr->messages_count;
+ else {
+ /* messages_count claims more records than fit in the file -
+ clamp the count to what is actually mapped */
+ rec_map->records_count =
+ (rec_map->mmap_size - hdr->header_size) /
+ hdr->record_size;
+ rec_map->mmap_used_size = hdr->header_size +
+ rec_map->records_count * hdr->record_size;
+ mail_index_set_error(index, "Corrupted index file %s: "
+ "messages_count too large (%u > %u)",
+ index->filepath, hdr->messages_count,
+ rec_map->records_count);
+ }
+
+ mail_index_map_copy_hdr(map, hdr);
+ buffer_set_used_size(map->hdr_copy_buf, 0);
+ buffer_append(map->hdr_copy_buf, rec_map->mmap_base, hdr->header_size);
+
+ rec_map->records = PTR_OFFSET(rec_map->mmap_base, map->hdr.header_size);
+ return 1;
+}
+
+/* Read the start of the index file into buf (up to buf_size bytes),
+   looping until at least sizeof(struct mail_index_header) bytes are read
+   or pread() stops making progress. Returns the last pread() result
+   (<0 error, 0 EOF, >0 otherwise); *pos_r is the total bytes read. */
+static int mail_index_read_header(struct mail_index *index,
+ void *buf, size_t buf_size, size_t *pos_r)
+{
+ size_t pos;
+ int ret;
+
+ memset(buf, 0, sizeof(struct mail_index_header));
+
+ /* try to read the whole header, but it's not necessarily an error to
+ read less since the older versions of the index format could be
+ smaller. Request reading up to buf_size, but accept if we only got
+ the header. */
+ pos = 0;
+ do {
+ ret = pread(index->fd, PTR_OFFSET(buf, pos),
+ buf_size - pos, pos);
+ if (ret > 0)
+ pos += ret;
+ } while (ret > 0 && pos < sizeof(struct mail_index_header));
+
+ *pos_r = pos;
+ return ret;
+}
+
+/* Read the whole index file into memory with pread() instead of mmap().
+   Returns 1 on success, 0 if the file is unusable (or was replaced under
+   us - then *retry_r is set to TRUE when try_retry allows another pass),
+   -1 on I/O error. */
+static int
+mail_index_try_read_map(struct mail_index_map *map,
+ uoff_t file_size, bool *retry_r, bool try_retry)
+{
+ struct mail_index *index = map->index;
+ const struct mail_index_header *hdr;
+ unsigned char read_buf[IO_BLOCK_SIZE];
+ const char *error;
+ const void *buf;
+ void *data = NULL;
+ ssize_t ret;
+ size_t pos, records_size, initial_buf_pos = 0;
+ unsigned int records_count = 0, extra;
+
+ i_assert(map->rec_map->mmap_base == NULL);
+
+ *retry_r = FALSE;
+ ret = mail_index_read_header(index, read_buf, sizeof(read_buf), &pos);
+ buf = read_buf; hdr = buf;
+
+ if (pos > (ssize_t)offsetof(struct mail_index_header, major_version) &&
+ hdr->major_version != MAIL_INDEX_MAJOR_VERSION) {
+ /* major version change - handle silently */
+ return 0;
+ }
+
+ if (ret >= 0 && pos >= MAIL_INDEX_HEADER_MIN_SIZE &&
+ (ret > 0 || pos >= hdr->base_header_size)) {
+ if (!mail_index_check_header_compat(index, hdr, file_size, &error)) {
+ /* Can't use this file */
+ mail_index_set_error(index, "Corrupted index file %s: %s",
+ index->filepath, error);
+ return 0;
+ }
+
+ initial_buf_pos = pos;
+ if (pos > hdr->header_size)
+ pos = hdr->header_size;
+
+ /* place the base header into memory. */
+ buffer_set_used_size(map->hdr_copy_buf, 0);
+ buffer_append(map->hdr_copy_buf, buf, pos);
+
+ if (pos != hdr->header_size) {
+ /* @UNSAFE: read the rest of the header into memory */
+ data = buffer_append_space_unsafe(map->hdr_copy_buf,
+ hdr->header_size -
+ pos);
+ ret = pread_full(index->fd, data,
+ hdr->header_size - pos, pos);
+ }
+ }
+
+ if (ret > 0) {
+ /* header read, read the records now. */
+ records_size = (size_t)hdr->messages_count * hdr->record_size;
+ records_count = hdr->messages_count;
+
+ if (file_size - hdr->header_size < records_size ||
+ (hdr->record_size != 0 &&
+ records_size / hdr->record_size != hdr->messages_count)) {
+ /* messages_count is larger than fits in the file (or
+ the size multiplication overflowed) - clamp it */
+ records_count = (file_size - hdr->header_size) /
+ hdr->record_size;
+ records_size = (size_t)records_count * hdr->record_size;
+ mail_index_set_error(index, "Corrupted index file %s: "
+ "messages_count too large (%u > %u)",
+ index->filepath, hdr->messages_count,
+ records_count);
+ }
+
+ if (map->rec_map->buffer == NULL) {
+ map->rec_map->buffer =
+ buffer_create_dynamic(default_pool,
+ records_size);
+ }
+
+ /* @UNSAFE */
+ buffer_set_used_size(map->rec_map->buffer, 0);
+ /* the initial header read may have pulled in the start of the
+ record data too - reuse those bytes instead of re-reading */
+ if (initial_buf_pos <= hdr->header_size)
+ extra = 0;
+ else {
+ extra = initial_buf_pos - hdr->header_size;
+ buffer_append(map->rec_map->buffer,
+ CONST_PTR_OFFSET(buf, hdr->header_size),
+ extra);
+ }
+ if (records_size > extra) {
+ data = buffer_append_space_unsafe(map->rec_map->buffer,
+ records_size - extra);
+ ret = pread_full(index->fd, data, records_size - extra,
+ hdr->header_size + extra);
+ }
+ }
+
+ if (ret < 0) {
+ if (errno == ESTALE && try_retry) {
+ /* a new index file was renamed over this one. */
+ *retry_r = TRUE;
+ return 0;
+ }
+ mail_index_set_syscall_error(index, "pread_full()");
+ return -1;
+ }
+ if (ret == 0) {
+ mail_index_set_error(index,
+ "Corrupted index file %s: File too small",
+ index->filepath);
+ return 0;
+ }
+
+ map->rec_map->records =
+ buffer_get_modifiable_data(map->rec_map->buffer, NULL);
+ map->rec_map->records_count = records_count;
+
+ mail_index_map_copy_hdr(map, hdr);
+ i_assert(map->hdr_copy_buf->used == map->hdr.header_size);
+ return 1;
+}
+
+/* Read the index file into memory, retrying up to
+   MAIL_INDEX_ESTALE_RETRY_COUNT times if the file is replaced while
+   we're reading it (ESTALE, e.g. index recreated over NFS). */
+static int mail_index_read_map(struct mail_index_map *map, uoff_t file_size)
+{
+ struct mail_index *index = map->index;
+ struct stat st;
+ unsigned int i;
+ int ret;
+ bool try_retry, retry;
+
+ for (i = 0;; i++) {
+ try_retry = i < MAIL_INDEX_ESTALE_RETRY_COUNT;
+ if (file_size == UOFF_T_MAX) {
+ /* fstat() below failed */
+ ret = 0;
+ retry = try_retry;
+ } else {
+ ret = mail_index_try_read_map(map, file_size,
+ &retry, try_retry);
+ }
+ if (ret != 0 || !retry)
+ break;
+
+ /* ESTALE - reopen index file */
+ mail_index_close_file(index);
+
+ ret = mail_index_try_open_only(index);
+ if (ret <= 0) {
+ if (ret == 0) {
+ /* the file was lost */
+ errno = ENOENT;
+ mail_index_set_syscall_error(index, "open()");
+ }
+ return -1;
+ }
+
+ if (fstat(index->fd, &st) == 0)
+ file_size = st.st_size;
+ else {
+ if (!ESTALE_FSTAT(errno)) {
+ mail_index_set_syscall_error(index, "fstat()");
+ return -1;
+ }
+ /* ESTALE from fstat() too - let the next iteration
+ reopen and retry */
+ file_size = UOFF_T_MAX;
+ }
+ }
+ return ret;
+}
+
+/* returns -1 = error, 0 = index files are unusable,
+ 1 = index files are usable or at least repairable */
+static int
+mail_index_map_latest_file(struct mail_index *index, const char **reason_r)
+{
+ struct mail_index_map *old_map, *new_map;
+ struct stat st;
+ uoff_t file_size;
+ bool use_mmap, reopened, unusable = FALSE;
+ const char *error;
+ int ret, try;
+
+ *reason_r = NULL;
+
+ index->reopen_main_index = FALSE;
+ ret = mail_index_reopen_if_changed(index, &reopened, reason_r);
+ if (ret <= 0) {
+ if (ret < 0)
+ return -1;
+
+ /* the index file is lost/broken. let's hope that we can
+ build it from the transaction log. */
+ return 1;
+ }
+ i_assert(index->fd != -1);
+
+ if ((index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0)
+ nfs_flush_attr_cache_fd_locked(index->filepath, index->fd);
+
+ if (fstat(index->fd, &st) == 0)
+ file_size = st.st_size;
+ else {
+ if (!ESTALE_FSTAT(errno)) {
+ mail_index_set_syscall_error(index, "fstat()");
+ return -1;
+ }
+ file_size = UOFF_T_MAX;
+ }
+
+ /* mmaping seems to be slower than just reading the file, so even if
+ mmap isn't disabled don't use it unless the file is large enough */
+ use_mmap = (index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) == 0 &&
+ file_size != UOFF_T_MAX && file_size > MAIL_INDEX_MMAP_MIN_SIZE;
+
+ new_map = mail_index_map_alloc(index);
+ if (use_mmap) {
+ ret = mail_index_mmap(new_map, file_size);
+ } else {
+ ret = mail_index_read_map(new_map, file_size);
+ }
+ if (ret == 0) {
+ /* the index files are unusable */
+ unusable = TRUE;
+ }
+
+ /* validate the new mapping; on corruption fsck and re-check,
+ giving up after the third attempt */
+ for (try = 0; ret > 0; try++) {
+ /* make sure the header is ok before using this mapping */
+ ret = mail_index_map_check_header(new_map, &error);
+ if (ret < 0) {
+ mail_index_set_error(index,
+ "Corrupted index file %s: %s",
+ index->filepath, error);
+ }
+ if (ret > 0) T_BEGIN {
+ if (mail_index_map_parse_extensions(new_map) < 0)
+ ret = 0;
+ else if (mail_index_map_parse_keywords(new_map) < 0)
+ ret = 0;
+ } T_END;
+ if (ret != 0 || try == 2) {
+ if (ret < 0) {
+ *reason_r = "Corrupted index file";
+ unusable = TRUE;
+ ret = 0;
+ }
+ break;
+ }
+
+ /* fsck and try again */
+ old_map = index->map;
+ index->map = new_map;
+ if (mail_index_fsck(index) < 0) {
+ ret = -1;
+ break;
+ }
+
+ /* fsck replaced the map */
+ new_map = index->map;
+ index->map = old_map;
+ }
+ if (ret <= 0) {
+ mail_index_unmap(&new_map);
+ return ret < 0 ? -1 : (unusable ? 0 : 1);
+ }
+ i_assert(new_map->rec_map->records != NULL);
+
+ index->main_index_hdr_log_file_seq = new_map->hdr.log_file_seq;
+ index->main_index_hdr_log_file_tail_offset =
+ new_map->hdr.log_file_tail_offset;
+ mail_index_modseq_hdr_snapshot_update(new_map);
+
+ mail_index_unmap(&index->map);
+ index->map = new_map;
+ *reason_r = t_strdup_printf("Index mapped (file_seq=%u)",
+ index->map->hdr.log_file_seq);
+ return 1;
+}
+
+/* After the index file was mapped, bring index->map up-to-date with the
+   transaction log. If syncing fails, fsck the index (unless read-only or
+   the file was already re-read) and try mapping + syncing once more.
+   Returns <0 on error, 0/>0 as from the sync/map calls. */
+static int
+mail_index_map_latest_sync(struct mail_index *index,
+ enum mail_index_sync_handler_type type,
+ const char *reason)
+{
+ const char *map_reason, *reopen_reason;
+ bool reopened;
+ int ret;
+
+ if (index->log->head == NULL || index->indexid == 0) {
+ /* we're creating the index file, we don't have any
+ logs yet */
+ return 1;
+ }
+
+ /* and update the map with the latest changes from transaction log */
+ ret = mail_index_sync_map(&index->map, type, &map_reason);
+ if (ret != 0)
+ return ret;
+
+ /* ret == 0: syncing from the log failed - figure out why */
+ if (index->fd == -1) {
+ reopen_reason = "Index not open";
+ reopened = FALSE;
+ } else {
+ /* Check if the index was recreated while we were opening it.
+ This is unlikely, but could happen if
+ mail_index_log_optimization_settings.max_size is tiny. */
+ ret = mail_index_reopen_if_changed(index, &reopened, &reopen_reason);
+ if (ret < 0)
+ return -1;
+ if (ret == 0) {
+ /* Index was unexpectedly lost. The mailbox was
+ probably deleted while we were opening it. Handle
+ this as an error. */
+ index->index_deleted = TRUE;
+ return -1;
+ }
+ }
+ if (!reopened) {
+ /* fsck the index and try to reopen */
+ mail_index_set_error(index, "Index %s: %s: %s - fscking "
+ "(reopen_reason: %s)",
+ index->filepath, reason, map_reason,
+ reopen_reason);
+ if (!index->readonly) {
+ if (mail_index_fsck(index) < 0)
+ return -1;
+ }
+ }
+
+ ret = mail_index_map_latest_file(index, &reason);
+ if (ret > 0 && index->indexid != 0) {
+ ret = mail_index_sync_map(&index->map, type, &map_reason);
+ if (ret == 0) {
+ mail_index_set_error(index, "Index %s: %s: %s",
+ index->filepath, reason, map_reason);
+ }
+ }
+ return ret;
+}
+
+/* Update index->map to the latest state: either sync the existing map
+   from the transaction log, or (re)read the index file and sync from
+   there. Returns <0 on error, 0 if the index files are unusable,
+   >0 on success. */
+int mail_index_map(struct mail_index *index,
+ enum mail_index_sync_handler_type type)
+{
+ const char *reason;
+ int ret;
+
+ i_assert(!index->mapping);
+
+ /* guard against reentrant mapping while we work */
+ index->mapping = TRUE;
+
+ if (index->map == NULL)
+ index->map = mail_index_map_alloc(index);
+
+ /* first try updating the existing mapping from transaction log. */
+ if (!index->initial_mapped || index->reopen_main_index) {
+ /* index is being created/opened for the first time */
+ ret = 0;
+ } else if (mail_index_sync_map_want_index_reopen(index->map, type)) {
+ /* it's likely more efficient to reopen the index file than
+ sync from the transaction log. */
+ ret = 0;
+ } else {
+ /* sync the map from the transaction log. */
+ ret = mail_index_sync_map(&index->map, type, &reason);
+ if (ret == 0) {
+ e_debug(index->event,
+ "Couldn't sync map from transaction log: %s - "
+ "reopening index instead",
+ reason);
+ }
+ }
+
+ if (ret == 0) {
+ /* try to open and read the latest index. if it fails, we'll
+ fallback to updating the existing mapping from transaction
+ logs (which we'll also do even if the reopening succeeds).
+ if index files are unusable (e.g. major version change)
+ don't even try to use the transaction log. */
+ ret = mail_index_map_latest_file(index, &reason);
+ if (ret > 0) {
+ ret = mail_index_map_latest_sync(index, type, reason);
+ } else if (ret == 0 && !index->readonly) {
+ /* make sure we don't try to open the file again */
+ if (unlink(index->filepath) < 0 && errno != ENOENT)
+ mail_index_set_syscall_error(index, "unlink()");
+ }
+ }
+
+ if (ret >= 0)
+ index->initial_mapped = TRUE;
+ index->mapping = FALSE;
+ return ret;
+}
diff --git a/src/lib-index/mail-index-map.c b/src/lib-index/mail-index-map.c
new file mode 100644
index 0000000..6ac2b93
--- /dev/null
+++ b/src/lib-index/mail-index-map.c
@@ -0,0 +1,595 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "str-sanitize.h"
+#include "mmap-util.h"
+#include "mail-index-private.h"
+#include "mail-index-modseq.h"
+
+/* (Re)initialize the map's extension and ext-id-mapping arrays with a
+   backing pool sized for approximately initial_count extensions. */
+void mail_index_map_init_extbufs(struct mail_index_map *map,
+ unsigned int initial_count)
+{
+#define EXTENSION_NAME_APPROX_LEN 20
+#define EXT_GLOBAL_ALLOC_SIZE \
+ ((sizeof(map->extensions) + sizeof(buffer_t)) * 2)
+#define EXT_PER_ALLOC_SIZE \
+ (EXTENSION_NAME_APPROX_LEN + \
+ sizeof(struct mail_index_ext) + sizeof(uint32_t))
+ size_t size;
+
+ if (map->extension_pool == NULL) {
+ size = EXT_GLOBAL_ALLOC_SIZE +
+ initial_count * EXT_PER_ALLOC_SIZE;
+ map->extension_pool =
+ pool_alloconly_create(MEMPOOL_GROWING"map extensions",
+ nearest_power(size));
+ } else {
+ p_clear(map->extension_pool);
+
+ /* try to use the existing pool's size for initial_count so
+ we don't grow it needlessly */
+ size = p_get_max_easy_alloc_size(map->extension_pool);
+ if (size > EXT_GLOBAL_ALLOC_SIZE + EXT_PER_ALLOC_SIZE) {
+ initial_count = (size - EXT_GLOBAL_ALLOC_SIZE) /
+ EXT_PER_ALLOC_SIZE;
+ }
+ }
+
+ p_array_init(&map->extensions, map->extension_pool, initial_count);
+ p_array_init(&map->ext_id_map, map->extension_pool, initial_count);
+}
+
+/* Find a registered extension by name with a linear scan.
+   Returns TRUE and sets *idx_r to the map-level index if found. */
+bool mail_index_map_lookup_ext(struct mail_index_map *map, const char *name,
+ uint32_t *idx_r)
+{
+ const struct mail_index_ext *ext;
+
+ if (!array_is_created(&map->extensions))
+ return FALSE;
+
+ array_foreach(&map->extensions, ext) {
+ if (strcmp(ext->name, name) == 0) {
+ *idx_r = array_foreach_idx(&map->extensions, ext);
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/* Returns the offset from an extension's start to its header data:
+   sizeof(struct mail_index_ext_header) + name, rounded up to the
+   index's 64-bit header alignment. */
+unsigned int mail_index_map_ext_hdr_offset(unsigned int name_len)
+{
+ size_t size = sizeof(struct mail_index_ext_header) + name_len;
+ return MAIL_INDEX_HEADER_SIZE_ALIGN(size);
+}
+
+/* Append an extension to the map's extension list, register it with the
+   index and update the index-ext-id -> map-ext-id mapping. The name must
+   not already exist in the map. Returns the new map-level index. */
+uint32_t
+mail_index_map_register_ext(struct mail_index_map *map,
+ const char *name, uint32_t ext_offset,
+ const struct mail_index_ext_header *ext_hdr)
+{
+ struct mail_index_ext *ext;
+ uint32_t idx, ext_map_idx, empty_idx = (uint32_t)-1;
+
+ i_assert(mail_index_ext_name_is_valid(name));
+
+ if (!array_is_created(&map->extensions)) {
+ mail_index_map_init_extbufs(map, 5);
+ idx = 0;
+ } else {
+ idx = array_count(&map->extensions);
+ }
+ i_assert(!mail_index_map_lookup_ext(map, name, &ext_map_idx));
+
+ ext = array_append_space(&map->extensions);
+ ext->name = p_strdup(map->extension_pool, name);
+ ext->ext_offset = ext_offset;
+ /* ext_offset == -1 means the extension isn't in the file header */
+ ext->hdr_offset = ext_offset == (uint32_t)-1 ? (uint32_t)-1 :
+ ext_offset + mail_index_map_ext_hdr_offset(strlen(name));
+ ext->hdr_size = ext_hdr->hdr_size;
+ ext->record_offset = ext_hdr->record_offset;
+ ext->record_size = ext_hdr->record_size;
+ ext->record_align = ext_hdr->record_align;
+ ext->reset_id = ext_hdr->reset_id;
+
+ ext->index_idx = mail_index_ext_register(map->index, name,
+ ext_hdr->hdr_size,
+ ext_hdr->record_size,
+ ext_hdr->record_align);
+
+ /* Update index ext_id -> map ext_id mapping. Fill non-used
+ ext_ids with (uint32_t)-1 */
+ while (array_count(&map->ext_id_map) < ext->index_idx)
+ array_push_back(&map->ext_id_map, &empty_idx);
+ array_idx_set(&map->ext_id_map, ext->index_idx, &idx);
+ return idx;
+}
+
+/* Parse the next extension header at *offset_p within the map's header.
+   On success advances *offset_p past it, sets *ext_hdr_r and *name_r
+   ("" if the name looks broken) and returns 0; returns -1 if the
+   extension doesn't fit inside the header. */
+int mail_index_map_ext_get_next(struct mail_index_map *map,
+ unsigned int *offset_p,
+ const struct mail_index_ext_header **ext_hdr_r,
+ const char **name_r)
+{
+ const struct mail_index_ext_header *ext_hdr;
+ unsigned int offset, name_offset;
+
+ offset = *offset_p;
+ *name_r = "";
+
+ /* Extension header contains:
+ - struct mail_index_ext_header
+ - name (not 0-terminated)
+ - 64bit alignment padding
+ - extension header contents
+ - 64bit alignment padding
+ */
+ name_offset = offset + sizeof(*ext_hdr);
+ ext_hdr = MAIL_INDEX_MAP_HDR_OFFSET(map, offset);
+ if (offset + sizeof(*ext_hdr) >= map->hdr.header_size)
+ return -1;
+
+ offset += mail_index_map_ext_hdr_offset(ext_hdr->name_size);
+ if (offset > map->hdr.header_size)
+ return -1;
+
+ *name_r = t_strndup(MAIL_INDEX_MAP_HDR_OFFSET(map, name_offset),
+ ext_hdr->name_size);
+ if (strcmp(*name_r, str_sanitize(*name_r, SIZE_MAX)) != 0) {
+ /* we allow only plain ASCII names, so this extension
+ is most likely broken */
+ *name_r = "";
+ }
+
+ /* finally make sure that the hdr_size is small enough.
+ do this last so that we could return a usable name. */
+ offset += MAIL_INDEX_HEADER_SIZE_ALIGN(ext_hdr->hdr_size);
+ if (offset > map->hdr.header_size)
+ return -1;
+
+ *offset_p = offset;
+ *ext_hdr_r = ext_hdr;
+ return 0;
+}
+
+/* Validate an extension's per-record field layout (offset, size,
+   alignment) against the index's record size.
+   Returns 0 if valid, -1 with *error_r set. */
+static int
+mail_index_map_ext_hdr_check_record(const struct mail_index_header *hdr,
+ const struct mail_index_ext_header *ext_hdr,
+ const char **error_r)
+{
+ if (ext_hdr->record_align == 0) {
+ *error_r = "Record field alignment is zero";
+ return -1;
+ }
+
+ /* until we get 128 bit CPUs having a larger alignment is pointless */
+ if (ext_hdr->record_align > sizeof(uint64_t)) {
+ *error_r = "Record alignment is too large";
+ return -1;
+ }
+ /* a large record size is most likely a bug somewhere. the maximum
+ record size is limited to 64k anyway, so try to fail earlier. */
+ if (ext_hdr->record_size >= 32768) {
+ *error_r = "Record size is too large";
+ return -1;
+ }
+
+ if (ext_hdr->record_offset == 0) {
+ /* if we get here from extension introduction, record_offset=0
+ and hdr->record_size hasn't been updated yet */
+ return 0;
+ }
+
+ if (ext_hdr->record_offset + ext_hdr->record_size > hdr->record_size) {
+ *error_r = t_strdup_printf("Record field points "
+ "outside record size (%u+%u > %u)",
+ ext_hdr->record_offset,
+ ext_hdr->record_size,
+ hdr->record_size);
+ return -1;
+ }
+
+ if ((ext_hdr->record_offset % ext_hdr->record_align) != 0) {
+ *error_r = t_strdup_printf("Record field alignment %u "
+ "not used", ext_hdr->record_align);
+ return -1;
+ }
+ if ((hdr->record_size % ext_hdr->record_align) != 0) {
+ *error_r = t_strdup_printf("Record size not aligned by %u "
+ "as required by extension",
+ ext_hdr->record_align);
+ return -1;
+ }
+ return 0;
+}
+
+/* Validate an extension header: name, record layout and header size.
+   Returns 0 if valid, -1 with *error_r set. */
+int mail_index_map_ext_hdr_check(const struct mail_index_header *hdr,
+ const struct mail_index_ext_header *ext_hdr,
+ const char *name, const char **error_r)
+{
+ /* an extension must have either record data or header data */
+ if (ext_hdr->record_size == 0 && ext_hdr->hdr_size == 0) {
+ *error_r = "Invalid field values";
+ return -1;
+ }
+ if (!mail_index_ext_name_is_valid(name)) {
+ *error_r = "Invalid name";
+ return -1;
+ }
+
+ if (ext_hdr->record_size != 0) {
+ if (mail_index_map_ext_hdr_check_record(hdr, ext_hdr,
+ error_r) < 0)
+ return -1;
+ }
+ if (ext_hdr->hdr_size > MAIL_INDEX_EXT_HEADER_MAX_SIZE) {
+ *error_r = t_strdup_printf("Headersize too large (%u)",
+ ext_hdr->hdr_size);
+ return -1;
+ }
+ return 0;
+}
+
+/* Fill in an in-memory index header with the defaults for a new,
+   empty index. */
+static void mail_index_header_init(struct mail_index *index,
+ struct mail_index_header *hdr)
+{
+ /* header must stay 64-bit aligned for the records following it */
+ i_assert((sizeof(*hdr) % sizeof(uint64_t)) == 0);
+
+ i_zero(hdr);
+
+ hdr->major_version = MAIL_INDEX_MAJOR_VERSION;
+ hdr->minor_version = MAIL_INDEX_MINOR_VERSION;
+ hdr->base_header_size = sizeof(*hdr);
+ hdr->header_size = sizeof(*hdr);
+ hdr->record_size = sizeof(struct mail_index_record);
+
+#ifndef WORDS_BIGENDIAN
+ hdr->compat_flags |= MAIL_INDEX_COMPAT_LITTLE_ENDIAN;
+#endif
+
+ hdr->indexid = index->indexid;
+ hdr->log_file_seq = 1;
+ hdr->next_uid = 1;
+ hdr->first_recent_uid = 1;
+}
+
+/* Allocate a new, empty in-memory map for the index with a freshly
+   initialized header. */
+struct mail_index_map *mail_index_map_alloc(struct mail_index *index)
+{
+ struct mail_index_map tmp_map;
+
+ i_zero(&tmp_map);
+ mail_index_header_init(index, &tmp_map.hdr);
+ tmp_map.hdr_copy_buf = t_buffer_create(sizeof(tmp_map.hdr));
+ buffer_append(tmp_map.hdr_copy_buf, &tmp_map.hdr, sizeof(tmp_map.hdr));
+ tmp_map.index = index;
+
+ /* a bit kludgy way to do this, but it initializes everything
+ nicely and correctly */
+ return mail_index_map_clone(&tmp_map);
+}
+
+/* Free a record map: release its buffer or munmap() its region
+   (exactly one of the two is in use), the back-reference array and
+   the modseq tracking data. */
+static void mail_index_record_map_free(struct mail_index_map *map,
+ struct mail_index_record_map *rec_map)
+{
+ if (rec_map->buffer != NULL) {
+ i_assert(rec_map->mmap_base == NULL);
+ buffer_free(&rec_map->buffer);
+ } else if (rec_map->mmap_base != NULL) {
+ i_assert(rec_map->buffer == NULL);
+ if (munmap(rec_map->mmap_base, rec_map->mmap_size) < 0)
+ mail_index_set_syscall_error(map->index, "munmap()");
+ rec_map->mmap_base = NULL;
+ }
+ array_free(&rec_map->maps);
+ if (rec_map->modseq != NULL)
+ mail_index_map_modseq_free(&rec_map->modseq);
+ i_free(rec_map);
+}
+
+/* Remove map from its rec_map's back-reference list. When the last
+   reference goes away the rec_map itself is freed. The map must be in
+   the list. */
+static void mail_index_record_map_unlink(struct mail_index_map *map)
+{
+ struct mail_index_map *const *maps;
+ unsigned int idx = UINT_MAX;
+
+ array_foreach(&map->rec_map->maps, maps) {
+ if (*maps == map) {
+ idx = array_foreach_idx(&map->rec_map->maps, maps);
+ break;
+ }
+ }
+ i_assert(idx != UINT_MAX);
+
+ array_delete(&map->rec_map->maps, idx, 1);
+ if (array_count(&map->rec_map->maps) == 0) {
+ mail_index_record_map_free(map, map->rec_map);
+ map->rec_map = NULL;
+ }
+}
+
+/* Drop one reference from the map (NULLing the caller's pointer) and
+   free the map and its shared record map when the refcount hits zero. */
+void mail_index_unmap(struct mail_index_map **_map)
+{
+ struct mail_index_map *map = *_map;
+
+ *_map = NULL;
+ if (--map->refcount > 0)
+ return;
+
+ i_assert(map->refcount == 0);
+ mail_index_record_map_unlink(map);
+
+ pool_unref(&map->extension_pool);
+ if (array_is_created(&map->keyword_idx_map))
+ array_free(&map->keyword_idx_map);
+ buffer_free(&map->hdr_copy_buf);
+ i_free(map);
+}
+
+/* Copy src's records into a newly allocated in-memory buffer owned by
+   dest. dest must not already have a buffer. */
+static void mail_index_map_copy_records(struct mail_index_record_map *dest,
+ const struct mail_index_record_map *src,
+ unsigned int record_size)
+{
+ size_t size;
+
+ size = src->records_count * record_size;
+ /* +1% so we have a bit of space to grow. useful for huge mailboxes. */
+ dest->buffer = buffer_create_dynamic(default_pool,
+ size + I_MAX(size/100, 1024));
+ buffer_append(dest->buffer, src->records, size);
+
+ dest->records = buffer_get_modifiable_data(dest->buffer, NULL);
+ dest->records_count = src->records_count;
+}
+
+/* Copy the full header (base part + extension data) from src into dest's
+   hdr and hdr_copy_buf. When src == dest and the copy buffer already
+   exists it is assumed up-to-date and left untouched. */
+static void mail_index_map_copy_header(struct mail_index_map *dest,
+ const struct mail_index_map *src)
+{
+ /* use src->hdr copy directly, because if we got here
+ from syncing it has the latest changes. */
+ if (src != dest)
+ dest->hdr = src->hdr;
+ if (dest->hdr_copy_buf != NULL) {
+ if (src == dest)
+ return;
+
+ buffer_set_used_size(dest->hdr_copy_buf, 0);
+ } else {
+ dest->hdr_copy_buf =
+ buffer_create_dynamic(default_pool,
+ dest->hdr.header_size);
+ }
+ /* base header from the in-memory struct... */
+ buffer_append(dest->hdr_copy_buf, &dest->hdr,
+ I_MIN(sizeof(dest->hdr), src->hdr.base_header_size));
+ /* ...extension header data from src's copy buffer */
+ if (src != dest) {
+ buffer_write(dest->hdr_copy_buf, src->hdr.base_header_size,
+ MAIL_INDEX_MAP_HDR_OFFSET(src, src->hdr.base_header_size),
+ src->hdr.header_size - src->hdr.base_header_size);
+ }
+ i_assert(dest->hdr_copy_buf->used == dest->hdr.header_size);
+}
+
+/* Allocate an empty record map with map as its first back-reference. */
+static struct mail_index_record_map *
+mail_index_record_map_alloc(struct mail_index_map *map)
+{
+ struct mail_index_record_map *rec_map;
+
+ rec_map = i_new(struct mail_index_record_map, 1);
+ i_array_init(&rec_map->maps, 4);
+ array_push_back(&rec_map->maps, &map);
+ return rec_map;
+}
+
+/* Create an in-memory clone of the map. The record data is shared with
+   the original (reference counted via rec_map->maps); the header,
+   extensions and keyword map are copied. */
+struct mail_index_map *mail_index_map_clone(const struct mail_index_map *map)
+{
+ struct mail_index_map *mem_map;
+ struct mail_index_ext *ext;
+ unsigned int count;
+
+ mem_map = i_new(struct mail_index_map, 1);
+ mem_map->index = map->index;
+ mem_map->refcount = 1;
+ if (map->rec_map == NULL) {
+ /* cloning a template map (see mail_index_map_alloc()) -
+ start with an empty in-memory record map */
+ mem_map->rec_map = mail_index_record_map_alloc(mem_map);
+ mem_map->rec_map->buffer =
+ buffer_create_dynamic(default_pool, 1024);
+ } else {
+ mem_map->rec_map = map->rec_map;
+ array_push_back(&mem_map->rec_map->maps, &mem_map);
+ }
+
+ mail_index_map_copy_header(mem_map, map);
+
+ /* copy extensions */
+ if (array_is_created(&map->ext_id_map)) {
+ count = array_count(&map->ext_id_map);
+ mail_index_map_init_extbufs(mem_map, count + 2);
+
+ array_append_array(&mem_map->extensions, &map->extensions);
+ array_append_array(&mem_map->ext_id_map, &map->ext_id_map);
+
+ /* fix the name pointers to use our own pool */
+ array_foreach_modifiable(&mem_map->extensions, ext) {
+ i_assert(ext->record_offset + ext->record_size <=
+ mem_map->hdr.record_size);
+ ext->name = p_strdup(mem_map->extension_pool,
+ ext->name);
+ }
+ }
+
+ /* copy keyword map */
+ if (array_is_created(&map->keyword_idx_map)) {
+ i_array_init(&mem_map->keyword_idx_map,
+ array_count(&map->keyword_idx_map) + 4);
+ array_append_array(&mem_map->keyword_idx_map,
+ &map->keyword_idx_map);
+ }
+
+ return mem_map;
+}
+
+/* Give map its own private record map, cloning the shared rec_map if it
+   has other references, and truncate away any records beyond
+   hdr.messages_count. */
+void mail_index_record_map_move_to_private(struct mail_index_map *map)
+{
+ struct mail_index_record_map *new_map;
+ const struct mail_index_record *rec;
+
+ if (array_count(&map->rec_map->maps) > 1) {
+ /* Multiple references to the rec_map. Create a clone of the
+ rec_map, which is in memory. */
+ new_map = mail_index_record_map_alloc(map);
+ mail_index_map_copy_records(new_map, map->rec_map,
+ map->hdr.record_size);
+ mail_index_record_map_unlink(map);
+ map->rec_map = new_map;
+ /* NOTE(review): map->rec_map was just reassigned to the
+ freshly allocated new_map, whose modseq is NULL, so this
+ clone can never run - verify against upstream whether the
+ old rec_map's modseq was intended here */
+ if (map->rec_map->modseq != NULL)
+ new_map->modseq = mail_index_map_modseq_clone(map->rec_map->modseq);
+ } else {
+ new_map = map->rec_map;
+ }
+
+ if (new_map->records_count != map->hdr.messages_count) {
+ /* The rec_map has more messages than what map contains.
+ These messages aren't necessary (and may confuse the caller),
+ so truncate them away. */
+ i_assert(new_map->records_count > map->hdr.messages_count);
+ new_map->records_count = map->hdr.messages_count;
+ if (new_map->records_count == 0)
+ new_map->last_appended_uid = 0;
+ else {
+ rec = MAIL_INDEX_REC_AT_SEQ(map, new_map->records_count);
+ new_map->last_appended_uid = rec->uid;
+ }
+ buffer_set_used_size(new_map->buffer, new_map->records_count *
+ map->hdr.record_size);
+ }
+}
+
+/* Replace a mmap()ed rec_map with an in-memory copy of its records.
+   No-op if the rec_map is already in memory. */
+void mail_index_map_move_to_memory(struct mail_index_map *map)
+{
+ struct mail_index_record_map *new_map;
+
+ if (map->rec_map->mmap_base == NULL) {
+ /* rec_map is already in memory */
+ return;
+ }
+
+ /* Move the rec_map contents to memory. If this is the only map that
+ refers to the rec_map, it can be directly replaced and the old
+ content munmap()ed. Otherwise, create a new rec_map for this map. */
+ if (array_count(&map->rec_map->maps) == 1)
+ new_map = map->rec_map;
+ else {
+ new_map = mail_index_record_map_alloc(map);
+ new_map->modseq = map->rec_map->modseq == NULL ? NULL :
+ mail_index_map_modseq_clone(map->rec_map->modseq);
+ }
+
+ mail_index_map_copy_records(new_map, map->rec_map,
+ map->hdr.record_size);
+ /* rebuild the header copy buffer from the in-memory header */
+ mail_index_map_copy_header(map, map);
+
+ if (new_map != map->rec_map) {
+ mail_index_record_map_unlink(map);
+ map->rec_map = new_map;
+ } else {
+ if (munmap(new_map->mmap_base, new_map->mmap_size) < 0)
+ mail_index_set_syscall_error(map->index, "munmap()");
+ new_map->mmap_base = NULL;
+ }
+}
+
+/* Translate an index-level extension id into this map's extension index.
+   Returns FALSE if the extension isn't present in the map
+   ((uint32_t)-1 marks unused slots). */
+bool mail_index_map_get_ext_idx(struct mail_index_map *map,
+ uint32_t ext_id, uint32_t *idx_r)
+{
+ const uint32_t *id;
+
+ if (!array_is_created(&map->ext_id_map) ||
+ ext_id >= array_count(&map->ext_id_map))
+ return FALSE;
+
+ id = array_idx(&map->ext_id_map, ext_id);
+ *idx_r = *id;
+ return *idx_r != (uint32_t)-1;
+}
+
+/* Binary-search for uid starting from left_idx. Returns a 1-based
+   sequence: the exact match if found, otherwise with nearest_side > 0
+   the nearest sequence with a larger uid (0 if none exists), and with
+   nearest_side <= 0 the nearest sequence with a smaller uid. */
+static uint32_t mail_index_bsearch_uid(struct mail_index_map *map,
+ uint32_t uid, uint32_t left_idx,
+ int nearest_side)
+{
+ const struct mail_index_record *rec_base, *rec;
+ uint32_t idx, right_idx, record_size;
+
+ i_assert(map->hdr.messages_count <= map->rec_map->records_count);
+
+ rec_base = map->rec_map->records;
+ record_size = map->hdr.record_size;
+
+ idx = left_idx;
+ /* uids are ascending and start from >=1, so a record's uid is never
+ smaller than its sequence - uid caps the search range */
+ right_idx = I_MIN(map->hdr.messages_count, uid);
+
+ i_assert(right_idx < INT_MAX);
+ while (left_idx < right_idx) {
+ idx = (left_idx + right_idx) / 2;
+
+ rec = CONST_PTR_OFFSET(rec_base, idx * record_size);
+ if (rec->uid < uid)
+ left_idx = idx+1;
+ else if (rec->uid > uid)
+ right_idx = idx;
+ else
+ break;
+ }
+ i_assert(idx < map->hdr.messages_count);
+
+ rec = CONST_PTR_OFFSET(rec_base, idx * record_size);
+ if (rec->uid != uid) {
+ if (nearest_side > 0) {
+ /* we want uid or larger */
+ return rec->uid > uid ? idx+1 :
+ (idx == map->hdr.messages_count-1 ? 0 : idx+2);
+ } else {
+ /* we want uid or smaller */
+ return rec->uid < uid ? idx + 1 : idx;
+ }
+ }
+
+ return idx+1;
+}
+
+/* Convert the UID range [first_uid..last_uid] into a sequence range.
+   Both sequences are set to 0 if no messages fall inside the range. */
+void mail_index_map_lookup_seq_range(struct mail_index_map *map,
+ uint32_t first_uid, uint32_t last_uid,
+ uint32_t *first_seq_r,
+ uint32_t *last_seq_r)
+{
+ i_assert(first_uid > 0);
+ i_assert(first_uid <= last_uid);
+
+ if (map->hdr.messages_count == 0) {
+ *first_seq_r = *last_seq_r = 0;
+ return;
+ }
+
+ *first_seq_r = mail_index_bsearch_uid(map, first_uid, 0, 1);
+ if (*first_seq_r == 0 ||
+ MAIL_INDEX_REC_AT_SEQ(map, *first_seq_r)->uid > last_uid) {
+ /* no messages with uid >= first_uid, or the first such
+ message is already past last_uid */
+ *first_seq_r = *last_seq_r = 0;
+ return;
+ }
+
+ if (last_uid >= map->hdr.next_uid-1) {
+ /* we want the last message */
+ last_uid = map->hdr.next_uid-1;
+ if (first_uid > last_uid) {
+ *first_seq_r = *last_seq_r = 0;
+ return;
+ }
+
+ *last_seq_r = map->hdr.messages_count;
+ return;
+ }
+
+ if (first_uid == last_uid)
+ *last_seq_r = *first_seq_r;
+ else {
+ /* optimization - binary lookup only from right side: */
+ *last_seq_r = mail_index_bsearch_uid(map, last_uid,
+ *first_seq_r - 1, -1);
+ }
+ i_assert(*last_seq_r >= *first_seq_r);
+}
diff --git a/src/lib-index/mail-index-modseq.c b/src/lib-index/mail-index-modseq.c
new file mode 100644
index 0000000..4285983
--- /dev/null
+++ b/src/lib-index/mail-index-modseq.c
@@ -0,0 +1,733 @@
+/* Copyright (c) 2008-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "mail-transaction-log-private.h"
+#include "mail-index-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-index-modseq.h"
+
+/* One 64-bit modseq value per message sequence. */
+ARRAY_DEFINE_TYPE(modseqs, uint64_t);
+
+/* Index into mail_index_map_modseq.metadata_modseqs: one slot per system
+   flag, followed by one slot per keyword. */
+enum modseq_metadata_idx {
+ /* must be in the same order as enum mail_flags */
+ METADATA_MODSEQ_IDX_ANSWERED = 0,
+ METADATA_MODSEQ_IDX_FLAGGED,
+ METADATA_MODSEQ_IDX_DELETED,
+ METADATA_MODSEQ_IDX_SEEN,
+ METADATA_MODSEQ_IDX_DRAFT,
+
+ /* keyword n lives at METADATA_MODSEQ_IDX_KEYWORD_START + n */
+ METADATA_MODSEQ_IDX_KEYWORD_START
+};
+
+/* Per-flag/keyword modseq tracking for every message in a map. */
+struct metadata_modseqs {
+ ARRAY_TYPE(modseqs) modseqs;
+};
+
+/* In-memory per-map modseq state, attached to the record map. */
+struct mail_index_map_modseq {
+ /* indexes use enum modseq_metadata_idx */
+ ARRAY(struct metadata_modseqs) metadata_modseqs;
+};
+
+/* Context carried through one modseq sync run. */
+struct mail_index_modseq_sync {
+ struct mail_index_sync_map_ctx *sync_map_ctx;
+ struct mail_index_view *view;
+ struct mail_transaction_log_view *log_view;
+ struct mail_index_map_modseq *mmap;
+};
+
+/* Register the "modseq" extension for this index: a modseq header plus
+   one 64-bit modseq per record. */
+void mail_index_modseq_init(struct mail_index *index)
+{
+ index->modseq_ext_id =
+ mail_index_ext_register(index, MAIL_INDEX_MODSEQ_EXT_NAME,
+ sizeof(struct mail_index_modseq_header),
+ sizeof(uint64_t), sizeof(uint64_t));
+}
+
+/* Return the transaction log head's highest modseq, or 1 if the log
+   isn't opened yet (modseqs always start from 1). */
+static uint64_t mail_index_modseq_get_head(struct mail_index *index)
+{
+ return index->log->head == NULL ? 1 :
+ I_MAX(index->log->head->sync_highest_modseq, 1);
+}
+
+/* Turn on modseq tracking for the index. If the modseq extension isn't
+   in the index file yet, commit a transaction that introduces it with
+   the current log head modseq as the initial highest-modseq. */
+void mail_index_modseq_enable(struct mail_index *index)
+{
+ struct mail_index_transaction *trans;
+ struct mail_index_view *view;
+ struct mail_index_modseq_header hdr;
+ uint32_t ext_map_idx;
+
+ if (index->modseqs_enabled)
+ return;
+
+ if (!mail_index_map_get_ext_idx(index->map, index->modseq_ext_id,
+ &ext_map_idx)) {
+ /* modseqs not enabled to the index yet, add them. */
+ view = mail_index_view_open(index);
+ trans = mail_index_transaction_begin(view, 0);
+
+ i_zero(&hdr);
+ hdr.highest_modseq = mail_index_modseq_get_head(index);
+ mail_index_update_header_ext(trans, index->modseq_ext_id,
+ 0, &hdr, sizeof(hdr));
+
+ /* commit also refreshes the index, which syncs the modseqs */
+ (void)mail_index_transaction_commit(&trans);
+ mail_index_view_close(&view);
+
+ /* get the modseq extension to index map */
+ if (!mail_index_map_get_ext_idx(index->map,
+ index->modseq_ext_id,
+ &ext_map_idx)) {
+ /* didn't work for some reason */
+ return;
+ }
+ }
+ index->modseqs_enabled = TRUE;
+}
+
+/* Return TRUE if the current index map contains a modseq extension
+   header, i.e. modseq tracking has been enabled at some point. */
+bool mail_index_have_modseq_tracking(struct mail_index *index)
+{
+ return mail_index_map_get_modseq_header(index->map) != NULL;
+}
+
+/* Snapshot the map's current modseq header into
+   map->modseq_hdr_snapshot (zeroed if the extension doesn't exist). */
+void mail_index_modseq_hdr_snapshot_update(struct mail_index_map *map)
+{
+ const struct mail_index_modseq_header *hdr =
+ mail_index_map_get_modseq_header(map);
+ if (hdr != NULL)
+ map->modseq_hdr_snapshot = *hdr;
+ else
+ i_zero(&map->modseq_hdr_snapshot);
+}
+
+/* Return the map's modseq extension header, or NULL if the extension
+   doesn't exist or its header has an unexpected size. */
+const struct mail_index_modseq_header *
+mail_index_map_get_modseq_header(struct mail_index_map *map)
+{
+ const struct mail_index_ext *ext;
+ uint32_t idx;
+
+ if (!mail_index_map_get_ext_idx(map, map->index->modseq_ext_id, &idx))
+ return NULL;
+
+ ext = array_idx(&map->extensions, idx);
+ /* guard against a corrupted/mismatched extension header size */
+ if (ext->hdr_size != sizeof(struct mail_index_modseq_header))
+ return NULL;
+
+ return MAIL_INDEX_MAP_HDR_OFFSET(map, ext->hdr_offset);
+}
+
+/* Return the highest modseq recorded in the map's modseq header,
+   falling back to the log head's modseq. Returns 0 only when modseqs
+   aren't enabled and the log isn't opened. */
+uint64_t mail_index_map_modseq_get_highest(struct mail_index_map *map)
+{
+ const struct mail_index_modseq_header *modseq_hdr;
+
+ modseq_hdr = mail_index_map_get_modseq_header(map);
+ if (modseq_hdr != NULL && modseq_hdr->highest_modseq != 0)
+ return modseq_hdr->highest_modseq;
+ else {
+ /* fallback to returning the log head. if modseqs aren't
+ enabled, we return 0. */
+ return map->index->log->head == NULL ? 0 :
+ map->index->log->head->sync_highest_modseq;
+ }
+}
+
+/* Convenience wrapper: highest modseq for the view's current map. */
+uint64_t mail_index_modseq_get_highest(struct mail_index_view *view)
+{
+ return mail_index_map_modseq_get_highest(view->map);
+}
+
+/* Return (lazily creating) the in-memory per-flag/keyword modseq state
+   for the view's record map. Returns NULL until the modseq extension
+   has been introduced into the map. */
+static struct mail_index_map_modseq *
+mail_index_map_modseq(struct mail_index_view *view)
+{
+ struct mail_index_map_modseq *mmap = view->map->rec_map->modseq;
+ uint32_t ext_map_idx;
+
+ if (mmap != NULL)
+ return mmap;
+
+ /* don't start tracking until we've seen modseq extension intro */
+ if (!mail_index_map_get_ext_idx(view->map, view->index->modseq_ext_id,
+ &ext_map_idx))
+ return NULL;
+
+ /* reserve slots for all system flags plus currently known keywords */
+ mmap = i_new(struct mail_index_map_modseq, 1);
+ i_array_init(&mmap->metadata_modseqs,
+ METADATA_MODSEQ_IDX_KEYWORD_START +
+ array_count(&view->index->keywords));
+ view->map->rec_map->modseq = mmap;
+ return mmap;
+}
+
+/* Return the modseq of message at the given sequence. Falls back to
+   the current highest modseq when tracking isn't active yet or the
+   record's stored modseq is still zero. */
+uint64_t mail_index_modseq_lookup(struct mail_index_view *view, uint32_t seq)
+{
+ struct mail_index_map_modseq *mmap = mail_index_map_modseq(view);
+ struct mail_index_map *map;
+ const struct mail_index_ext *ext;
+ const struct mail_index_record *rec;
+ const uint64_t *modseqp;
+ uint32_t ext_map_idx;
+
+ if (mmap == NULL)
+ return mail_index_modseq_get_head(view->index);
+
+ rec = mail_index_lookup_full(view, seq, &map, NULL);
+ if (!mail_index_map_get_ext_idx(map, view->index->modseq_ext_id,
+ &ext_map_idx)) {
+ /* not enabled yet */
+ return mail_index_modseq_get_head(view->index);
+ }
+
+ /* per-record modseq lives inside the record at the extension's
+    record_offset */
+ ext = array_idx(&map->extensions, ext_map_idx);
+ modseqp = CONST_PTR_OFFSET(rec, ext->record_offset);
+ if (*modseqp == 0) {
+ /* If we're here because we just enabled modseqs, we'll return
+ the same modseq (initial highestmodseq) for all messages.
+ The next sync will change these zeros to initial
+ highestmodseq or higher.
+
+ If we're here because a message got appended but modseq
+ wasn't set (older Dovecot?), we'll again use the current
+ highest modseq. This isn't exactly correct, but it gets
+ fixed after the next sync and this situation shouldn't
+ normally happen anyway. */
+ return mail_index_modseq_get_highest(view);
+ }
+ return *modseqp;
+}
+
+/* Raise the stored modseq of the message at seq to at least min_modseq.
+   Returns 1 if updated, 0 if the stored modseq was already higher,
+   -1 if modseq tracking isn't available. */
+int mail_index_modseq_set(struct mail_index_view *view,
+ uint32_t seq, uint64_t min_modseq)
+{
+ struct mail_index_map_modseq *mmap = mail_index_map_modseq(view);
+ const struct mail_index_ext *ext;
+ struct mail_index_record *rec;
+ uint64_t *modseqp;
+ uint32_t ext_map_idx;
+
+ if (mmap == NULL)
+ return -1;
+
+ rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
+ if (!mail_index_map_get_ext_idx(view->map, view->index->modseq_ext_id,
+ &ext_map_idx))
+ return -1;
+
+ ext = array_idx(&view->map->extensions, ext_map_idx);
+ modseqp = PTR_OFFSET(rec, ext->record_offset);
+ if (*modseqp > min_modseq)
+ return 0;
+ else {
+ *modseqp = min_modseq;
+ return 1;
+ }
+}
+
+/* Look up the per-flag/keyword modseq for metadata slot idx and message
+   sequence seq. Returns 0 when nothing has been tracked for that slot
+   or sequence. */
+static uint64_t
+modseq_idx_lookup(struct mail_index_map_modseq *mmap,
+ unsigned int idx, uint32_t seq)
+{
+ const struct metadata_modseqs *metadata;
+ const uint64_t *modseqs;
+ unsigned int count;
+
+ metadata = array_get(&mmap->metadata_modseqs, &count);
+ if (idx >= count || !array_is_created(&metadata[idx].modseqs))
+ return 0;
+
+ /* seq is 1-based; array is 0-based */
+ modseqs = array_get(&metadata[idx].modseqs, &count);
+ return seq > count ? 0 : modseqs[seq-1];
+}
+
+/* Return the highest modseq among the given system flags' last changes
+   for message seq; falls back to the message's own modseq when no
+   per-flag data exists. */
+uint64_t mail_index_modseq_lookup_flags(struct mail_index_view *view,
+ enum mail_flags flags_mask,
+ uint32_t seq)
+{
+ struct mail_index_map_modseq *mmap = mail_index_map_modseq(view);
+ unsigned int i;
+ uint64_t modseq, highest_modseq = 0;
+
+ if (mmap != NULL) {
+ /* first try to find a specific match */
+ for (i = 0; i < METADATA_MODSEQ_IDX_KEYWORD_START; i++) {
+ if ((flags_mask & (1 << i)) != 0) {
+ modseq = modseq_idx_lookup(mmap, i, seq);
+ if (highest_modseq < modseq)
+ highest_modseq = modseq;
+ }
+ }
+ }
+
+ if (highest_modseq == 0) {
+ /* no specific matches, fallback to using the highest */
+ highest_modseq = mail_index_modseq_lookup(view, seq);
+ }
+ return highest_modseq;
+}
+
+/* Return the highest modseq among the given keywords' last changes for
+   message seq; falls back to the message's own modseq when no
+   per-keyword data exists. */
+uint64_t mail_index_modseq_lookup_keywords(struct mail_index_view *view,
+ const struct mail_keywords *keywords,
+ uint32_t seq)
+{
+ struct mail_index_map_modseq *mmap = mail_index_map_modseq(view);
+ unsigned int i, metadata_idx;
+ uint64_t modseq, highest_modseq = 0;
+
+ if (mmap != NULL) {
+ /* first try to find a specific match */
+ for (i = 0; i < keywords->count; i++) {
+ metadata_idx = METADATA_MODSEQ_IDX_KEYWORD_START +
+ keywords->idx[i];
+
+ modseq = modseq_idx_lookup(mmap, metadata_idx, seq);
+ if (highest_modseq < modseq)
+ highest_modseq = modseq;
+ }
+ }
+
+ if (highest_modseq == 0) {
+ /* no specific matches, fallback to using the highest */
+ highest_modseq = mail_index_modseq_lookup(view, seq);
+ }
+ return highest_modseq;
+}
+
+/* Write modseq into the per-record modseq field of sequences
+   seq1..seq2. If nonzeros is FALSE, only zero fields are filled;
+   if TRUE, smaller values are also raised to modseq. */
+static void
+mail_index_modseq_update(struct mail_index_modseq_sync *ctx,
+ uint64_t modseq, bool nonzeros,
+ uint32_t seq1, uint32_t seq2)
+{
+ const struct mail_index_ext *ext;
+ struct mail_index_record *rec;
+ uint32_t ext_map_idx;
+ uint64_t *modseqp;
+
+ if (!mail_index_map_get_ext_idx(ctx->view->map,
+ ctx->view->index->modseq_ext_id,
+ &ext_map_idx))
+ return;
+
+ ext = array_idx(&ctx->view->map->extensions, ext_map_idx);
+ for (; seq1 <= seq2; seq1++) {
+ rec = MAIL_INDEX_REC_AT_SEQ(ctx->view->map, seq1);
+ modseqp = PTR_OFFSET(rec, ext->record_offset);
+ if (*modseqp == 0 || (nonzeros && *modseqp < modseq))
+ *modseqp = modseq;
+ }
+}
+
+/* Raise the per-record modseqs of seq1..seq2 to the log view's current
+   modseq. Returns FALSE (doing nothing) when modseq tracking is off. */
+static bool
+mail_index_modseq_update_to_highest(struct mail_index_modseq_sync *ctx,
+ uint32_t seq1, uint32_t seq2)
+{
+ uint64_t modseq;
+
+ if (ctx->mmap == NULL)
+ return FALSE;
+
+ modseq = mail_transaction_log_view_get_prev_modseq(ctx->log_view);
+ mail_index_modseq_update(ctx, modseq, TRUE, seq1, seq2);
+ return TRUE;
+}
+
+/* Apply one old (pre-modseq-tracking) transaction log record: find the
+   messages it touched and raise their modseqs to the log position's
+   modseq. Only append/flag/keyword records are relevant; everything
+   else is ignored. */
+static void
+mail_index_modseq_update_old_rec(struct mail_index_modseq_sync *ctx,
+ const struct mail_transaction_header *thdr,
+ const void *tdata)
+{
+ /* NOTE: elements are interpreted as UID ranges even though the
+    array type is seq_range; they're translated to sequences below. */
+ ARRAY_TYPE(seq_range) uids = ARRAY_INIT;
+ const struct seq_range *rec;
+ buffer_t uid_buf;
+ unsigned int i, count;
+ uint32_t seq1, seq2;
+
+ switch (thdr->type & MAIL_TRANSACTION_TYPE_MASK) {
+ case MAIL_TRANSACTION_APPEND: {
+ const struct mail_index_record *appends = tdata;
+
+ count = thdr->size / sizeof(*appends);
+ for (i = 0; i < count; i++) {
+ if (mail_index_lookup_seq(ctx->view,
+ appends[i].uid, &seq1)) {
+ (void)mail_index_modseq_update_to_highest(ctx, seq1, seq1);
+ }
+ }
+ return;
+ }
+ case MAIL_TRANSACTION_FLAG_UPDATE: {
+ buffer_create_from_const_data(&uid_buf, tdata, thdr->size);
+ array_create_from_buffer(&uids, &uid_buf,
+ sizeof(struct mail_transaction_flag_update));
+ break;
+ }
+ case MAIL_TRANSACTION_KEYWORD_UPDATE: {
+ const struct mail_transaction_keyword_update *rec = tdata;
+ unsigned int seqset_offset;
+
+ /* UID ranges start after the keyword name, padded to a
+    4-byte boundary */
+ seqset_offset = sizeof(*rec) + rec->name_size;
+ if ((seqset_offset % 4) != 0)
+ seqset_offset += 4 - (seqset_offset % 4);
+
+ buffer_create_from_const_data(&uid_buf,
+ CONST_PTR_OFFSET(tdata, seqset_offset),
+ thdr->size - seqset_offset);
+ array_create_from_buffer(&uids, &uid_buf, sizeof(uint32_t)*2);
+ break;
+ }
+ case MAIL_TRANSACTION_KEYWORD_RESET:
+ buffer_create_from_const_data(&uid_buf, tdata, thdr->size);
+ array_create_from_buffer(&uids, &uid_buf,
+ sizeof(struct mail_transaction_keyword_reset));
+ break;
+ case MAIL_TRANSACTION_ATTRIBUTE_UPDATE:
+ break;
+ default:
+ return;
+ }
+
+ /* update modseqs */
+ count = array_is_created(&uids) ? array_count(&uids) : 0;
+ for (i = 0; i < count; i++) {
+ rec = array_idx(&uids, i);
+ if (mail_index_lookup_seq_range(ctx->view, rec->seq1, rec->seq2,
+ &seq1, &seq2))
+ (void)mail_index_modseq_update_to_highest(ctx, seq1, seq2);
+ }
+}
+
+/* Bring per-record modseqs up to date with the view's log position by
+   replaying log records written since the modseq header's last tracked
+   position. If older log files are missing, all modseqs are instead
+   raised to a safe lower bound. */
+static void mail_index_modseq_sync_init(struct mail_index_modseq_sync *ctx)
+{
+ struct mail_index_map *map = ctx->view->map;
+ const struct mail_index_ext *ext;
+ const struct mail_index_modseq_header *hdr;
+ const struct mail_transaction_header *thdr;
+ const void *tdata;
+ const char *reason;
+ uint32_t ext_map_idx;
+ uint32_t end_seq;
+ uoff_t end_offset;
+ uint64_t cur_modseq;
+ bool reset;
+ int ret;
+
+ if (!mail_index_map_get_ext_idx(map, ctx->view->index->modseq_ext_id,
+ &ext_map_idx))
+ i_unreached();
+ ext = array_idx(&map->extensions, ext_map_idx);
+
+ /* get the current highest_modseq. don't change any modseq below it. */
+ hdr = MAIL_INDEX_MAP_HDR_OFFSET(map, ext->hdr_offset);
+
+ /* Scan logs for updates between ext_hdr.log_* .. view position.
+ There are two reasons why there could be any:
+
+ 1) We just enabled modseqs and we're filling the initial values.
+ 2) A non-modseq-aware Dovecot version added new messages and wrote
+ dovecot.index file. */
+ mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
+ &end_seq, &end_offset);
+ if (end_seq < hdr->log_seq ||
+ (end_seq == hdr->log_seq && end_offset <= hdr->log_offset)) {
+ /* modseqs are up to date */
+ return;
+ }
+
+ ctx->log_view = mail_transaction_log_view_open(ctx->view->index->log);
+ ret = mail_transaction_log_view_set(ctx->log_view,
+ I_MAX(1, hdr->log_seq),
+ hdr->log_offset,
+ end_seq, end_offset, &reset, &reason);
+ if (ret <= 0) {
+ /* missing files / error - try with only the last file */
+ ret = mail_transaction_log_view_set(ctx->log_view, end_seq, 0,
+ end_seq, end_offset,
+ &reset, &reason);
+ /* since we don't know if we skipped some changes, set all
+ modseqs to beginning of the latest file. */
+ cur_modseq = mail_transaction_log_view_get_prev_modseq(
+ ctx->log_view);
+ if (cur_modseq < hdr->highest_modseq) {
+ /* should happen only when setting initial modseqs.
+ we may already have returned highest_modseq as
+ some messages' modseq value. don't shrink it. */
+ cur_modseq = hdr->highest_modseq;
+ }
+ mail_index_modseq_update(ctx, cur_modseq, TRUE, 1,
+ map->hdr.messages_count);
+ } else {
+ /* we have all the logs. replace zero modseqs with the current
+ highest modseq (we may have already returned it for them). */
+ mail_index_modseq_update(ctx, hdr->highest_modseq, FALSE, 1,
+ map->hdr.messages_count);
+ }
+ if (ret > 0) {
+ /* replay each log record; T_BEGIN scopes temporary
+    allocations per record */
+ while (mail_transaction_log_view_next(ctx->log_view,
+ &thdr, &tdata) > 0) {
+ T_BEGIN {
+ mail_index_modseq_update_old_rec(ctx, thdr,
+ tdata);
+ } T_END;
+ }
+ }
+ mail_transaction_log_view_close(&ctx->log_view);
+}
+
+/* Begin a modseq sync for the given map sync context. Performs catch-up
+   initialization if modseq tracking is active; must be ended with
+   mail_index_modseq_sync_end(). */
+struct mail_index_modseq_sync *
+mail_index_modseq_sync_begin(struct mail_index_sync_map_ctx *sync_map_ctx)
+{
+ struct mail_index_modseq_sync *ctx;
+
+ ctx = i_new(struct mail_index_modseq_sync, 1);
+ ctx->sync_map_ctx = sync_map_ctx;
+ ctx->view = sync_map_ctx->view;
+ ctx->mmap = mail_index_map_modseq(ctx->view);
+ if (ctx->mmap != NULL) {
+ mail_index_modseq_sync_init(ctx);
+ /* continue using the view's own log view afterwards */
+ ctx->log_view = ctx->view->log_view;
+ }
+ return ctx;
+}
+
+/* Update the modseq extension header in the map's header copy with the
+   log view's current position and highest modseq, but only if it moves
+   the tracked log position forwards. */
+static void mail_index_modseq_update_header(struct mail_index_modseq_sync *ctx)
+{
+ struct mail_index_view *view = ctx->view;
+ struct mail_index_map *map = view->map;
+ const struct mail_index_ext *ext;
+ const struct mail_index_modseq_header *old_modseq_hdr;
+ struct mail_index_modseq_header new_modseq_hdr;
+ uint32_t ext_map_idx, log_seq;
+ uoff_t log_offset;
+ uint64_t highest_modseq;
+
+ if (!mail_index_map_get_ext_idx(map, view->index->modseq_ext_id,
+ &ext_map_idx))
+ return;
+
+ mail_transaction_log_view_get_prev_pos(view->log_view,
+ &log_seq, &log_offset);
+ highest_modseq = mail_transaction_log_view_get_prev_modseq(view->log_view);
+
+ ext = array_idx(&map->extensions, ext_map_idx);
+ old_modseq_hdr = MAIL_INDEX_MAP_HDR_OFFSET(map, ext->hdr_offset);
+
+ if (old_modseq_hdr->log_seq < log_seq ||
+ (old_modseq_hdr->log_seq == log_seq &&
+ old_modseq_hdr->log_offset < log_offset)) {
+ i_zero(&new_modseq_hdr);
+ new_modseq_hdr.highest_modseq = highest_modseq;
+ new_modseq_hdr.log_seq = log_seq;
+ new_modseq_hdr.log_offset = log_offset;
+
+ buffer_write(map->hdr_copy_buf, ext->hdr_offset,
+ &new_modseq_hdr, sizeof(new_modseq_hdr));
+ i_assert(map->hdr_copy_buf->used == map->hdr.header_size);
+ }
+}
+
+/* Finish a modseq sync: persist the updated modseq header (if tracking
+   is active) and free the context. *_ctx is set to NULL. */
+void mail_index_modseq_sync_end(struct mail_index_modseq_sync **_ctx)
+{
+ struct mail_index_modseq_sync *ctx = *_ctx;
+
+ *_ctx = NULL;
+ if (ctx->mmap != NULL) {
+ i_assert(ctx->mmap == ctx->view->map->rec_map->modseq);
+ mail_index_modseq_update_header(ctx);
+ }
+ i_free(ctx);
+}
+
+/* The view's map was replaced during sync - refresh our mmap pointer
+   to the new map's modseq state. */
+void mail_index_modseq_sync_map_replaced(struct mail_index_modseq_sync *ctx)
+{
+ ctx->mmap = mail_index_map_modseq(ctx->view);
+}
+
+/* The modseq extension header was just introduced/updated mid-sync:
+   start tracking now if we weren't already. */
+void mail_index_modseq_hdr_update(struct mail_index_modseq_sync *ctx)
+{
+ if (ctx->mmap == NULL) {
+ ctx->mmap = mail_index_map_modseq(ctx->view);
+ i_assert(ctx->mmap != NULL);
+ mail_index_modseq_sync_init(ctx);
+ ctx->log_view = ctx->view->log_view;
+ }
+}
+
+/* A message was appended at seq - stamp it with the current modseq. */
+void mail_index_modseq_append(struct mail_index_modseq_sync *ctx, uint32_t seq)
+{
+ (void)mail_index_modseq_update_to_highest(ctx, seq, seq);
+}
+
+/* Messages seq1..seq2 were expunged - drop their entries from every
+   per-flag/keyword modseq array so later sequences shift down. */
+void mail_index_modseq_expunge(struct mail_index_modseq_sync *ctx,
+ uint32_t seq1, uint32_t seq2)
+{
+ struct metadata_modseqs *metadata;
+
+ if (ctx->mmap == NULL)
+ return;
+
+ /* convert 1-based seq to 0-based array index */
+ seq1--;
+ array_foreach_modifiable(&ctx->mmap->metadata_modseqs, metadata) {
+ if (array_is_created(&metadata->modseqs))
+ array_delete(&metadata->modseqs, seq1, seq2-seq1);
+ }
+}
+
+/* Raise the modseq entries for sequences seq1..seq2 in the array to at
+   least value, growing the array as needed. */
+static void
+modseqs_update(ARRAY_TYPE(modseqs) *array, uint32_t seq1, uint32_t seq2,
+ uint64_t value)
+{
+ uint64_t *modseqp;
+
+ for (; seq1 <= seq2; seq1++) {
+ modseqp = array_idx_get_space(array, seq1-1);
+ if (*modseqp < value)
+ *modseqp = value;
+ }
+}
+
+/* Record the current log modseq for metadata slot idx on sequences
+   seq1..seq2. Skipped entirely when modseqs aren't enabled, since the
+   per-flag data is an in-memory-only optimization. */
+static void
+modseqs_idx_update(struct mail_index_modseq_sync *ctx, unsigned int idx,
+ uint32_t seq1, uint32_t seq2)
+{
+ struct metadata_modseqs *metadata;
+ uint64_t modseq;
+
+ if (!ctx->view->index->modseqs_enabled) {
+ /* we want to keep permanent modseqs updated, but don't bother
+ updating in-memory per-flag updates */
+ return;
+ }
+
+ modseq = mail_transaction_log_view_get_prev_modseq(ctx->log_view);
+ metadata = array_idx_get_space(&ctx->mmap->metadata_modseqs, idx);
+ if (!array_is_created(&metadata->modseqs))
+ i_array_init(&metadata->modseqs, seq2 + 16);
+ modseqs_update(&metadata->modseqs, seq1, seq2, modseq);
+}
+
+/* System flags in flags_mask changed on seq1..seq2: raise the records'
+   modseqs and the matching per-flag tracking slots. */
+void mail_index_modseq_update_flags(struct mail_index_modseq_sync *ctx,
+ enum mail_flags flags_mask,
+ uint32_t seq1, uint32_t seq2)
+{
+ unsigned int i;
+
+ if (!mail_index_modseq_update_to_highest(ctx, seq1, seq2))
+ return;
+
+ /* flag bit i maps to metadata slot i (same order as enum
+    mail_flags) */
+ for (i = 0; i < METADATA_MODSEQ_IDX_KEYWORD_START; i++) {
+ if ((flags_mask & (1 << i)) != 0)
+ modseqs_idx_update(ctx, i, seq1, seq2);
+ }
+}
+
+/* A keyword changed on seq1..seq2: raise the records' modseqs and the
+   keyword's tracking slot. */
+void mail_index_modseq_update_keyword(struct mail_index_modseq_sync *ctx,
+ unsigned int keyword_idx,
+ uint32_t seq1, uint32_t seq2)
+{
+ if (!mail_index_modseq_update_to_highest(ctx, seq1, seq2))
+ return;
+
+ modseqs_idx_update(ctx, METADATA_MODSEQ_IDX_KEYWORD_START + keyword_idx,
+ seq1, seq2);
+}
+
+/* All keywords were reset on seq1..seq2: raise the records' modseqs and
+   every keyword tracking slot that exists. */
+void mail_index_modseq_reset_keywords(struct mail_index_modseq_sync *ctx,
+ uint32_t seq1, uint32_t seq2)
+{
+ unsigned int i, count;
+
+ if (!mail_index_modseq_update_to_highest(ctx, seq1, seq2))
+ return;
+
+ count = array_count(&ctx->mmap->metadata_modseqs);
+ for (i = METADATA_MODSEQ_IDX_KEYWORD_START; i < count; i++)
+ modseqs_idx_update(ctx, i, seq1, seq2);
+}
+
+/* Deep-copy the in-memory modseq state (used when a record map is
+   duplicated). Caller owns the returned struct; free with
+   mail_index_map_modseq_free(). */
+struct mail_index_map_modseq *
+mail_index_map_modseq_clone(const struct mail_index_map_modseq *mmap)
+{
+ struct mail_index_map_modseq *new_mmap;
+ const struct metadata_modseqs *src_metadata;
+ struct metadata_modseqs *dest_metadata;
+ unsigned int i, count;
+
+ src_metadata = array_get(&mmap->metadata_modseqs, &count);
+
+ new_mmap = i_new(struct mail_index_map_modseq, 1);
+ i_array_init(&new_mmap->metadata_modseqs, count + 16);
+
+ for (i = 0; i < count; i++) {
+ dest_metadata = array_append_space(&new_mmap->metadata_modseqs);
+ if (array_is_created(&src_metadata[i].modseqs)) {
+ i_array_init(&dest_metadata->modseqs,
+ array_count(&src_metadata[i].modseqs));
+ array_append_array(&dest_metadata->modseqs,
+ &src_metadata[i].modseqs);
+ }
+ }
+ return new_mmap;
+}
+
+/* Free the in-memory modseq state and all its per-slot arrays.
+   *_mmap is set to NULL. */
+void mail_index_map_modseq_free(struct mail_index_map_modseq **_mmap)
+{
+ struct mail_index_map_modseq *mmap = *_mmap;
+ struct metadata_modseqs *metadata;
+
+ *_mmap = NULL;
+
+ array_foreach_modifiable(&mmap->metadata_modseqs, metadata) {
+ if (array_is_created(&metadata->modseqs))
+ array_free(&metadata->modseqs);
+ }
+ array_free(&mmap->metadata_modseqs);
+ i_free(mmap);
+}
+
+/* Find the log file seq/offset of the first change following the given
+   modseq, clamped back to the view's head position if it lies beyond
+   the view. Returns FALSE if the needed log file no longer exists or
+   the offset can't be determined. */
+bool mail_index_modseq_get_next_log_offset(struct mail_index_view *view,
+ uint64_t modseq, uint32_t *log_seq_r,
+ uoff_t *log_offset_r)
+{
+ struct mail_transaction_log *log = view->index->log;
+ struct mail_transaction_log_file *file, *prev_file;
+ const char *reason;
+ int ret;
+
+ if (log->files == NULL) {
+ /* we shouldn't normally get here */
+ return FALSE;
+ }
+ /* NOTE(review): loop termination relies on
+    mail_transaction_log_find_file() inserting the found older file
+    into log->files so the condition advances - confirm against that
+    function to rule out an infinite loop here. */
+ while (modseq < log->files->hdr.initial_modseq) {
+ /* try to find the previous log file if it still exists */
+ ret = mail_transaction_log_find_file(log,
+ log->files->hdr.file_seq - 1, FALSE, &file, &reason);
+ if (ret <= 0)
+ return FALSE;
+ }
+
+ /* pick the newest file whose initial_modseq is still <= modseq */
+ prev_file = NULL;
+ for (file = log->files; file != NULL; file = file->next) {
+ if (modseq < file->hdr.initial_modseq)
+ break;
+ prev_file = file;
+ }
+
+ if (prev_file == NULL) {
+ /* the log file has been deleted already */
+ return FALSE;
+ }
+
+ *log_seq_r = prev_file->hdr.file_seq;
+ if (mail_transaction_log_file_get_modseq_next_offset(prev_file, modseq,
+ log_offset_r) < 0)
+ return FALSE;
+
+ if (*log_seq_r > view->log_file_head_seq ||
+ (*log_seq_r == view->log_file_head_seq &&
+ *log_offset_r > view->log_file_head_offset)) {
+ /* modseq is already beyond our view. move it back so the
+ caller won't be confused. */
+ *log_seq_r = view->log_file_head_seq;
+ *log_offset_r = view->log_file_head_offset;
+ }
+ return TRUE;
+}
diff --git a/src/lib-index/mail-index-modseq.h b/src/lib-index/mail-index-modseq.h
new file mode 100644
index 0000000..f0fe48a
--- /dev/null
+++ b/src/lib-index/mail-index-modseq.h
@@ -0,0 +1,66 @@
+#ifndef MAIL_INDEX_MODSEQ_H
+#define MAIL_INDEX_MODSEQ_H
+
+#include "mail-types.h"
+
+/* Name under which the modseq extension is registered in the index. */
+#define MAIL_INDEX_MODSEQ_EXT_NAME "modseq"
+
+struct mail_keywords;
+struct mail_index;
+struct mail_index_map;
+struct mail_index_view;
+struct mail_index_modseq;
+struct mail_index_map_modseq;
+struct mail_index_sync_map_ctx;
+
+/* Register the modseq extension. Called once per index at open time. */
+void mail_index_modseq_init(struct mail_index *index);
+
+/* Save a copy of the current modseq header to map->modseq_hdr_snapshot. This
+ is expected to be called when reading the dovecot.index header before any
+ changes are applied on top of it from dovecot.index.log. */
+void mail_index_modseq_hdr_snapshot_update(struct mail_index_map *map);
+
+/* Accessors for the modseq extension header and highest modseq. */
+const struct mail_index_modseq_header *
+mail_index_map_get_modseq_header(struct mail_index_map *map);
+uint64_t mail_index_map_modseq_get_highest(struct mail_index_map *map);
+void mail_index_modseq_enable(struct mail_index *index);
+bool mail_index_have_modseq_tracking(struct mail_index *index);
+uint64_t mail_index_modseq_get_highest(struct mail_index_view *view);
+
+/* Per-message modseq lookups (optionally narrowed to specific flags or
+   keywords) and explicit minimum-modseq updates. */
+uint64_t mail_index_modseq_lookup(struct mail_index_view *view, uint32_t seq);
+uint64_t mail_index_modseq_lookup_flags(struct mail_index_view *view,
+ enum mail_flags flags_mask,
+ uint32_t seq);
+uint64_t mail_index_modseq_lookup_keywords(struct mail_index_view *view,
+ const struct mail_keywords *keywords,
+ uint32_t seq);
+int mail_index_modseq_set(struct mail_index_view *view,
+ uint32_t seq, uint64_t min_modseq);
+
+/* Modseq bookkeeping hooks called during index map syncing. */
+struct mail_index_modseq_sync *
+mail_index_modseq_sync_begin(struct mail_index_sync_map_ctx *sync_map_ctx);
+void mail_index_modseq_sync_end(struct mail_index_modseq_sync **ctx);
+
+void mail_index_modseq_sync_map_replaced(struct mail_index_modseq_sync *ctx);
+void mail_index_modseq_hdr_update(struct mail_index_modseq_sync *ctx);
+void mail_index_modseq_append(struct mail_index_modseq_sync *ctx, uint32_t seq);
+void mail_index_modseq_expunge(struct mail_index_modseq_sync *ctx,
+ uint32_t seq1, uint32_t seq2);
+void mail_index_modseq_update_flags(struct mail_index_modseq_sync *ctx,
+ enum mail_flags flags_mask,
+ uint32_t seq1, uint32_t seq2);
+void mail_index_modseq_update_keyword(struct mail_index_modseq_sync *ctx,
+ unsigned int keyword_idx,
+ uint32_t seq1, uint32_t seq2);
+void mail_index_modseq_reset_keywords(struct mail_index_modseq_sync *ctx,
+ uint32_t seq1, uint32_t seq2);
+
+/* Lifecycle of the in-memory per-map modseq state. */
+struct mail_index_map_modseq *
+mail_index_map_modseq_clone(const struct mail_index_map_modseq *mmap);
+void mail_index_map_modseq_free(struct mail_index_map_modseq **mmap);
+
+/* Map a modseq back to the log position of the following change. */
+bool mail_index_modseq_get_next_log_offset(struct mail_index_view *view,
+ uint64_t modseq, uint32_t *log_seq_r,
+ uoff_t *log_offset_r);
+
+#endif
diff --git a/src/lib-index/mail-index-private.h b/src/lib-index/mail-index-private.h
new file mode 100644
index 0000000..26f7e39
--- /dev/null
+++ b/src/lib-index/mail-index-private.h
@@ -0,0 +1,437 @@
+#ifndef MAIL_INDEX_PRIVATE_H
+#define MAIL_INDEX_PRIVATE_H
+
+#include "file-lock.h"
+#include "mail-index.h"
+#include "mail-index-util.h"
+#include "mail-index-view-private.h"
+#include "mail-index-transaction-private.h"
+
+#include <sys/stat.h>
+
+struct mail_transaction_header;
+struct mail_transaction_log_view;
+struct mail_index_sync_map_ctx;
+
+/* How large index files to mmap() instead of reading to memory. */
+#define MAIL_INDEX_MMAP_MIN_SIZE (1024*64)
+/* How many times to retry opening index files if read/fstat returns ESTALE.
+ This happens with NFS when the file has been deleted (ie. index file was
+ rewritten by another computer than us). */
+#define MAIL_INDEX_ESTALE_RETRY_COUNT NFS_ESTALE_RETRY_COUNT
+/* Large extension header sizes are probably caused by file corruption, so
+ try to catch them by limiting the header size. */
+#define MAIL_INDEX_EXT_HEADER_MAX_SIZE (1024*1024*16-1)
+
+#define MAIL_INDEX_IS_IN_MEMORY(index) \
+ ((index)->dir == NULL)
+
+#define MAIL_INDEX_MAP_IS_IN_MEMORY(map) \
+ ((map)->rec_map->mmap_base == NULL)
+
+#define MAIL_INDEX_MAP_IDX(map, idx) \
+ ((struct mail_index_record *) \
+ PTR_OFFSET((map)->rec_map->records, (idx) * (map)->hdr.record_size))
+#define MAIL_INDEX_REC_AT_SEQ(map, seq) \
+ ((struct mail_index_record *) \
+ PTR_OFFSET((map)->rec_map->records, ((seq)-1) * (map)->hdr.record_size))
+
+#define MAIL_TRANSACTION_FLAG_UPDATE_IS_INTERNAL(u) \
+ ((((u)->add_flags | (u)->remove_flags) & MAIL_INDEX_FLAGS_MASK) == 0 && \
+ (u)->modseq_inc_flag == 0)
+
+#define MAIL_INDEX_EXT_KEYWORDS "keywords"
+#define MAIL_INDEX_EXT_NAME_MAX_LENGTH 64
+
+typedef int mail_index_expunge_handler_t(struct mail_index_sync_map_ctx *ctx,
+ const void *data, void **sync_context);
+
+#define MAIL_INDEX_HEADER_SIZE_ALIGN(size) \
+ (((size) + 7) & ~7U)
+
+/* In-memory copy of struct mail_index_ext_header */
+struct mail_index_ext {
+ const char *name;
+ uint32_t index_idx; /* index ext_id */
+ uint32_t reset_id;
+ uint32_t ext_offset; /* points to beginning of mail_index_ext_header */
+ uint32_t hdr_offset; /* points to mail_index_ext_header.data[] */
+ uint32_t hdr_size; /* size of mail_index_ext_header.data[] */
+ uint16_t record_offset; /* offset of ext data within each record */
+ uint16_t record_size; /* size of ext data within each record */
+ uint16_t record_align; /* required alignment within each record */
+};
+
+struct mail_index_ext_header {
+ /* Size of data[], i.e. the extension size in header */
+ uint32_t hdr_size;
+ /* If reset_id changes, all of the extension record data is
+ invalidated. For example with cache files reset_id must match the
+ cache header's file_seq or the cache offsets aren't valid. */
+ uint32_t reset_id;
+ /* Offset of this extension in struct mail_index_record. */
+ uint16_t record_offset;
+ /* Size of this extension in struct mail_index_record. */
+ uint16_t record_size;
+ /* Required alignment of this extension in struct mail_index_record.
+ It's expected that record_offset is correctly aligned. This is used
+ only when rearranging fields due to adding/removing other
+ extensions. */
+ uint16_t record_align;
+ /* Size of name[], which contains the extension's unique name. */
+ uint16_t name_size;
+ /* unsigned char name[name_size]; */
+ /* Extension header data, if any. This starts from the next 64-bit
+ aligned offset after name[]. */
+ /* unsigned char data[hdr_size]; */
+};
+
+/* On-disk header of the "keywords" extension: a count followed by
+   per-keyword records and the concatenated names. */
+struct mail_index_keyword_header {
+ uint32_t keywords_count;
+ /* struct mail_index_keyword_header_rec[] */
+ /* char name[][] */
+};
+
+struct mail_index_keyword_header_rec {
+ uint32_t unused; /* for backwards compatibility */
+ uint32_t name_offset; /* relative to beginning of name[] */
+};
+
+/* Bitmask describing which sync phases a sync handler applies to. */
+enum mail_index_sync_handler_type {
+ MAIL_INDEX_SYNC_HANDLER_FILE = 0x01,
+ MAIL_INDEX_SYNC_HANDLER_HEAD = 0x02,
+ MAIL_INDEX_SYNC_HANDLER_VIEW = 0x04
+};
+
+/* Extension as registered in-process via mail_index_ext_register(),
+   before/independent of what exists in the index file. */
+struct mail_index_registered_ext {
+ const char *name;
+ uint32_t index_idx; /* index ext_id */
+ uint32_t hdr_size; /* size of mail_index_ext_header.data[] */
+ uint16_t record_size;
+ uint16_t record_align;
+
+ /* called when expunging a message that has this ext's record data */
+ mail_index_expunge_handler_t *expunge_handler;
+};
+
+/* Header data of the "modseq" extension. */
+struct mail_index_modseq_header {
+ /* highest used modseq */
+ uint64_t highest_modseq;
+ /* last tracked log file position */
+ uint32_t log_seq;
+ uint32_t log_offset;
+};
+
+/* Storage for the actual message records, shared between maps that
+   reference the same record data (either mmap()ed or in a buffer). */
+struct mail_index_record_map {
+ /* all maps that point to this record map */
+ ARRAY(struct mail_index_map *) maps;
+
+ /* non-NULL when the records are mmap()ed from the index file */
+ void *mmap_base;
+ size_t mmap_size, mmap_used_size;
+
+ /* non-NULL when the records are held in memory instead */
+ buffer_t *buffer;
+
+ void *records; /* struct mail_index_record[] */
+ unsigned int records_count;
+
+ /* in-memory per-flag/keyword modseq tracking, or NULL */
+ struct mail_index_map_modseq *modseq;
+ uint32_t last_appended_uid;
+};
+
+/* Pointer into the map's header copy at the given byte offset. */
+#define MAIL_INDEX_MAP_HDR_OFFSET(map, hdr_offset) \
+ CONST_PTR_OFFSET((map)->hdr_copy_buf->data, hdr_offset)
+struct mail_index_map {
+ struct mail_index *index;
+ int refcount;
+
+ /* Copy of the base header for convenience. Note that base_header_size
+ may be smaller or larger than this struct. If it's smaller, the last
+ fields in the struct are filled with zeroes. */
+ struct mail_index_header hdr;
+ /* Copy of the full header. */
+ buffer_t *hdr_copy_buf;
+
+ pool_t extension_pool;
+ ARRAY(struct mail_index_ext) extensions;
+ ARRAY(uint32_t) ext_id_map; /* index -> file */
+
+ ARRAY(unsigned int) keyword_idx_map; /* file -> index */
+
+ /* modseq header as it was when the map was read from disk */
+ struct mail_index_modseq_header modseq_hdr_snapshot;
+
+ /* the (possibly shared) record storage */
+ struct mail_index_record_map *rec_map;
+};
+
+/* Registry used to hand out unique module context IDs. */
+struct mail_index_module_register {
+ unsigned int id;
+};
+
+/* Base type for module-specific context structs stored in
+   mail_index.module_contexts. */
+union mail_index_module_context {
+ struct mail_index_module_register *reg;
+};
+
+struct mail_index_settings {
+ /* Directory path for .cache file. Set via
+ mail_index_set_cache_dir(). */
+ char *cache_dir;
+
+ /* fsyncing behavior. Set via mail_index_set_fsync_mode(). */
+ enum fsync_mode fsync_mode;
+ enum mail_index_fsync_mask fsync_mask;
+
+ /* Index file permissions. Set via mail_index_set_permissions(). */
+ mode_t mode;
+ gid_t gid;
+ char *gid_origin;
+
+ /* Lock settings. Set via mail_index_set_lock_method(). */
+ enum file_lock_method lock_method;
+ unsigned int max_lock_timeout_secs;
+
+ /* Initial extension added to newly created indexes. Set via
+ mail_index_set_ext_init_data(). */
+ uint32_t ext_hdr_init_id;
+ void *ext_hdr_init_data;
+};
+
+struct mail_index_error {
+ /* Human-readable error text */
+ char *text;
+
+ /* Error happened because there's no disk space, i.e. syscall failed
+ with ENOSPC or EDQUOT. */
+ bool nodiskspace:1;
+};
+
+struct mail_index {
+ /* Directory path for the index, or NULL for in-memory indexes. */
+ char *dir;
+ /* Filename prefix for the index, e.g. "dovecot.index." */
+ char *prefix;
+ struct event *event;
+ enum mail_index_open_flags flags;
+ struct mail_index_settings set;
+ struct mail_index_optimization_settings optimization_set;
+
+ /* Cache file and transaction log attached to this index */
+ struct mail_cache *cache;
+ struct mail_transaction_log *log;
+
+ /* Main index file path and its file descriptor */
+ char *filepath;
+ int fd;
+ /* Linked list of currently opened views */
+ struct mail_index_view *views;
+ /* Latest map */
+ struct mail_index_map *map;
+
+ /* ID number that permanently identifies the index. This is stored in
+ the index files' headers. If the indexids suddenly changes, it means
+ that the index has been completely recreated and needs to be
+ reopened (e.g. the mailbox was deleted and recreated while it
+ was open). */
+ uint32_t indexid;
+ /* Views initially use this same ID value. This ID is incremented
+ whenever something unexpected happens to the index that prevents
+ syncing existing views. When the view's inconsistency_id doesn't
+ match this one, the view is marked as inconsistent. */
+ unsigned int inconsistency_id;
+ /* How many times this index has been opened with mail_index_open(). */
+ unsigned int open_count;
+
+ /* These contain the log_file_seq and log_file_tail_offset that exists
+ in dovecot.index file's header. These are used to figure out if it's
+ time to rewrite the dovecot.index file. Note that these aren't
+ available in index->map->hdr, because it gets updated when
+ transaction log file is read. */
+ uint32_t main_index_hdr_log_file_seq;
+ uint32_t main_index_hdr_log_file_tail_offset;
+
+ /* log file which last updated index_deleted */
+ uint32_t index_delete_changed_file_seq;
+
+ /* transaction log head seq/offset when we last fscked */
+ uint32_t fsck_log_head_file_seq;
+ uoff_t fsck_log_head_file_offset;
+
+ /* syncing will update this if non-NULL */
+ struct mail_index_transaction_commit_result *sync_commit_result;
+ /* Delayed log2_rotate_time update to mail_index_header. This is set
+ and unset within the same sync. */
+ uint32_t hdr_log2_rotate_time_delayed_update;
+
+ /* Registered extensions */
+ pool_t extension_pool;
+ ARRAY(struct mail_index_registered_ext) extensions;
+
+ /* All keywords that have ever been used in this index. Keywords are
+ only added here, never removed. */
+ pool_t keywords_pool;
+ ARRAY_TYPE(keywords) keywords;
+ HASH_TABLE(char *, void *) keywords_hash; /* name -> unsigned int idx */
+
+ /* Registered extension IDs for the built-in extensions */
+ uint32_t keywords_ext_id;
+ uint32_t modseq_ext_id;
+
+ /* Module-specific contexts. */
+ ARRAY(union mail_index_module_context *) module_contexts;
+
+ /* Last error returned by mail_index_get_error_message().
+ Cleared by mail_index_reset_error(). */
+ struct mail_index_error last_error;
+ /* Timestamp when mmap() failure was logged the last time. This is used
+ to prevent logging the same error too rapidly. This could happen
+ e.g. if mmap()ing a large cache file that exceeds process's
+ VSZ limit. */
+ time_t last_mmap_error_time;
+ /* If non-NULL, dovecot.index should be recreated as soon as possible.
+ The reason for why the recreation is wanted is stored as human-
+ readable text. */
+ char *need_recreate;
+
+ /* Mapping has noticed non-external MAIL_TRANSACTION_INDEX_DELETED
+ record, i.e. a request to mark the index deleted. The next sync
+ will finish the deletion by writing external
+ MAIL_TRANSACTION_INDEX_DELETED record. */
+ bool index_delete_requested:1;
+ /* Mapping has noticed external MAIL_TRANSACTION_INDEX_DELETED record,
+ or index was unexpectedly deleted under us. No more changes are
+ allowed to the index, except undeletion. */
+ bool index_deleted:1;
+ /* .log is locked for syncing. This is the main exclusive lock for
+ indexes. */
+ bool log_sync_locked:1;
+ /* Main index or .log couldn't be opened read-write */
+ bool readonly:1;
+ /* mail_index_map() is running */
+ bool mapping:1;
+ /* mail_index_sync_*() is running */
+ bool syncing:1;
+ /* Mapping has read more from .log than it preferred. Use
+ mail_index_base_optimization_settings.rewrite_min_log_bytes the next
+ time when checking if index needs a rewrite. */
+ bool index_min_write:1;
+ /* mail_index_modseq_enable() has been called. Track per-flag
+ modseq numbers in memory (global modseqs are tracked anyway). */
+ bool modseqs_enabled:1;
+ /* mail_index_open() is creating new index files */
+ bool initial_create:1;
+ /* TRUE after mail_index_map() has succeeded */
+ bool initial_mapped:1;
+ /* The next mail_index_map() must reopen the main index, because the
+ currently opened one is too old. */
+ bool reopen_main_index:1;
+ /* Index has been fsck'd, but mail_index_reset_fscked() hasn't been
+ called yet. */
+ bool fscked:1;
+};
+
+extern struct mail_index_module_register mail_index_module_register;
+extern struct event_category event_category_mail_index;
+
+/* Add/replace expunge handler for specified extension. */
+void mail_index_register_expunge_handler(struct mail_index *index,
+ uint32_t ext_id,
+ mail_index_expunge_handler_t *callback);
+void mail_index_unregister_expunge_handler(struct mail_index *index,
+ uint32_t ext_id);
+
+int mail_index_create_tmp_file(struct mail_index *index,
+ const char *path_prefix, const char **path_r);
+
+int mail_index_try_open_only(struct mail_index *index);
+void mail_index_close_file(struct mail_index *index);
+/* Returns 1 if index was successfully (re-)opened, 0 if the index no longer
+ exists, -1 if I/O error. If 1 is returned, reopened_r=TRUE if a new index
+ was actually reopened (or if index wasn't even open before this call). */
+int mail_index_reopen_if_changed(struct mail_index *index, bool *reopened_r,
+ const char **reason_r);
+/* Update/rewrite the main index file from index->map */
+void mail_index_write(struct mail_index *index, bool want_rotate,
+ const char *reason);
+
+void mail_index_flush_read_cache(struct mail_index *index, const char *path,
+ int fd, bool locked);
+
+int mail_index_lock_fd(struct mail_index *index, const char *path, int fd,
+ int lock_type, unsigned int timeout_secs,
+ struct file_lock **lock_r);
+
+/* Allocate a new empty map. */
+struct mail_index_map *mail_index_map_alloc(struct mail_index *index);
+/* Replace index->map with the latest index changes. This may reopen the index
+ file and/or it may read the latest changes from transaction log. The log is
+ read up to EOF, but non-synced expunges are skipped.
+
+ If we mmap()ed the index file, the map is returned locked.
+
+ Returns 1 = ok, 0 = corrupted, -1 = error. */
+int mail_index_map(struct mail_index *index,
+ enum mail_index_sync_handler_type type);
+/* Unreference given mapping and unmap it if it's dropped to zero. */
+void mail_index_unmap(struct mail_index_map **map);
+/* Clone a map. It still points to the original rec_map. */
+struct mail_index_map *mail_index_map_clone(const struct mail_index_map *map);
+/* Make sure the map has its own private rec_map, cloning it if necessary. */
+void mail_index_record_map_move_to_private(struct mail_index_map *map);
+/* If map points to mmap()ed index, copy it to the memory. */
+void mail_index_map_move_to_memory(struct mail_index_map *map);
+
+void mail_index_fchown(struct mail_index *index, int fd, const char *path);
+
+bool mail_index_map_lookup_ext(struct mail_index_map *map, const char *name,
+ uint32_t *idx_r);
+bool mail_index_ext_name_is_valid(const char *name);
+uint32_t
+mail_index_map_register_ext(struct mail_index_map *map,
+ const char *name, uint32_t ext_offset,
+ const struct mail_index_ext_header *ext_hdr);
+bool mail_index_map_get_ext_idx(struct mail_index_map *map,
+ uint32_t ext_id, uint32_t *idx_r);
+const struct mail_index_ext *
+mail_index_view_get_ext(struct mail_index_view *view, uint32_t ext_id);
+
+void mail_index_map_lookup_seq_range(struct mail_index_map *map,
+ uint32_t first_uid, uint32_t last_uid,
+ uint32_t *first_seq_r,
+ uint32_t *last_seq_r);
+
+/* Returns 1 on success, 0 on non-critical errors we want to silently fix,
+ -1 if map isn't usable. The caller is responsible for logging the errors
+ if -1 is returned. */
+int mail_index_map_check_header(struct mail_index_map *map,
+ const char **error_r);
+/* Returns TRUE if the header is compatible with this process, FALSE if not.
+   On failure *error_r describes the incompatibility. */
+bool mail_index_check_header_compat(struct mail_index *index,
+ const struct mail_index_header *hdr,
+ uoff_t file_size, const char **error_r);
+int mail_index_map_parse_extensions(struct mail_index_map *map);
+int mail_index_map_parse_keywords(struct mail_index_map *map);
+
+void mail_index_map_init_extbufs(struct mail_index_map *map,
+ unsigned int initial_count);
+int mail_index_map_ext_get_next(struct mail_index_map *map,
+ unsigned int *offset,
+ const struct mail_index_ext_header **ext_hdr_r,
+ const char **name_r);
+int mail_index_map_ext_hdr_check(const struct mail_index_header *hdr,
+ const struct mail_index_ext_header *ext_hdr,
+ const char *name, const char **error_r);
+unsigned int mail_index_map_ext_hdr_offset(unsigned int name_len);
+
+void mail_index_fsck_locked(struct mail_index *index);
+
+/* Log an error and set it as the index's current error that is available
+ with mail_index_get_error_message(). */
+void mail_index_set_error(struct mail_index *index, const char *fmt, ...)
+ ATTR_FORMAT(2, 3) ATTR_COLD;
+/* Same as mail_index_set_error(), but don't log the error. */
+void mail_index_set_error_nolog(struct mail_index *index, const char *str)
+ ATTR_COLD;
+/* "%s failed with index file %s: %m" */
+void mail_index_set_syscall_error(struct mail_index *index,
+ const char *function) ATTR_COLD;
+/* "%s failed with file %s: %m" */
+void mail_index_file_set_syscall_error(struct mail_index *index,
+ const char *filepath,
+ const char *function) ATTR_COLD;
+
+#endif
diff --git a/src/lib-index/mail-index-strmap.c b/src/lib-index/mail-index-strmap.c
new file mode 100644
index 0000000..1287c73
--- /dev/null
+++ b/src/lib-index/mail-index-strmap.c
@@ -0,0 +1,1259 @@
+/* Copyright (c) 2008-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "bsearch-insert-pos.h"
+#include "istream.h"
+#include "ostream.h"
+#include "file-lock.h"
+#include "file-dotlock.h"
+#include "crc32.h"
+#include "safe-mkstemp.h"
+#include "str.h"
+#include "mail-index-private.h"
+#include "mail-index-strmap.h"
+
+#include <stdio.h>
+
+/* A string map file attached to a mail index (used e.g. by the thread
+   index). Tracks the file handle and its locking state. */
+struct mail_index_strmap {
+ struct mail_index *index;
+ /* Full path to the strmap file (index filepath + suffix) */
+ char *path;
+ /* Open file descriptor, or -1 when the file isn't open */
+ int fd;
+ /* Input stream reading from fd; NULL when closed */
+ struct istream *input;
+
+ /* At most one of these locks is held at a time, depending on the
+    index's configured lock method */
+ struct file_lock *file_lock;
+ struct dotlock *dotlock;
+ struct dotlock_settings dotlock_settings;
+};
+
+/* In-memory view of a strmap, tied to a specific mail_index_view. */
+struct mail_index_strmap_view {
+ struct mail_index_strmap *strmap;
+ struct mail_index_view *view;
+
+ /* All records seen so far, in ascending UID order; recs_crc32 is a
+    parallel array of the records' string CRC32s (0 = unique string) */
+ ARRAY_TYPE(mail_index_strmap_rec) recs;
+ ARRAY(uint32_t) recs_crc32;
+ /* crc32 -> record hash for string lookups */
+ struct hash2_table *hash;
+
+ /* Caller-provided comparison/remap callbacks and their context */
+ mail_index_strmap_key_cmp_t *key_compare;
+ mail_index_strmap_rec_cmp_t *rec_compare;
+ mail_index_strmap_remap_t *remap_cb;
+ void *cb_context;
+
+ /* File offset and highest UID that have been read from disk so far */
+ uoff_t last_read_block_offset;
+ uint32_t last_read_uid;
+ /* Highest UID added to recs (from disk or by the syncing caller) */
+ uint32_t last_added_uid;
+ /* Total number of string references read; used as an upper bound
+    sanity check for string indexes */
+ uint32_t total_ref_count;
+
+ uint32_t last_ref_index;
+ /* Next unused string index (so highest assigned = next_str_idx-1) */
+ uint32_t next_str_idx;
+ /* Non-zero if reading failed because this UID was expunged by
+    another session but still exists in our view */
+ uint32_t lost_expunged_uid;
+
+ /* View is no longer in sync with the on-disk strmap */
+ bool desynced:1;
+};
+
+/* State for reading one block of records from the strmap file. */
+struct mail_index_strmap_read_context {
+ struct mail_index_strmap_view *view;
+
+ struct istream *input;
+ /* Stream offset where the current block ends */
+ uoff_t end_offset;
+ /* Highest string index seen while reading this block */
+ uint32_t highest_str_idx;
+ /* Sequence used for walking the index records in UID order */
+ uint32_t uid_lookup_seq;
+ uint32_t lost_expunged_uid;
+
+ /* Current record's raw data: data..end covers the crc32 values,
+    str_idx_base points to the corresponding string indexes */
+ const unsigned char *data, *end, *str_idx_base;
+ struct mail_index_strmap_rec rec;
+ uint32_t next_ref_index;
+ unsigned int rec_size;
+
+ /* File contains UIDs newer than our view knows about; reading must
+    be retried after the view is synced */
+ bool too_large_uids:1;
+};
+
+struct mail_index_strmap_view_sync {
+ struct mail_index_strmap_view *view;
+};
+
+struct mail_index_strmap_hash_key {
+ const char *str;
+ uint32_t crc32;
+};
+
+/* number of bytes required to store one string idx */
+#define STRMAP_FILE_STRIDX_SIZE (sizeof(uint32_t)*2)
+
+/* renumber the string indexes when highest string idx becomes larger than
+ <number of indexes>*STRMAP_FILE_MAX_STRIDX_MULTIPLIER */
+#define STRMAP_FILE_MAX_STRIDX_MULTIPLIER 2
+
+#define STRIDX_MUST_RENUMBER(highest_idx, n_unique_indexes) \
+ (highest_idx > n_unique_indexes * STRMAP_FILE_MAX_STRIDX_MULTIPLIER)
+
+#define MAIL_INDEX_STRMAP_TIMEOUT_SECS 10
+
+static const struct dotlock_settings default_dotlock_settings = {
+ .timeout = MAIL_INDEX_STRMAP_TIMEOUT_SECS,
+ .stale_timeout = 30
+};
+
+/* Allocate a strmap handle for the given (already opened) index.
+   suffix is appended to the index's filepath to form the strmap path.
+   The file itself isn't opened here. */
+struct mail_index_strmap *
+mail_index_strmap_init(struct mail_index *index, const char *suffix)
+{
+ struct mail_index_strmap *strmap;
+
+ i_assert(index->open_count > 0);
+
+ strmap = i_new(struct mail_index_strmap, 1);
+ strmap->index = index;
+ strmap->path = i_strconcat(index->filepath, suffix, NULL);
+ strmap->fd = -1;
+
+ /* inherit dotlocking behavior from the index's open flags */
+ strmap->dotlock_settings = default_dotlock_settings;
+ strmap->dotlock_settings.use_excl_lock =
+ (index->flags & MAIL_INDEX_OPEN_FLAG_DOTLOCK_USE_EXCL) != 0;
+ strmap->dotlock_settings.nfs_flush =
+ (index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0;
+ return strmap;
+}
+
+static bool
+mail_index_strmap_read_rec_next(struct mail_index_strmap_read_context *ctx,
+ uint32_t *crc32_r);
+
+/* Record a syscall failure against the strmap file. Out-of-disk-space
+   errors are flagged in the index's last_error and are silently ignored
+   unless the index may never fall back to in-memory operation. */
+static void
+mail_index_strmap_set_syscall_error(struct mail_index_strmap *strmap,
+ const char *function)
+{
+ i_assert(function != NULL);
+
+ if (ENOSPACE(errno)) {
+ strmap->index->last_error.nodiskspace = TRUE;
+ if ((strmap->index->flags &
+ MAIL_INDEX_OPEN_FLAG_NEVER_IN_MEMORY) == 0)
+ return;
+ }
+
+ mail_index_set_error(strmap->index,
+ "%s failed with strmap index file %s: %m",
+ function, strmap->path);
+}
+
+/* Release any held lock, close the fd and drop the input stream.
+   Safe to call when already closed. */
+static void mail_index_strmap_close(struct mail_index_strmap *strmap)
+{
+ if (strmap->file_lock != NULL)
+ file_lock_free(&strmap->file_lock);
+ else if (strmap->dotlock != NULL)
+ file_dotlock_delete(&strmap->dotlock);
+
+ if (strmap->fd != -1) {
+ if (close(strmap->fd) < 0)
+ mail_index_strmap_set_syscall_error(strmap, "close()");
+ strmap->fd = -1;
+ }
+ i_stream_unref(&strmap->input);
+}
+
+/* Close and free the strmap; *_strmap is NULLed. */
+void mail_index_strmap_deinit(struct mail_index_strmap **_strmap)
+{
+ struct mail_index_strmap *strmap = *_strmap;
+
+ *_strmap = NULL;
+ mail_index_strmap_close(strmap);
+ i_free(strmap->path);
+ i_free(strmap);
+}
+
+/* hash2 key-hash callback: the key's precomputed CRC32 is the hash. */
+static unsigned int mail_index_strmap_hash_key(const void *_key)
+{
+ const struct mail_index_strmap_hash_key *key = _key;
+
+ return key->crc32;
+}
+
+/* hash2 key-vs-value comparison callback: defer to the caller-provided
+   key_compare (the string itself isn't stored in the hash records). */
+static bool
+mail_index_strmap_hash_cmp(const void *_key, const void *_value, void *context)
+{
+ const struct mail_index_strmap_hash_key *key = _key;
+ const struct mail_index_strmap_rec *rec = _value;
+ struct mail_index_strmap_view *view = context;
+
+ return view->key_compare(key->str, rec, view->cb_context);
+}
+
+/* Create a strmap view bound to idx_view. The caller supplies the
+   comparison/remap callbacks and receives read-only pointers to the
+   internal records array and hash (recs_r, hash_r), which remain valid
+   for the view's lifetime. */
+struct mail_index_strmap_view *
+mail_index_strmap_view_open(struct mail_index_strmap *strmap,
+ struct mail_index_view *idx_view,
+ mail_index_strmap_key_cmp_t *key_compare_cb,
+ mail_index_strmap_rec_cmp_t *rec_compare_cb,
+ mail_index_strmap_remap_t *remap_cb,
+ void *context,
+ const ARRAY_TYPE(mail_index_strmap_rec) **recs_r,
+ const struct hash2_table **hash_r)
+{
+ struct mail_index_strmap_view *view;
+
+ view = i_new(struct mail_index_strmap_view, 1);
+ view->strmap = strmap;
+ view->view = idx_view;
+ view->key_compare = key_compare_cb;
+ view->rec_compare = rec_compare_cb;
+ view->remap_cb = remap_cb;
+ view->cb_context = context;
+ /* string index 0 is reserved; valid indexes start at 1 */
+ view->next_str_idx = 1;
+
+ i_array_init(&view->recs, 64);
+ i_array_init(&view->recs_crc32, 64);
+ view->hash = hash2_create(0, sizeof(struct mail_index_strmap_rec),
+ mail_index_strmap_hash_key,
+ mail_index_strmap_hash_cmp, view);
+ *recs_r = &view->recs;
+ *hash_r = view->hash;
+ return view;
+}
+
+/* Free the view and its arrays/hash; *_view is NULLed.
+   The underlying strmap isn't closed. */
+void mail_index_strmap_view_close(struct mail_index_strmap_view **_view)
+{
+ struct mail_index_strmap_view *view = *_view;
+
+ *_view = NULL;
+ array_free(&view->recs);
+ array_free(&view->recs_crc32);
+ hash2_destroy(&view->hash);
+ i_free(view);
+}
+
+/* Highest string index assigned so far (next_str_idx is the next free one). */
+uint32_t mail_index_strmap_view_get_highest_idx(struct mail_index_strmap_view *view)
+{
+ return view->next_str_idx-1;
+}
+
+/* Drop all in-memory records and notify the caller via remap_cb(NULL,...)
+   that all its string indexes are now invalid. */
+static void mail_index_strmap_view_reset(struct mail_index_strmap_view *view)
+{
+ view->remap_cb(NULL, 0, 0, view->cb_context);
+ array_clear(&view->recs);
+ array_clear(&view->recs_crc32);
+ hash2_clear(view->hash);
+
+ view->last_added_uid = 0;
+ view->lost_expunged_uid = 0;
+ view->desynced = FALSE;
+}
+
+/* Handle a corrupted strmap file: log, delete the file, close it and
+   reset the in-memory state so it can be rebuilt from scratch. */
+void mail_index_strmap_view_set_corrupted(struct mail_index_strmap_view *view)
+{
+ mail_index_set_error(view->strmap->index,
+ "Corrupted strmap index file: %s",
+ view->strmap->path);
+ i_unlink(view->strmap->path);
+ mail_index_strmap_close(view->strmap);
+ mail_index_strmap_view_reset(view);
+}
+
+/* Open the strmap file and validate its header against the index.
+   Returns 0 on success, when the file doesn't exist, or when it's stale
+   (wrong version / uid_validity; the file is then unlinked), -1 on I/O
+   error. On success the view is reset so the records get re-read from
+   the beginning of the file at the next sync. */
+static int mail_index_strmap_open(struct mail_index_strmap_view *view)
+{
+ struct mail_index_strmap *strmap = view->strmap;
+ const struct mail_index_header *idx_hdr;
+ struct mail_index_strmap_header hdr;
+ const unsigned char *data;
+ size_t size;
+ int ret;
+
+ i_assert(strmap->fd == -1);
+
+ strmap->fd = open(strmap->path, O_RDWR);
+ if (strmap->fd == -1) {
+ if (errno == ENOENT)
+ return 0;
+ mail_index_strmap_set_syscall_error(strmap, "open()");
+ return -1;
+ }
+ strmap->input = i_stream_create_fd(strmap->fd, SIZE_MAX);
+ ret = i_stream_read_bytes(strmap->input, &data, &size, sizeof(hdr));
+ if (ret <= 0) {
+ if (ret < 0) {
+ mail_index_strmap_set_syscall_error(strmap, "read()");
+ mail_index_strmap_close(strmap);
+ } else {
+ /* file is too short to even contain a header */
+ i_assert(ret == 0);
+ mail_index_strmap_view_set_corrupted(view);
+ }
+ return ret;
+ }
+ memcpy(&hdr, data, sizeof(hdr));
+
+ idx_hdr = mail_index_get_header(view->view);
+ if (hdr.version != MAIL_INDEX_STRMAP_VERSION ||
+ hdr.uid_validity != idx_hdr->uid_validity) {
+ /* need to rebuild. if we already had something in the strmap,
+ we can keep it. */
+ i_unlink(strmap->path);
+ mail_index_strmap_close(strmap);
+ return 0;
+ }
+
+ /* we'll read the entire file from the beginning */
+ view->last_added_uid = 0;
+ view->last_read_uid = 0;
+ view->total_ref_count = 0;
+ view->last_read_block_offset = sizeof(struct mail_index_strmap_header);
+ view->next_str_idx = 1;
+
+ mail_index_strmap_view_reset(view);
+ return 0;
+}
+
+/* Returns TRUE if the on-disk strmap file has been replaced (different
+   inode/device than the open fd) or stat()ing fails, i.e. the file must
+   be reopened. */
+static bool mail_index_strmap_need_reopen(struct mail_index_strmap *strmap)
+{
+ struct stat st1, st2;
+
+ /* FIXME: nfs flush */
+ if (fstat(strmap->fd, &st1) < 0) {
+ /* ESTALE is expected on NFS when the file was replaced;
+    don't log it as an error */
+ if (!ESTALE_FSTAT(errno))
+ mail_index_strmap_set_syscall_error(strmap, "fstat()");
+ return TRUE;
+ }
+ if (stat(strmap->path, &st2) < 0) {
+ mail_index_strmap_set_syscall_error(strmap, "stat()");
+ return TRUE;
+ }
+ return st1.st_ino != st2.st_ino || !CMP_DEV_T(st1.st_dev, st2.st_dev);
+}
+
+/* Make sure the strmap file is (re)opened and usable for reading.
+   Returns 0 if the existing stream can be used or the file was
+   (re)opened, -1 if the strmap can't be used (in-memory index, lost
+   expunge conflict still unresolved, or open failure). */
+static int mail_index_strmap_refresh(struct mail_index_strmap_view *view)
+{
+ uint32_t seq;
+
+ if (MAIL_INDEX_IS_IN_MEMORY(view->strmap->index))
+ return -1;
+
+ if (view->strmap->fd != -1) {
+ if (!mail_index_strmap_need_reopen(view->strmap)) {
+ if (view->lost_expunged_uid != 0) {
+ /* last read failed because view had a message
+ that didn't exist in the strmap (because it
+ was expunged by another session). if the
+ message still isn't expunged in this view,
+ just continue using the current strmap. */
+ if (mail_index_lookup_seq(view->view,
+ view->lost_expunged_uid, &seq))
+ return -1;
+ } else if (view->desynced) {
+ /* our view isn't synced with the disk, we
+ can't read strmap without first resetting
+ the view */
+ } else {
+ i_stream_sync(view->strmap->input);
+ return 0;
+ }
+ }
+ /* file replaced, view desynced or conflict resolved by
+    expunge - reopen from scratch */
+ mail_index_strmap_close(view->strmap);
+ }
+
+ return mail_index_strmap_open(view);
+}
+
+/* Read one packed (variable-length) number from the stream, without
+   crossing the current block's end_offset. Returns 1 on success,
+   0 if not enough data is available, -1 on a malformed number. */
+static int
+mail_index_strmap_read_packed(struct mail_index_strmap_read_context *ctx,
+ uint32_t *num_r)
+{
+ const unsigned char *data;
+ const uint8_t *bytes, *p, *end;
+ size_t size;
+ int ret;
+
+ ret = i_stream_read_bytes(ctx->input, &data, &size, sizeof(*num_r));
+ if (ret <= 0)
+ return ret;
+
+ /* clamp the readable area to the current block */
+ if (ctx->input->v_offset + size > ctx->end_offset)
+ size = ctx->end_offset - ctx->input->v_offset;
+ bytes = p = (const uint8_t *)data;
+ end = bytes + size;
+
+ if (mail_index_unpack_num(&p, end, num_r) < 0)
+ return -1;
+ i_stream_skip(ctx->input, p - bytes);
+ return 1;
+}
+
+/* Check whether the given UID exists in our index view, advancing
+   uid_lookup_seq as UIDs are consumed in ascending order.
+   Returns 1 if it exists, 0 if not (possibly setting too_large_uids if
+   the UID is newer than the view), -1 if the strmap is missing a message
+   that exists in the index (lost_expunged_uid is set if that message was
+   expunged elsewhere). */
+static int
+mail_index_strmap_uid_exists(struct mail_index_strmap_read_context *ctx,
+ uint32_t uid)
+{
+ const struct mail_index_record *rec;
+
+ i_assert(ctx->uid_lookup_seq > 0);
+
+ if (ctx->uid_lookup_seq > ctx->view->view->map->hdr.messages_count) {
+ if (uid >= ctx->view->view->map->hdr.next_uid) {
+ /* thread index has larger UIDs than what we've seen
+ in our view. we'll have to read them again later
+ when we know about them */
+ ctx->too_large_uids = TRUE;
+ }
+ return 0;
+ }
+
+ rec = MAIL_INDEX_REC_AT_SEQ(ctx->view->view->map, ctx->uid_lookup_seq);
+ if (rec->uid == uid) {
+ ctx->uid_lookup_seq++;
+ return 1;
+ } else if (rec->uid > uid) {
+ return 0;
+ } else {
+ /* record that exists in index is missing from strmap.
+ see if it's because the strmap is corrupted or because
+ our current view is a bit stale and the message has already
+ been expunged. */
+ mail_index_refresh(ctx->view->view->index);
+ if (mail_index_is_expunged(ctx->view->view,
+ ctx->uid_lookup_seq))
+ ctx->lost_expunged_uid = rec->uid;
+ return -1;
+ }
+}
+
+/* Parse the first string reference of the record for ctx->rec.uid and
+   leave the context positioned for reading the rest via
+   mail_index_strmap_read_rec_next(). Returns 1 on success, 0 if the
+   message was expunged (record skipped), -1 on error/short read. */
+static int
+mail_index_strmap_read_rec_first(struct mail_index_strmap_read_context *ctx,
+ uint32_t *crc32_r)
+{
+ size_t size;
+ uint32_t n, i, count, str_idx;
+ int ret;
+
+ /* <uid> <n> <crc32>*count <str_idx>*count
+ where
+ n = 0 -> count=1 (only Message-ID:)
+ n = 1 -> count=2 (Message-ID: + In-Reply-To:)
+ n = 2+ -> count=n (Message-ID: + References:)
+ */
+ if (mail_index_strmap_read_packed(ctx, &n) <= 0)
+ return -1;
+ count = n < 2 ? n + 1 : n;
+ ctx->view->total_ref_count += count;
+
+ ctx->rec_size = count * (sizeof(ctx->rec.str_idx) + sizeof(*crc32_r));
+ ret = mail_index_strmap_uid_exists(ctx, ctx->rec.uid);
+ if (ret < 0)
+ return -1;
+ /* make sure the full record is buffered before parsing it */
+ if (i_stream_read_bytes(ctx->view->strmap->input, &ctx->data, &size, ctx->rec_size) <= 0)
+ return -1;
+ /* crc32 values come first, string indexes follow them */
+ ctx->str_idx_base = ctx->data + count * sizeof(uint32_t);
+
+ if (ret == 0) {
+ /* this message has already been expunged, ignore it.
+ update highest string indexes anyway. */
+ for (i = 0; i < count; i++) {
+ memcpy(&str_idx, ctx->str_idx_base, sizeof(str_idx));
+ if (ctx->highest_str_idx < str_idx)
+ ctx->highest_str_idx = str_idx;
+ ctx->str_idx_base += sizeof(str_idx);
+ }
+ i_stream_skip(ctx->view->strmap->input, ctx->rec_size);
+ return 0;
+ }
+
+ /* everything exists. save it. FIXME: these ref_index values
+ are thread index specific, perhaps something more generic
+ should be used some day */
+ ctx->end = ctx->data + count * sizeof(*crc32_r);
+
+ ctx->next_ref_index = 0;
+ if (!mail_index_strmap_read_rec_next(ctx, crc32_r))
+ i_unreached();
+ /* ref_index 1 = In-Reply-To:, 2 = first References: entry */
+ ctx->next_ref_index = n == 1 ? 1 : 2;
+ return 1;
+}
+
+/* Read the next string reference of the current record into ctx->rec
+   and *crc32_r. Returns FALSE when the record is exhausted (the record's
+   bytes are then skipped from the stream). */
+static bool
+mail_index_strmap_read_rec_next(struct mail_index_strmap_read_context *ctx,
+ uint32_t *crc32_r)
+{
+ if (ctx->data == ctx->end) {
+ i_stream_skip(ctx->view->strmap->input, ctx->rec_size);
+ return FALSE;
+ }
+
+ /* FIXME: str_idx could be stored as packed relative values
+ (first relative to highest_idx, the rest relative to the
+ previous str_idx) */
+
+ /* read the record contents */
+ memcpy(&ctx->rec.str_idx, ctx->str_idx_base, sizeof(ctx->rec.str_idx));
+ memcpy(crc32_r, ctx->data, sizeof(*crc32_r));
+
+ ctx->rec.ref_index = ctx->next_ref_index++;
+
+ if (ctx->highest_str_idx < ctx->rec.str_idx)
+ ctx->highest_str_idx = ctx->rec.str_idx;
+
+ /* get to the next record */
+ ctx->data += sizeof(*crc32_r);
+ ctx->str_idx_base += sizeof(ctx->rec.str_idx);
+ return TRUE;
+}
+
+/* Start reading the next block from the strmap file: parse its size,
+   set up end_offset and the starting UID/sequence. Returns 1 when a
+   block is ready to read, 0 when there's nothing (more) to read yet,
+   -1 on read error or corruption. */
+static int
+strmap_read_block_init(struct mail_index_strmap_view *view,
+ struct mail_index_strmap_read_context *ctx)
+{
+ struct mail_index_strmap *strmap = view->strmap;
+ const unsigned char *data;
+ size_t size;
+ uint32_t block_size, seq1, seq2;
+ int ret;
+
+ if (view->last_read_uid + 1 >= view->view->map->hdr.next_uid) {
+ /* come back later when we know about the new UIDs */
+ return 0;
+ }
+
+ i_zero(ctx);
+ ret = i_stream_read_bytes(strmap->input, &data, &size,
+ sizeof(block_size));
+ if (ret <= 0) {
+ if (strmap->input->stream_errno == 0) {
+ /* no new data */
+ return 0;
+ }
+ mail_index_strmap_set_syscall_error(strmap, "read()");
+ return -1;
+ }
+ memcpy(&block_size, data, sizeof(block_size));
+ block_size = mail_index_offset_to_uint32(block_size) >> 2;
+ if (block_size == 0) {
+ /* the rest of the file is either not written, or the previous
+ write didn't finish */
+ return 0;
+ }
+ i_stream_skip(strmap->input, sizeof(block_size));
+
+ ctx->view = view;
+ ctx->input = strmap->input;
+ ctx->end_offset = strmap->input->v_offset + block_size;
+ if (ctx->end_offset < strmap->input->v_offset) {
+ /* block size too large */
+ mail_index_strmap_view_set_corrupted(view);
+ return -1;
+ }
+ ctx->rec.uid = view->last_read_uid + 1;
+
+ /* FIXME: when reading multiple blocks we shouldn't have to calculate
+ this every time */
+ if (!mail_index_lookup_seq_range(view->view, ctx->rec.uid, (uint32_t)-1,
+ &seq1, &seq2))
+ seq1 = mail_index_view_get_messages_count(view->view) + 1;
+ ctx->uid_lookup_seq = seq1;
+ return 1;
+}
+
+/* Return the next string reference in the current block, advancing to
+   the next message (UIDs stored as deltas) when the current record is
+   exhausted. Returns 1 with ctx->rec/crc32_r filled, 0 at end of block,
+   -1 on error. */
+static int
+strmap_read_block_next(struct mail_index_strmap_read_context *ctx,
+ uint32_t *crc32_r)
+{
+ uint32_t uid_diff;
+ int ret;
+
+ if (mail_index_strmap_read_rec_next(ctx, crc32_r))
+ return 1;
+
+ /* get next UID */
+ do {
+ if (ctx->input->v_offset == ctx->end_offset) {
+ /* this block is done */
+ return 0;
+ }
+ if (mail_index_strmap_read_packed(ctx, &uid_diff) <= 0)
+ return -1;
+
+ ctx->rec.uid += uid_diff;
+ /* ret == 0 means the message was expunged; skip to the next */
+ ret = mail_index_strmap_read_rec_first(ctx, crc32_r);
+ } while (ret == 0);
+ return ret;
+}
+
+/* Finish reading a block: sanity-check the string indexes, record
+   conflicts/errors, and on success advance the view's read position.
+   Passes through ret (the block read result), or -1 if the block turned
+   out to be corrupted. */
+static int
+strmap_read_block_deinit(struct mail_index_strmap_read_context *ctx, int ret,
+ bool update_block_offset)
+{
+ struct mail_index_strmap_view *view = ctx->view;
+ struct mail_index_strmap *strmap = view->strmap;
+
+ if (ctx->highest_str_idx > view->total_ref_count) {
+ /* if all string indexes are unique, highest_str_index equals
+ total_ref_count. otherwise it's always lower. */
+ mail_index_set_error(strmap->index,
+ "Corrupted strmap index file %s: "
+ "String indexes too high "
+ "(highest=%u max=%u)",
+ strmap->path, ctx->highest_str_idx,
+ view->total_ref_count);
+ mail_index_strmap_view_set_corrupted(view);
+ return -1;
+ }
+ if (ctx->lost_expunged_uid != 0) {
+ /* our view contained a message that had since been expunged. */
+ i_assert(ret < 0);
+ view->lost_expunged_uid = ctx->lost_expunged_uid;
+ } else if (ret < 0) {
+ if (strmap->input->stream_errno != 0)
+ mail_index_strmap_set_syscall_error(strmap, "read()");
+ else
+ mail_index_strmap_view_set_corrupted(view);
+ return -1;
+ } else if (update_block_offset && !ctx->too_large_uids) {
+ /* block fully consumed - remember where to resume */
+ view->last_read_block_offset = strmap->input->v_offset;
+ view->last_read_uid = ctx->rec.uid;
+ }
+ if (view->next_str_idx <= ctx->highest_str_idx)
+ view->next_str_idx = ctx->highest_str_idx + 1;
+ return ret;
+}
+
+/* A hash record shares ctx->rec's CRC32 but has a different string index.
+   Returns TRUE if it's harmless (record removed or strings actually
+   differ), FALSE if it must be treated as a real conflict. */
+static bool
+strmap_view_sync_handle_conflict(struct mail_index_strmap_read_context *ctx,
+ const struct mail_index_strmap_rec *hash_rec,
+ struct hash2_iter *iter)
+{
+ uint32_t seq;
+
+ /* hopefully it's a message that has since been expunged */
+ if (!mail_index_lookup_seq(ctx->view->view, hash_rec->uid, &seq)) {
+ /* message is no longer in our view. remove it completely. */
+ hash2_remove_iter(ctx->view->hash, iter);
+ return TRUE;
+ }
+ if (mail_index_is_expunged(ctx->view->view, seq)) {
+ /* it's quite likely a conflict. we may not be able to verify
+ it, so just assume it is. nothing breaks even if we guess
+ wrong, the performance just suffers a bit. */
+ return FALSE;
+ }
+
+ /* 0 means "doesn't match", which is the only acceptable case */
+ return ctx->view->rec_compare(&ctx->rec, hash_rec,
+ ctx->view->cb_context) == 0;
+}
+
+/* Check the newly read record against all existing hash records with the
+   same CRC32. Returns 0 if there's no conflict, -1 on an unresolvable
+   conflict (lost_expunged_uid is set to the conflicting UID). */
+static int
+strmap_view_sync_block_check_conflicts(struct mail_index_strmap_read_context *ctx,
+ uint32_t crc32)
+{
+ struct mail_index_strmap_rec *hash_rec;
+ struct hash2_iter iter;
+
+ if (crc32 == 0) {
+ /* unique string - there are no conflicts */
+ return 0;
+ }
+
+ /* check for conflicting string indexes. they may happen if
+
+ 1) msgid exists only for a message X that has been expunged
+ 2) another process doesn't see X, but sees msgid for another
+ message and writes it using a new string index
+ 3) if we still see X, we now see the same msgid with two
+ string indexes.
+
+ if we detect such a conflict, we can't continue using the
+ strmap index until X has been expunged. */
+ i_zero(&iter);
+ while ((hash_rec = hash2_iterate(ctx->view->hash,
+ crc32, &iter)) != NULL &&
+ hash_rec->str_idx != ctx->rec.str_idx) {
+ /* CRC32 matches, but string index doesn't */
+ if (!strmap_view_sync_handle_conflict(ctx, hash_rec, &iter)) {
+ ctx->lost_expunged_uid = hash_rec->uid;
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/* Read all records from the current block into the view's records array
+   and hash, skipping records already added and aborting on conflicts.
+   Returns the strmap_read_block_deinit() result. */
+static int
+mail_index_strmap_view_sync_block(struct mail_index_strmap_read_context *ctx)
+{
+ struct mail_index_strmap_rec *hash_rec;
+ uint32_t crc32, prev_uid = 0;
+ int ret;
+
+ while ((ret = strmap_read_block_next(ctx, &crc32)) > 0) {
+ if (ctx->rec.uid <= ctx->view->last_added_uid) {
+ if (ctx->rec.uid < ctx->view->last_added_uid ||
+ prev_uid != ctx->rec.uid) {
+ /* we've already added this */
+ continue;
+ }
+ }
+ prev_uid = ctx->rec.uid;
+
+ if (strmap_view_sync_block_check_conflicts(ctx, crc32) < 0) {
+ ret = -1;
+ break;
+ }
+ ctx->view->last_added_uid = ctx->rec.uid;
+
+ /* add the record to records array */
+ array_push_back(&ctx->view->recs, &ctx->rec);
+ array_push_back(&ctx->view->recs_crc32, &crc32);
+
+ /* add a separate copy of the record to hash */
+ hash_rec = hash2_insert_hash(ctx->view->hash, crc32);
+ memcpy(hash_rec, &ctx->rec, sizeof(*hash_rec));
+ }
+ return strmap_read_block_deinit(ctx, ret, TRUE);
+}
+
+/* Begin syncing: read any new blocks from the strmap file into memory.
+   On read failure the view keeps whatever was read and is marked
+   desynced. *last_uid_r is set to the highest UID already present, so
+   the caller knows where to continue adding from. */
+struct mail_index_strmap_view_sync *
+mail_index_strmap_view_sync_init(struct mail_index_strmap_view *view,
+ uint32_t *last_uid_r)
+{
+ struct mail_index_strmap_view_sync *sync;
+ struct mail_index_strmap_read_context ctx;
+ int ret;
+
+ sync = i_new(struct mail_index_strmap_view_sync, 1);
+ sync->view = view;
+
+ if (mail_index_strmap_refresh(view) < 0) {
+ /* reading the strmap failed - just ignore and do
+ this in-memory based on whatever we knew last */
+ } else if (view->strmap->input != NULL) {
+ i_stream_seek(view->strmap->input,
+ view->last_read_block_offset);
+ while ((ret = strmap_read_block_init(view, &ctx)) > 0) {
+ if (mail_index_strmap_view_sync_block(&ctx) < 0) {
+ ret = -1;
+ break;
+ }
+ /* stop if the file contains UIDs our view doesn't
+    know about yet; retry after next view sync */
+ if (ctx.too_large_uids)
+ break;
+ }
+
+ if (ret < 0) {
+ /* something failed - we can still use the strmap as far
+ as we managed to read it, but our view is now out
+ of sync */
+ view->desynced = TRUE;
+ } else {
+ i_assert(view->lost_expunged_uid == 0);
+ }
+ }
+ *last_uid_r = view->last_added_uid;
+ return sync;
+}
+
+/* CRC32 of str, guaranteed non-zero (0 is reserved to mean "unique
+   string" in recs_crc32). */
+static inline uint32_t crc32_str_nonzero(const char *str)
+{
+ /* we'll flip the bits because of a bug in our old crc32 code.
+ this keeps the index format backwards compatible with the new fixed
+ crc32 code. */
+ uint32_t value = crc32_str(str) ^ 0xffffffffU;
+ return value == 0 ? 1 : value;
+}
+
+/* Add a (uid, ref_index) -> key mapping. If the key string was seen
+   before, its existing string index is reused; otherwise a new one is
+   assigned. Records must be added in ascending (uid, ref_index) order. */
+void mail_index_strmap_view_sync_add(struct mail_index_strmap_view_sync *sync,
+ uint32_t uid, uint32_t ref_index,
+ const char *key)
+{
+ struct mail_index_strmap_view *view = sync->view;
+ struct mail_index_strmap_rec *rec, *old_rec;
+ struct mail_index_strmap_hash_key hash_key;
+ uint32_t str_idx;
+
+ i_assert(uid > view->last_added_uid ||
+ (uid == view->last_added_uid &&
+ ref_index > view->last_ref_index));
+
+ hash_key.str = key;
+ hash_key.crc32 = crc32_str_nonzero(key);
+
+ old_rec = hash2_lookup(view->hash, &hash_key);
+ if (old_rec != NULL) {
+ /* The string already exists, use the same unique idx */
+ str_idx = old_rec->str_idx;
+ } else {
+ /* Newly seen string, assign a new unique idx to it */
+ str_idx = view->next_str_idx++;
+ }
+ i_assert(str_idx != 0);
+
+ rec = hash2_insert(view->hash, &hash_key);
+ rec->uid = uid;
+ rec->ref_index = ref_index;
+ rec->str_idx = str_idx;
+ array_push_back(&view->recs, rec);
+ array_push_back(&view->recs_crc32, &hash_key.crc32);
+
+ view->last_added_uid = uid;
+ view->last_ref_index = ref_index;
+}
+
+/* Add a record whose string is known to be unique: a fresh string index
+   is assigned and no hash entry is created (crc32 recorded as 0). */
+void mail_index_strmap_view_sync_add_unique(struct mail_index_strmap_view_sync *sync,
+ uint32_t uid, uint32_t ref_index)
+{
+ struct mail_index_strmap_view *view = sync->view;
+ struct mail_index_strmap_rec rec;
+
+ i_assert(uid > view->last_added_uid ||
+ (uid == view->last_added_uid &&
+ ref_index > view->last_ref_index));
+
+ i_zero(&rec);
+ rec.uid = uid;
+ rec.ref_index = ref_index;
+ rec.str_idx = view->next_str_idx++;
+ array_push_back(&view->recs, &rec);
+ array_append_zero(&view->recs_crc32);
+
+ view->last_added_uid = uid;
+ view->last_ref_index = ref_index;
+}
+
+/* Ensure there's a zeroed record just past the end of the recs array
+   (appended then popped, so the array count itself is unchanged). */
+static void
+mail_index_strmap_zero_terminate(struct mail_index_strmap_view *view)
+{
+ /* zero-terminate the records array */
+ array_append_zero(&view->recs);
+ array_pop_back(&view->recs);
+}
+
+/* Compact the string index space: drop records of expunged messages,
+   renumber the remaining string indexes to be contiguous starting from 1,
+   notify the caller via remap_cb, and rebuild the hash. */
+static void mail_index_strmap_view_renumber(struct mail_index_strmap_view *view)
+{
+ struct mail_index_strmap_read_context ctx;
+ struct mail_index_strmap_rec *recs, *hash_rec;
+ uint32_t prev_uid, str_idx, *recs_crc32, *renumber_map;
+ unsigned int i, dest, count, count2;
+ int ret;
+
+ i_zero(&ctx);
+ ctx.view = view;
+ ctx.uid_lookup_seq = 1;
+
+ /* create a map of old -> new index and remove records of
+ expunged messages */
+ renumber_map = i_new(uint32_t, view->next_str_idx);
+ str_idx = 0; prev_uid = 0;
+ recs = array_get_modifiable(&view->recs, &count);
+ recs_crc32 = array_get_modifiable(&view->recs_crc32, &count2);
+ i_assert(count == count2);
+
+ for (i = dest = 0; i < count; ) {
+ if (prev_uid != recs[i].uid) {
+ /* see if this record should be removed */
+ prev_uid = recs[i].uid;
+ ret = mail_index_strmap_uid_exists(&ctx, prev_uid);
+ i_assert(ret >= 0);
+ if (ret == 0) {
+ /* message expunged */
+ do {
+ i++;
+ } while (i < count && recs[i].uid == prev_uid);
+ continue;
+ }
+ }
+
+ i_assert(recs[i].str_idx < view->next_str_idx);
+ if (renumber_map[recs[i].str_idx] == 0)
+ renumber_map[recs[i].str_idx] = ++str_idx;
+ if (i != dest) {
+ /* compact the arrays in-place over removed records */
+ recs[dest] = recs[i];
+ recs_crc32[dest] = recs_crc32[i];
+ }
+ i++; dest++;
+ }
+ i_assert(renumber_map[0] == 0);
+ array_delete(&view->recs, dest, i-dest);
+ array_delete(&view->recs_crc32, dest, i-dest);
+ mail_index_strmap_zero_terminate(view);
+
+ /* notify caller of the renumbering */
+ i_assert(str_idx <= view->next_str_idx);
+ view->remap_cb(renumber_map, view->next_str_idx, str_idx + 1,
+ view->cb_context);
+
+ /* renumber the indexes in-place and recreate the hash */
+ recs = array_get_modifiable(&view->recs, &count);
+ hash2_clear(view->hash);
+ for (i = 0; i < count; i++) {
+ recs[i].str_idx = renumber_map[recs[i].str_idx];
+ hash_rec = hash2_insert_hash(view->hash, recs_crc32[i]);
+ memcpy(hash_rec, &recs[i], sizeof(*hash_rec));
+ }
+
+ /* update the new next_str_idx only after remapping */
+ view->next_str_idx = str_idx + 1;
+ i_free(renumber_map);
+}
+
+/* Serialize records starting at array position i into one on-disk block:
+ a block-size prefix (patched in afterwards), then per-UID groups of
+ <uid delta> <n> <crc32>*count <str_idx>*count. Updates the view's
+ last_read_uid/last_read_block_offset on success. */
+static void mail_index_strmap_write_block(struct mail_index_strmap_view *view,
+ struct ostream *output,
+ unsigned int i, uint32_t base_uid)
+{
+ const struct mail_index_strmap_rec *recs;
+ const uint32_t *crc32;
+ unsigned int j, n, count, count2, uid_rec_count;
+ uint32_t block_size;
+ uint8_t *p, packed[MAIL_INDEX_PACK_MAX_SIZE*2];
+ uoff_t block_offset, end_offset;
+
+ /* skip over the block size for now, we don't know it yet */
+ block_offset = output->offset;
+ block_size = 0;
+ o_stream_nsend(output, &block_size, sizeof(block_size));
+
+ /* write records */
+ recs = array_get(&view->recs, &count);
+ crc32 = array_get(&view->recs_crc32, &count2);
+ i_assert(count == count2);
+ while (i < count) {
+ /* @UNSAFE: <uid diff> - UIDs are delta-encoded against
+ the previous record's UID */
+ p = packed;
+ mail_index_pack_num(&p, recs[i].uid - base_uid);
+ base_uid = recs[i].uid;
+
+ /* find how many records belong to this UID */
+ uid_rec_count = 1;
+ for (j = i + 1; j < count; j++) {
+ if (recs[j].uid != base_uid)
+ break;
+ uid_rec_count++;
+ }
+ view->total_ref_count += uid_rec_count;
+
+ /* <n> <crc32>*count <str_idx>*count -
+ FIXME: thread index specific code */
+ i_assert(recs[i].ref_index == 0);
+ if (uid_rec_count == 1) {
+ /* Only Message-ID: header */
+ n = 0;
+ } else if (recs[i+1].ref_index == 1) {
+ /* In-Reply-To: header */
+ n = 1;
+ i_assert(uid_rec_count == 2);
+ } else {
+ /* References: header */
+ n = uid_rec_count;
+ i_assert(recs[i+1].ref_index == 2);
+ }
+
+ mail_index_pack_num(&p, n);
+ o_stream_nsend(output, packed, p-packed);
+ for (j = 0; j < uid_rec_count; j++)
+ o_stream_nsend(output, &crc32[i+j], sizeof(crc32[i+j]));
+ for (j = 0; j < uid_rec_count; j++) {
+ i_assert(j < 2 || recs[i+j].ref_index == j+1);
+ o_stream_nsend(output, &recs[i+j].str_idx,
+ sizeof(recs[i+j].str_idx));
+ }
+ i += uid_rec_count;
+ }
+
+ /* we know the block size now - write it */
+ block_size = output->offset - (block_offset + sizeof(block_size));
+ block_size = mail_index_uint32_to_offset(block_size << 2);
+ i_assert(block_size != 0);
+
+ /* seek back to patch the size prefix, then restore position */
+ end_offset = output->offset;
+ (void)o_stream_seek(output, block_offset);
+ o_stream_nsend(output, &block_size, sizeof(block_size));
+ (void)o_stream_seek(output, end_offset);
+
+ /* on stream error, skip updating the view's read state */
+ if (output->stream_errno != 0)
+ return;
+
+ i_assert(view->last_added_uid == recs[count-1].uid);
+ view->last_read_uid = recs[count-1].uid;
+ view->last_read_block_offset = output->offset;
+}
+
+/* Write a complete strmap file to the output stream: header (version +
+ uid_validity copied from the index header) followed by all records as
+ a single block starting from base UID 1. */
+static void
+mail_index_strmap_recreate_write(struct mail_index_strmap_view *view,
+ struct ostream *output)
+{
+ const struct mail_index_header *idx_hdr;
+ struct mail_index_strmap_header hdr;
+
+ idx_hdr = mail_index_get_header(view->view);
+
+ /* write header */
+ i_zero(&hdr);
+ hdr.version = MAIL_INDEX_STRMAP_VERSION;
+ hdr.uid_validity = idx_hdr->uid_validity;
+ o_stream_nsend(output, &hdr, sizeof(hdr));
+
+ /* recount references from scratch for the full rewrite */
+ view->total_ref_count = 0;
+ mail_index_strmap_write_block(view, output, 0, 1);
+}
+
+/* Rewrite the strmap file from scratch: write everything to a hostpid
+ temp file and rename() it over the old path (atomic replacement).
+ If there are no records at all, just unlink the existing file.
+ Returns 0 on success, -1 on error (temp file is removed on failure). */
+static int mail_index_strmap_recreate(struct mail_index_strmap_view *view)
+{
+ struct mail_index_strmap *strmap = view->strmap;
+ string_t *str;
+ struct ostream *output;
+ const char *temp_path;
+ int fd, ret = 0;
+
+ if (array_count(&view->recs) == 0) {
+ /* everything expunged - just unlink the existing index */
+ if (unlink(strmap->path) < 0 && errno != ENOENT)
+ mail_index_strmap_set_syscall_error(strmap, "unlink()");
+ return 0;
+ }
+
+ str = t_str_new(256);
+ str_append(str, strmap->path);
+ fd = safe_mkstemp_hostpid_group(str, view->view->index->set.mode,
+ view->view->index->set.gid,
+ view->view->index->set.gid_origin);
+ temp_path = str_c(str);
+
+ if (fd == -1) {
+ mail_index_set_error(strmap->index,
+ "safe_mkstemp_hostpid(%s) failed: %m",
+ temp_path);
+ return -1;
+ }
+ output = o_stream_create_fd(fd, 0);
+ o_stream_cork(output);
+ mail_index_strmap_recreate_write(view, output);
+ if (o_stream_finish(output) < 0) {
+ mail_index_set_error(strmap->index, "write(%s) failed: %s",
+ temp_path, o_stream_get_error(output));
+ ret = -1;
+ }
+ o_stream_destroy(&output);
+ /* close() is checked too: a failure here means the data may not
+ have made it to disk, so don't rename over the old file */
+ if (close(fd) < 0) {
+ mail_index_set_error(strmap->index,
+ "close(%s) failed: %m", temp_path);
+ ret = -1;
+ } else if (ret == 0 && rename(temp_path, strmap->path) < 0) {
+ mail_index_set_error(strmap->index,
+ "rename(%s, %s) failed: %m",
+ temp_path, strmap->path);
+ ret = -1;
+ }
+ if (ret < 0)
+ i_unlink(temp_path);
+ return ret;
+}
+
+/* Take an exclusive write lock on the strmap file, using either fcntl/flock
+ style locking (via file_wait_lock) or a dotlock, depending on the index's
+ configured lock method. Returns >0 on success, 0 on timeout, -1 on error
+ (matching the underlying lock functions). */
+static int mail_index_strmap_lock(struct mail_index_strmap *strmap)
+{
+ unsigned int timeout_secs;
+ const char *error;
+ int ret;
+
+ i_assert(strmap->fd != -1);
+
+ if (strmap->index->set.lock_method != FILE_LOCK_METHOD_DOTLOCK) {
+ i_assert(strmap->file_lock == NULL);
+
+ struct file_lock_settings lock_set = {
+ .lock_method = strmap->index->set.lock_method,
+ };
+ /* don't wait longer than the index-wide lock timeout cap */
+ timeout_secs = I_MIN(MAIL_INDEX_STRMAP_TIMEOUT_SECS,
+ strmap->index->set.max_lock_timeout_secs);
+ ret = file_wait_lock(strmap->fd, strmap->path, F_WRLCK,
+ &lock_set, timeout_secs,
+ &strmap->file_lock, &error);
+ if (ret <= 0) {
+ mail_index_set_error(strmap->index,
+ "file_wait_lock() failed with strmap index file %s: %s",
+ strmap->path, error);
+ }
+ } else {
+ i_assert(strmap->dotlock == NULL);
+
+ ret = file_dotlock_create(&strmap->dotlock_settings,
+ strmap->path, 0, &strmap->dotlock);
+ if (ret <= 0) {
+ mail_index_strmap_set_syscall_error(strmap,
+ "file_dotlock_create()");
+ }
+ }
+ return ret;
+}
+
+/* Release whichever lock mail_index_strmap_lock() acquired. Safe to call
+ when no lock is held (both pointers NULL). */
+static void mail_index_strmap_unlock(struct mail_index_strmap *strmap)
+{
+ if (strmap->file_lock != NULL)
+ file_unlock(&strmap->file_lock);
+ else if (strmap->dotlock != NULL)
+ file_dotlock_delete(&strmap->dotlock);
+}
+
+/* bsearch comparator: order a UID key against a strmap record by UID. */
+static int
+strmap_rec_cmp(const uint32_t *uid, const struct mail_index_strmap_rec *rec)
+{
+ return *uid < rec->uid ? -1 :
+ (*uid > rec->uid ? 1 : 0);
+}
+
+/* Append our new in-memory records to the existing strmap file. First
+ verify that any records another process appended since our last read
+ match ours; on mismatch (see the expunge cases below) write nothing.
+ Returns 0 on success or nothing-to-do, -1 on mismatch/IO error. */
+static int
+mail_index_strmap_write_append(struct mail_index_strmap_view *view)
+{
+ struct mail_index_strmap_read_context ctx;
+ const struct mail_index_strmap_rec *old_recs;
+ unsigned int i, old_count;
+ struct ostream *output;
+ uint32_t crc32, next_uid;
+ bool full_block;
+ int ret;
+
+ /* Check first if another process had written new records to the file.
+ If there are any, hopefully they're the same as what we would be
+ writing. There are two problematic cases when messages have been
+ expunged recently:
+
+ 1) The file contains UIDs that we don't have. This means the string
+ indexes won't be compatible anymore, so we'll have to renumber ours
+ to match the ones in the strmap file.
+
+ Currently we don't bother handling 1) case. If indexes don't match
+ what we have, we just don't write anything.
+
+ 2) We have UIDs that don't exist in the file. We can't simply skip
+ those records, because other records may have pointers to them using
+ different string indexes than we have. Even if we renumbered those,
+ future appends by other processes might cause the same problem (they
+ see the string for the first time and assign it a new index, but we
+ already have internally given it another index). So the only
+ sensible choice is to write nothing and hope that the message goes
+ away soon. */
+ next_uid = view->last_read_uid + 1;
+ (void)array_bsearch_insert_pos(&view->recs, &next_uid,
+ strmap_rec_cmp, &i);
+
+ old_recs = array_get(&view->recs, &old_count);
+ if (i < old_count) {
+ /* back up to the first record of this UID, since a UID may
+ span multiple consecutive records */
+ while (i > 0 && old_recs[i-1].uid == old_recs[i].uid)
+ i--;
+ }
+
+ /* re-read the file from our last known block and compare any
+ records written by other processes against ours */
+ i_stream_sync(view->strmap->input);
+ i_stream_seek(view->strmap->input, view->last_read_block_offset);
+ full_block = TRUE; ret = 0;
+ while (i < old_count &&
+ (ret = strmap_read_block_init(view, &ctx)) > 0) {
+ while ((ret = strmap_read_block_next(&ctx, &crc32)) > 0) {
+ if (ctx.rec.uid != old_recs[i].uid ||
+ ctx.rec.str_idx != old_recs[i].str_idx) {
+ /* mismatch */
+ if (ctx.rec.uid > old_recs[i].uid) {
+ /* 1) case */
+ ctx.lost_expunged_uid = ctx.rec.uid;
+ } else if (ctx.rec.uid < old_recs[i].uid) {
+ /* 2) case */
+ ctx.lost_expunged_uid = old_recs[i].uid;
+ } else {
+ /* string index mismatch,
+ shouldn't happen */
+ }
+ ret = -1;
+ break;
+ }
+ if (++i == old_count) {
+ full_block = FALSE;
+ break;
+ }
+ }
+ if (strmap_read_block_deinit(&ctx, ret, full_block) < 0) {
+ ret = -1;
+ break;
+ }
+ }
+ if (ret < 0)
+ return -1;
+ if (i == old_count) {
+ /* nothing new to write */
+ return 0;
+ }
+ i_assert(full_block);
+ i_assert(old_recs[i].uid > view->last_read_uid);
+
+ /* write the new records */
+ output = o_stream_create_fd(view->strmap->fd, 0);
+ (void)o_stream_seek(output, view->last_read_block_offset);
+ o_stream_cork(output);
+ mail_index_strmap_write_block(view, output, i,
+ view->last_read_uid + 1);
+ if (o_stream_finish(output) < 0) {
+ mail_index_strmap_set_syscall_error(view->strmap, "write()");
+ ret = -1;
+ }
+ o_stream_destroy(&output);
+ return ret;
+}
+
+/* Persist the strmap: renumber+recreate when too many string indexes have
+ been wasted, recreate when no file exists yet, otherwise lock the file
+ and append the new records. Sets view->desynced on failure so further
+ writes are skipped until the view is re-read. */
+static int mail_index_strmap_write(struct mail_index_strmap_view *view)
+{
+ int ret;
+
+ /* FIXME: this renumbering doesn't work well when running for a long
+ time since records aren't removed from hash often enough */
+ if (STRIDX_MUST_RENUMBER(view->next_str_idx - 1,
+ hash2_count(view->hash))) {
+ mail_index_strmap_view_renumber(view);
+ if (!MAIL_INDEX_IS_IN_MEMORY(view->strmap->index)) {
+ if (mail_index_strmap_recreate(view) < 0) {
+ view->desynced = TRUE;
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ /* in-memory or already-desynced views are never written to disk */
+ if (MAIL_INDEX_IS_IN_MEMORY(view->strmap->index) || view->desynced)
+ return 0;
+
+ if (view->strmap->fd == -1) {
+ /* initial file creation */
+ if (mail_index_strmap_recreate(view) < 0) {
+ view->desynced = TRUE;
+ return -1;
+ }
+ return 0;
+ }
+
+ /* append the new records to the strmap file */
+ if (mail_index_strmap_lock(view->strmap) <= 0) {
+ /* timeout / error */
+ ret = -1;
+ } else if (mail_index_strmap_need_reopen(view->strmap)) {
+ /* the file was already recreated - leave the syncing as it is
+ for now and let the next sync re-read the file. */
+ ret = 0;
+ } else {
+ ret = mail_index_strmap_write_append(view);
+ }
+ mail_index_strmap_unlock(view->strmap);
+ if (ret < 0)
+ view->desynced = TRUE;
+ return ret;
+}
+
+/* Finish a sync: free the sync handle, write the accumulated changes to
+ disk (failure is tolerated - the view stays usable in memory) and
+ re-establish the zero terminator of the records array. */
+void mail_index_strmap_view_sync_commit(struct mail_index_strmap_view_sync **_sync)
+{
+ struct mail_index_strmap_view_sync *sync = *_sync;
+ struct mail_index_strmap_view *view = sync->view;
+
+ *_sync = NULL;
+ i_free(sync);
+
+ (void)mail_index_strmap_write(view);
+ mail_index_strmap_zero_terminate(view);
+
+ /* zero-terminate the records array */
+ /* NOTE(review): this repeats what mail_index_strmap_zero_terminate()
+ above already did (append zero + pop back). Redundant but
+ harmless; looks like a leftover - confirm against upstream. */
+ array_append_zero(&view->recs);
+ array_pop_back(&view->recs);
+}
+
+/* Abort a sync: discard all in-memory state accumulated since the view
+ was opened (full reset), restore the zero terminator and free the
+ sync handle. */
+void mail_index_strmap_view_sync_rollback(struct mail_index_strmap_view_sync **_sync)
+{
+ struct mail_index_strmap_view_sync *sync = *_sync;
+
+ *_sync = NULL;
+
+ mail_index_strmap_view_reset(sync->view);
+ mail_index_strmap_zero_terminate(sync->view);
+ i_free(sync);
+}
diff --git a/src/lib-index/mail-index-strmap.h b/src/lib-index/mail-index-strmap.h
new file mode 100644
index 0000000..c61afa6
--- /dev/null
+++ b/src/lib-index/mail-index-strmap.h
@@ -0,0 +1,81 @@
+#ifndef MAIL_INDEX_STRMAP_H
+#define MAIL_INDEX_STRMAP_H
+
+#include "hash2.h"
+
+struct mail_index;
+struct mail_index_view;
+
+/* On-disk file header of the strmap file. */
+struct mail_index_strmap_header {
+#define MAIL_INDEX_STRMAP_VERSION 1
+ uint8_t version;
+ uint8_t unused[3];
+
+ /* must match the index's uid_validity, or the file is stale */
+ uint32_t uid_validity;
+};
+
+/* One (message, string) association. */
+struct mail_index_strmap_rec {
+ /* message UID the string belongs to */
+ uint32_t uid;
+ /* ordinal of the string within the message (caller-defined;
+ the write path assumes thread-index semantics - see .c) */
+ uint32_t ref_index;
+ /* unique index number for the string */
+ uint32_t str_idx;
+};
+ARRAY_DEFINE_TYPE(mail_index_strmap_rec, struct mail_index_strmap_rec);
+
+/* Returns TRUE if the given key string matches the record. */
+typedef bool
+mail_index_strmap_key_cmp_t(const char *key,
+ const struct mail_index_strmap_rec *rec,
+ void *context);
+/* Returns 1 if matches, 0 if not, -1 if one of the records is expunged and
+ the result can't be determined */
+typedef int
+mail_index_strmap_rec_cmp_t(const struct mail_index_strmap_rec *rec1,
+ const struct mail_index_strmap_rec *rec2,
+ void *context);
+/* called when string indexes are renumbered. idx_map[old_idx] = new_idx.
+ if new_idx is 0, the record was expunged. As a special case if count=0,
+ the strmap was reset. */
+typedef void mail_index_strmap_remap_t(const uint32_t *idx_map,
+ unsigned int old_count,
+ unsigned int new_count, void *context);
+
+struct mail_index_strmap *
+mail_index_strmap_init(struct mail_index *index, const char *suffix);
+void mail_index_strmap_deinit(struct mail_index_strmap **strmap);
+
+/* Returns strmap records and hash that can be used for read-only access.
+ The records array always terminates with a record containing zeros (but it's
+ not counted in the array count). */
+struct mail_index_strmap_view *
+mail_index_strmap_view_open(struct mail_index_strmap *strmap,
+ struct mail_index_view *idx_view,
+ mail_index_strmap_key_cmp_t *key_compare_cb,
+ mail_index_strmap_rec_cmp_t *rec_compare_cb,
+ mail_index_strmap_remap_t *remap_cb,
+ void *context,
+ const ARRAY_TYPE(mail_index_strmap_rec) **recs_r,
+ const struct hash2_table **hash_r);
+void mail_index_strmap_view_close(struct mail_index_strmap_view **view);
+void mail_index_strmap_view_set_corrupted(struct mail_index_strmap_view *view)
+ ATTR_COLD;
+
+/* Return the highest used string index. */
+uint32_t mail_index_strmap_view_get_highest_idx(struct mail_index_strmap_view *view);
+
+/* Synchronize strmap: Caller adds missing entries, expunged messages may be
+ removed internally and the changes are written to disk. Note that the strmap
+ recs/hash shouldn't be used until _sync_commit() is called, because the
+ string indexes may be renumbered if another process had already written the
+ same changes as us. */
+struct mail_index_strmap_view_sync *
+mail_index_strmap_view_sync_init(struct mail_index_strmap_view *view,
+ uint32_t *last_uid_r);
+void mail_index_strmap_view_sync_add(struct mail_index_strmap_view_sync *sync,
+ uint32_t uid, uint32_t ref_index,
+ const char *key);
+void mail_index_strmap_view_sync_add_unique(struct mail_index_strmap_view_sync *sync,
+ uint32_t uid, uint32_t ref_index);
+void mail_index_strmap_view_sync_commit(struct mail_index_strmap_view_sync **sync);
+void mail_index_strmap_view_sync_rollback(struct mail_index_strmap_view_sync **sync);
+
+#endif
diff --git a/src/lib-index/mail-index-sync-ext.c b/src/lib-index/mail-index-sync-ext.c
new file mode 100644
index 0000000..5d453f3
--- /dev/null
+++ b/src/lib-index/mail-index-sync-ext.c
@@ -0,0 +1,735 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "buffer.h"
+#include "mail-index-view-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-index-modseq.h"
+#include "mail-transaction-log.h"
+
+
+/* Build the list of expunge handlers for this sync: for every registered
+ extension that has an expunge_handler and is present in the current map,
+ record the handler, its per-extension context slot and the extension's
+ record offset. */
+void mail_index_sync_init_expunge_handlers(struct mail_index_sync_map_ctx *ctx)
+{
+ const struct mail_index_ext *ext;
+ const struct mail_index_registered_ext *rext;
+ const uint32_t *id_map;
+ void **contexts;
+ struct mail_index_expunge_handler eh;
+ unsigned int ext_count, id_map_count;
+ unsigned int rext_count, context_count, count;
+ uint32_t idx_ext_id, map_ext_id;
+
+ if (!array_is_created(&ctx->view->map->extensions))
+ return;
+
+ i_zero(&eh);
+ /* rebuild from scratch on every call */
+ if (array_is_created(&ctx->expunge_handlers))
+ array_clear(&ctx->expunge_handlers);
+ else
+ i_array_init(&ctx->expunge_handlers, 64);
+
+ rext = array_get(&ctx->view->index->extensions, &rext_count);
+ ext = array_get(&ctx->view->map->extensions, &ext_count);
+ id_map = array_get(&ctx->view->map->ext_id_map, &id_map_count);
+ contexts = array_get_modifiable(&ctx->extra_contexts, &context_count);
+
+ i_assert(context_count >= rext_count);
+ count = I_MIN(rext_count, id_map_count);
+ for (idx_ext_id = 0; idx_ext_id < count; idx_ext_id++) {
+ if (rext[idx_ext_id].expunge_handler == NULL)
+ continue;
+ /* (uint32_t)-1 = extension not present in this map */
+ map_ext_id = id_map[idx_ext_id];
+ if (map_ext_id == (uint32_t)-1)
+ continue;
+
+ eh.handler = rext[idx_ext_id].expunge_handler;
+ eh.sync_context = &contexts[idx_ext_id];
+ eh.record_offset = ext[map_ext_id].record_offset;
+ array_push_back(&ctx->expunge_handlers, &eh);
+ }
+ ctx->expunge_handlers_set = TRUE;
+ ctx->expunge_handlers_used = TRUE;
+}
+
+/* Give each expunge handler a final call with a NULL record so it can
+ clean up its sync context, then free the handler list. */
+void
+mail_index_sync_deinit_expunge_handlers(struct mail_index_sync_map_ctx *ctx)
+{
+ const struct mail_index_expunge_handler *eh;
+
+ if (!array_is_created(&ctx->expunge_handlers))
+ return;
+
+ array_foreach(&ctx->expunge_handlers, eh) {
+ if (eh->sync_context != NULL)
+ eh->handler(ctx, NULL, eh->sync_context);
+ }
+ array_free(&ctx->expunge_handlers);
+}
+
+/* Make sure extra_contexts has one slot per registered extension and mark
+ the expunge-handler list as needing a rebuild. */
+void mail_index_sync_init_handlers(struct mail_index_sync_map_ctx *ctx)
+{
+ unsigned int count;
+
+ if (!array_is_created(&ctx->view->map->extensions))
+ return;
+
+ /* set space for extra contexts */
+ count = array_count(&ctx->view->index->extensions);
+ i_assert(count > 0);
+
+ if (!array_is_created(&ctx->extra_contexts))
+ i_array_init(&ctx->extra_contexts, count);
+
+ /* make sure the extra_contexts contains everything */
+ (void)array_idx_get_space(&ctx->extra_contexts, count - 1);
+ /* we need to update the expunge handler list in case they had
+ already been called */
+ ctx->expunge_handlers_set = FALSE;
+}
+
+/* Free the per-extension context slots, if they were ever allocated. */
+void mail_index_sync_deinit_handlers(struct mail_index_sync_map_ctx *ctx)
+{
+ if (array_is_created(&ctx->extra_contexts))
+ array_free(&ctx->extra_contexts);
+}
+
+/* Return a writable pointer to the extension's on-header struct inside
+ map->hdr_copy_buf, located via ext->ext_offset. The assert verifies we
+ landed on the right extension by checking the name that follows the
+ header struct. */
+static struct mail_index_ext_header *
+get_ext_header(struct mail_index_map *map, const struct mail_index_ext *ext)
+{
+ struct mail_index_ext_header *ext_hdr;
+ void *hdr_base;
+
+ /* do some kludgy jumping to get to it. */
+ hdr_base = buffer_get_modifiable_data(map->hdr_copy_buf, NULL);
+ ext_hdr = PTR_OFFSET(hdr_base, ext->ext_offset);
+ i_assert(memcmp((char *)(ext_hdr + 1),
+ ext->name, strlen(ext->name)) == 0);
+ return ext_hdr;
+}
+
+/* qsort comparator: sort extension pointers by record_align, largest
+ alignment first (note e2 - e1). */
+static int mail_index_ext_align_cmp(const void *p1, const void *p2)
+{
+ const struct mail_index_ext *const *e1 = p1, *const *e2 = p2;
+
+ return (int)(*e2)->record_align - (int)(*e1)->record_align;
+}
+
+/* Recompute the per-record layout of all extensions (largest alignment
+ first), then rewrite every record into a new buffer with the new offsets
+ and update the extension headers. Called when an extension's record size
+ or alignment changed, or when a new extension was introduced. */
+static void sync_ext_reorder(struct mail_index_map *map, uint32_t ext_map_idx,
+ uint16_t old_ext_size)
+{
+ struct mail_index_ext *ext, **sorted;
+ struct mail_index_ext_header *ext_hdr;
+ uint16_t *old_offsets, *copy_sizes, min_align, max_align;
+ uint32_t offset, new_record_size, rec_idx;
+ unsigned int i, count;
+ const void *src;
+ buffer_t *new_buffer;
+ size_t new_buffer_size;
+
+ /* the map must be a private in-memory copy before we rewrite it */
+ i_assert(MAIL_INDEX_MAP_IS_IN_MEMORY(map) && map->refcount == 1);
+
+ ext = array_get_modifiable(&map->extensions, &count);
+ i_assert(ext_map_idx < count);
+
+ /* @UNSAFE */
+ old_offsets = t_new(uint16_t, count);
+ copy_sizes = t_new(uint16_t, count);
+ sorted = t_new(struct mail_index_ext *, count);
+ for (i = 0; i < count; i++) {
+ old_offsets[i] = ext[i].record_offset;
+ copy_sizes[i] = ext[i].record_size;
+ /* offset 0 marks "not yet placed" below */
+ ext[i].record_offset = 0;
+ sorted[i] = &ext[i];
+ }
+ qsort(sorted, count, sizeof(struct mail_index_ext *),
+ mail_index_ext_align_cmp);
+
+ if (copy_sizes[ext_map_idx] > old_ext_size) {
+ /* we are growing the extension record. remember this
+ so we don't write extra data while copying the record */
+ copy_sizes[ext_map_idx] = old_ext_size;
+ }
+
+ /* we simply try to use the extensions with largest alignment
+ requirement first. FIXME: if the extension sizes don't match
+ alignment, this may not give the minimal layout. */
+ offset = MAIL_INDEX_RECORD_MIN_SIZE;
+ max_align = sizeof(uint32_t);
+ for (;;) {
+ min_align = (uint16_t)-1;
+ for (i = 0; i < count; i++) {
+ if (sorted[i]->record_align > max_align)
+ max_align = sorted[i]->record_align;
+
+ if (sorted[i]->record_offset == 0 &&
+ sorted[i]->record_size > 0) {
+ /* pick the first unplaced extension whose
+ alignment matches the current offset */
+ if ((offset % sorted[i]->record_align) == 0)
+ break;
+ if (sorted[i]->record_align < min_align)
+ min_align = sorted[i]->record_align;
+ }
+ }
+ if (i == count) {
+ if (min_align == (uint16_t)-1) {
+ /* all done */
+ break;
+ }
+ /* we have to leave space here */
+ i_assert(min_align > 1 && min_align < (uint16_t)-1);
+ offset += min_align - (offset % min_align);
+ } else {
+ sorted[i]->record_offset = offset;
+ offset += sorted[i]->record_size;
+ }
+
+ i_assert(offset < (uint16_t)-1);
+ }
+
+ if ((offset % max_align) != 0) {
+ /* keep record size divisible with maximum alignment */
+ offset += max_align - (offset % max_align);
+ }
+ new_record_size = offset;
+ i_assert(new_record_size >= sizeof(struct mail_index_record));
+
+ /* copy the records to new buffer */
+ new_buffer_size = map->rec_map->records_count * new_record_size;
+ new_buffer = buffer_create_dynamic(default_pool, new_buffer_size);
+ src = map->rec_map->records;
+ offset = 0;
+ for (rec_idx = 0; rec_idx < map->rec_map->records_count; rec_idx++) {
+ /* write the base record */
+ buffer_write(new_buffer, offset, src,
+ sizeof(struct mail_index_record));
+
+ /* write extensions */
+ for (i = 0; i < count; i++) {
+ buffer_write(new_buffer, offset + ext[i].record_offset,
+ CONST_PTR_OFFSET(src, old_offsets[i]),
+ copy_sizes[i]);
+ }
+ src = CONST_PTR_OFFSET(src, map->hdr.record_size);
+ offset += new_record_size;
+ }
+
+ if (new_buffer->used != new_buffer_size) {
+ /* we didn't fully write the last record */
+ size_t space = new_buffer_size - new_buffer->used;
+ i_assert(space < new_record_size);
+ buffer_append_zero(new_buffer, space);
+ }
+
+ /* swap in the new record buffer */
+ buffer_free(&map->rec_map->buffer);
+ map->rec_map->buffer = new_buffer;
+ map->rec_map->records =
+ buffer_get_modifiable_data(map->rec_map->buffer, NULL);
+ map->hdr.record_size = new_record_size;
+
+ /* update record offsets in headers */
+ for (i = 0; i < count; i++) {
+ ext_hdr = get_ext_header(map, &ext[i]);
+ ext_hdr->record_offset = ext[i].record_offset;
+ }
+}
+
+/* Apply a size/alignment change from an extension intro to an existing
+ extension: grow or (unless no_shrink) shrink its header area in
+ hdr_copy_buf, adjust following extensions' offsets, and reorder the
+ record layout if record size/alignment changed. */
+static void
+sync_ext_resize(const struct mail_transaction_ext_intro *u,
+ uint32_t ext_map_idx, struct mail_index_sync_map_ctx *ctx,
+ bool no_shrink)
+{
+ struct mail_index_map *map;
+ struct mail_index_ext *ext;
+ struct mail_index_ext_header *ext_hdr;
+ uint32_t old_padded_hdr_size, new_padded_hdr_size, old_record_size;
+ bool reorder = FALSE;
+
+ ext = array_idx_modifiable(&ctx->view->map->extensions, ext_map_idx);
+ old_padded_hdr_size = MAIL_INDEX_HEADER_SIZE_ALIGN(ext->hdr_size);
+ new_padded_hdr_size = MAIL_INDEX_HEADER_SIZE_ALIGN(u->hdr_size);
+
+ /* early-out check: return if nothing would change */
+ if (ext->record_align != u->record_align ||
+ ext->record_size != u->record_size) {
+ /* record changed */
+ } else if (new_padded_hdr_size < old_padded_hdr_size) {
+ /* header is shrunk. do we allow? */
+ if (no_shrink)
+ return;
+ } else if (ext->hdr_size == u->hdr_size) {
+ /* no changes */
+ return;
+ }
+ /* something changed. get ourself a new map before we start changing
+ anything in it. */
+ map = mail_index_sync_get_atomic_map(ctx);
+ /* ext was duplicated to the new map. */
+ ext = array_idx_modifiable(&map->extensions, ext_map_idx);
+
+ if (new_padded_hdr_size < old_padded_hdr_size) {
+ /* header shrank */
+ if (no_shrink)
+ new_padded_hdr_size = old_padded_hdr_size;
+ else {
+ buffer_delete(map->hdr_copy_buf,
+ ext->hdr_offset + new_padded_hdr_size,
+ old_padded_hdr_size - new_padded_hdr_size);
+ ext->hdr_size = u->hdr_size;
+ }
+ } else if (new_padded_hdr_size > old_padded_hdr_size) {
+ /* header grown */
+ buffer_insert_zero(map->hdr_copy_buf,
+ ext->hdr_offset + old_padded_hdr_size,
+ new_padded_hdr_size - old_padded_hdr_size);
+ ext->hdr_size = u->hdr_size;
+ } else {
+ if (ext->hdr_size != u->hdr_size) {
+ /* aligned sizes were the same, but the actual sizes
+ had changed */
+ ext->hdr_size = u->hdr_size;
+ }
+ }
+
+ /* growing alignment is always applied; shrinking only if allowed */
+ if (ext->record_align < u->record_align ||
+ (ext->record_align > u->record_align && !no_shrink)) {
+ ext->record_align = u->record_align;
+ reorder = TRUE;
+ }
+
+ old_record_size = ext->record_size;
+ if (ext->record_size < u->record_size ||
+ (ext->record_size > u->record_size && !no_shrink)) {
+ ext->record_size = u->record_size;
+ reorder = TRUE;
+ }
+
+ i_assert((map->hdr_copy_buf->used % sizeof(uint64_t)) == 0);
+ map->hdr.header_size = map->hdr_copy_buf->used;
+
+ /* mirror the new values into the on-header extension struct */
+ ext_hdr = get_ext_header(map, ext);
+ ext_hdr->reset_id = ext->reset_id;
+ ext_hdr->hdr_size = ext->hdr_size;
+ ext_hdr->record_offset = ext->record_offset;
+ ext_hdr->record_size = ext->record_size;
+ ext_hdr->record_align = ext->record_align;
+
+ if (new_padded_hdr_size != old_padded_hdr_size) {
+ /* move all hdr_offset of all extensions after this one */
+ unsigned int i, count = array_count(&map->extensions);
+ ssize_t diff = (ssize_t)new_padded_hdr_size -
+ (ssize_t)old_padded_hdr_size;
+
+ ext = array_front_modifiable(&map->extensions);
+ for (i = ext_map_idx + 1; i < count; i++) {
+ ext[i].ext_offset += diff;
+ ext[i].hdr_offset += diff;
+ }
+ }
+
+ if (reorder)
+ sync_ext_reorder(map, ext_map_idx, old_record_size);
+}
+
+/* Rate-limit "unknown extension" complaints: returns TRUE the first time a
+ given ext_map_idx is seen (caller should log), FALSE afterwards. Indexes
+ >= 1024 are not tracked and always return TRUE. */
+static bool
+mail_index_sync_ext_unknown_complain(struct mail_index_sync_map_ctx *ctx,
+ uint32_t ext_map_idx)
+{
+ unsigned char *p;
+
+ if (ext_map_idx >= 1024) {
+ /* don't try to track too high values */
+ return TRUE;
+ }
+
+ /* one byte per ext_map_idx, lazily grown */
+ if (ctx->unknown_extensions == NULL) {
+ ctx->unknown_extensions =
+ buffer_create_dynamic(default_pool, ext_map_idx + 8);
+ }
+ p = buffer_get_space_unsafe(ctx->unknown_extensions, ext_map_idx, 1);
+ if (*p != 0) {
+ /* we've already complained once */
+ return FALSE;
+ }
+ *p = 1;
+ return TRUE;
+}
+
+/* Register a brand-new extension in the map: append its on-disk header
+ (<ext_hdr> <name> [padding] [header data]) to hdr_copy_buf, register it
+ in the extension array and give it record space via sync_ext_reorder().
+ Returns the new extension's map index in *ext_map_idx_r. */
+static void
+mail_index_sync_ext_init_new(struct mail_index_sync_map_ctx *ctx,
+ const char *name,
+ const struct mail_index_ext_header *ext_hdr,
+ uint32_t *ext_map_idx_r)
+{
+ struct mail_index_map *map;
+ const struct mail_index_ext *ext;
+ buffer_t *hdr_buf;
+ uint32_t ext_map_idx;
+
+ i_assert(mail_index_ext_name_is_valid(name));
+
+ /* be sure to get a unique mapping before we modify the extensions,
+ otherwise other map users will see the new extension but not the
+ data records that sync_ext_reorder() adds. */
+ map = mail_index_sync_get_atomic_map(ctx);
+
+ hdr_buf = map->hdr_copy_buf;
+ i_assert(hdr_buf->used == map->hdr.header_size);
+
+ if (MAIL_INDEX_HEADER_SIZE_ALIGN(hdr_buf->used) != hdr_buf->used) {
+ /* we need to add padding between base header and extensions */
+ buffer_append_zero(hdr_buf,
+ MAIL_INDEX_HEADER_SIZE_ALIGN(hdr_buf->used) -
+ hdr_buf->used);
+ }
+
+ /* register record offset initially using zero,
+ sync_ext_reorder() will fix it. */
+ ext_map_idx = mail_index_map_register_ext(map, name, hdr_buf->used,
+ ext_hdr);
+ ext = array_idx(&map->extensions, ext_map_idx);
+
+ /* <ext_hdr> <name> [padding] [header data] */
+ i_assert(ext_hdr->name_size == strlen(name));
+ buffer_append(hdr_buf, ext_hdr, sizeof(*ext_hdr));
+ buffer_append(hdr_buf, name, ext_hdr->name_size);
+ /* header must begin and end in correct alignment */
+ buffer_append_zero(hdr_buf,
+ MAIL_INDEX_HEADER_SIZE_ALIGN(hdr_buf->used) - hdr_buf->used +
+ MAIL_INDEX_HEADER_SIZE_ALIGN(ext->hdr_size));
+ i_assert(hdr_buf->used ==
+ ext->hdr_offset + MAIL_INDEX_HEADER_SIZE_ALIGN(ext->hdr_size));
+ i_assert((hdr_buf->used % sizeof(uint64_t)) == 0);
+
+ map->hdr.header_size = hdr_buf->used;
+
+ /* the registered-extension count may have grown; resize contexts */
+ mail_index_sync_init_handlers(ctx);
+ sync_ext_reorder(map, ext_map_idx, 0);
+ i_assert(ext->record_offset != 0 || ext->record_size == 0);
+
+ *ext_map_idx_r = ext_map_idx;
+}
+
+/* Handle an extension-intro transaction record: validate it, resolve the
+ extension by id or name, resize an existing extension or create a new
+ one, and set ctx->cur_ext_* so the following ext updates know which
+ extension they apply to. Returns 1 on success, -1 on corruption. */
+int mail_index_sync_ext_intro(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_intro *u)
+{
+ struct mail_index_map *map = ctx->view->map;
+ struct mail_index_ext_header ext_hdr;
+ const struct mail_index_ext *ext;
+ const char *name, *error;
+ uint32_t ext_map_idx;
+ bool no_shrink;
+
+ /* default to ignoring the following extension updates in case this
+ intro is corrupted */
+ ctx->cur_ext_map_idx = (uint32_t)-2;
+ ctx->cur_ext_ignore = TRUE;
+ ctx->cur_ext_record_size = 0;
+
+ if (u->ext_id != (uint32_t)-1 &&
+ (!array_is_created(&map->extensions) ||
+ u->ext_id >= array_count(&map->extensions))) {
+ /* The extension ID is unknown in this map. */
+ if (map->hdr.log_file_seq == 0) {
+ /* This map was generated by
+ view_sync_get_log_lost_changes(). There's no need to
+ update any extensions, because they won't be used
+ anyway. Any extension lookups will be accessed via
+ the latest index map. */
+ i_assert(map->rec_map != ctx->view->index->map->rec_map);
+ return 1;
+ }
+ if (!mail_index_sync_ext_unknown_complain(ctx, u->ext_id))
+ return -1;
+ mail_index_sync_set_corrupted(ctx,
+ "Extension introduction for unknown id %u", u->ext_id);
+ return -1;
+ }
+
+ if (u->ext_id == (uint32_t)-1 && u->name_size == 0) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension introduction without id or name");
+ return -1;
+ }
+
+ /* resolve the extension either by numeric id or by name
+ (the name follows the intro struct in the transaction data) */
+ if (u->ext_id != (uint32_t)-1) {
+ name = NULL;
+ ext_map_idx = u->ext_id;
+ } else {
+ name = t_strndup(u + 1, u->name_size);
+ if (!mail_index_map_lookup_ext(map, name, &ext_map_idx))
+ ext_map_idx = (uint32_t)-1;
+ }
+ if (ext_map_idx == (uint32_t)-1)
+ ext = NULL;
+ else {
+ ext = array_idx(&map->extensions, ext_map_idx);
+ name = ext->name;
+ }
+ i_assert(name != NULL);
+
+ if (!ctx->internal_update &&
+ strcmp(name, MAIL_INDEX_EXT_KEYWORDS) == 0) {
+ /* Keyword extension is handled internally by the keyword
+ code. Any attempt to modify them directly could cause
+ assert-crashes later, so prevent them immediately. */
+ mail_index_sync_set_corrupted(ctx,
+ "Extension introduction for keywords");
+ return -1;
+ }
+
+ i_zero(&ext_hdr);
+ ext_hdr.name_size = strlen(name);
+ ext_hdr.reset_id = u->reset_id;
+ ext_hdr.hdr_size = u->hdr_size;
+ ext_hdr.record_size = u->record_size;
+ ext_hdr.record_align = u->record_align;
+ no_shrink = (u->flags & MAIL_TRANSACTION_EXT_INTRO_FLAG_NO_SHRINK) != 0;
+
+ /* make sure the header looks valid before doing anything with it */
+ if (mail_index_map_ext_hdr_check(&map->hdr, &ext_hdr,
+ name, &error) < 0) {
+ mail_index_sync_set_corrupted(ctx,
+ "Broken extension introduction: %s", error);
+ return -1;
+ }
+
+ ctx->cur_ext_record_size = u->record_size;
+ if (ext != NULL) {
+ /* exists already */
+ if (u->reset_id == ext->reset_id) {
+ /* check if we need to resize anything */
+ sync_ext_resize(u, ext_map_idx, ctx, no_shrink);
+ ctx->cur_ext_ignore = FALSE;
+ } else {
+ /* extension was reset and this transaction hadn't
+ yet seen it. ignore this update (except for
+ resets). */
+ ctx->cur_ext_ignore = TRUE;
+ }
+
+ ctx->cur_ext_map_idx = ext_map_idx;
+ return 1;
+ }
+
+ mail_index_sync_ext_init_new(ctx, name, &ext_hdr, &ext_map_idx);
+
+ ctx->cur_ext_ignore = FALSE;
+ ctx->cur_ext_map_idx = ctx->internal_update ?
+ (uint32_t)-1 : ext_map_idx;
+ return 1;
+}
+
+/* Zero out all data belonging to an extension: its header area in
+ hdr_copy_buf and its per-record slice in every record. */
+static void mail_index_sync_ext_clear(struct mail_index_view *view,
+ struct mail_index_map *map,
+ struct mail_index_ext *ext)
+{
+ struct mail_index_record *rec;
+ uint32_t seq;
+
+ memset(buffer_get_space_unsafe(map->hdr_copy_buf, ext->hdr_offset,
+ ext->hdr_size), 0, ext->hdr_size);
+ i_assert(map->hdr_copy_buf->used == map->hdr.header_size);
+
+ /* sequences are 1-based */
+ for (seq = 1; seq <= view->map->rec_map->records_count; seq++) {
+ rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
+ memset(PTR_OFFSET(rec, ext->record_offset), 0,
+ ext->record_size);
+ }
+}
+
+/* Handle an extension reset transaction: bump the extension's reset_id and
+   optionally wipe its stored data. Requires a preceding extension intro
+   (which sets ctx->cur_ext_map_idx). Returns 1 on success, -1 if the
+   transaction stream is corrupted. */
+int mail_index_sync_ext_reset(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_reset *u)
+{
+ struct mail_index_map *map;
+ struct mail_index_ext_header *ext_hdr;
+ struct mail_index_ext *ext;
+
+ if (ctx->cur_ext_map_idx == (uint32_t)-1) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension reset without intro prefix");
+ return -1;
+ }
+ /* (uint32_t)-2 appears to be a sentinel for a broken intro that was
+    flagged for ignoring — NOTE(review): confirm where it is assigned */
+ if (ctx->cur_ext_map_idx == (uint32_t)-2 && ctx->cur_ext_ignore) {
+ /* previous extension intro was broken */
+ return -1;
+ }
+ /* since we're resetting the extension, don't check cur_ext_ignore */
+
+ /* a new index file will be created, so the old data won't be
+ accidentally used by other processes. */
+ map = mail_index_sync_get_atomic_map(ctx);
+
+ ext = array_idx_modifiable(&map->extensions, ctx->cur_ext_map_idx);
+ ext->reset_id = u->new_reset_id;
+
+ if (u->preserve_data == 0)
+ mail_index_sync_ext_clear(ctx->view, map, ext);
+
+ /* keep the on-disk extension header in sync with the in-memory copy */
+ ext_hdr = get_ext_header(map, ext);
+ ext_hdr->reset_id = u->new_reset_id;
+ return 1;
+}
+
+/* Apply an extension header update: write `size` bytes of `data` at
+   `offset` within the current extension's header area. Requires a
+   preceding intro; bounds-checked against the extension's hdr_size.
+   Returns 1 on success (or ignored update), -1 on corruption. */
+int mail_index_sync_ext_hdr_update(struct mail_index_sync_map_ctx *ctx,
+ uint32_t offset, uint32_t size,
+ const void *data)
+{
+ struct mail_index_map *map = ctx->view->map;
+ const struct mail_index_ext *ext;
+
+ if (ctx->cur_ext_map_idx == (uint32_t)-1) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension header update without intro prefix");
+ return -1;
+ }
+ /* intro marked this extension's updates as stale (e.g. reset_id
+    mismatch) — silently skip */
+ if (ctx->cur_ext_ignore)
+ return 1;
+
+ ext = array_idx(&map->extensions, ctx->cur_ext_map_idx);
+ if (offset + size > ext->hdr_size) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension header update points outside header size");
+ return -1;
+ }
+
+ buffer_write(map->hdr_copy_buf, ext->hdr_offset + offset, data, size);
+ i_assert(map->hdr_copy_buf->used == map->hdr.header_size);
+
+ /* the modseq extension header has derived state that must be refreshed */
+ if (ext->index_idx == ctx->view->index->modseq_ext_id)
+ mail_index_modseq_hdr_update(ctx->modseq_ctx);
+ return 1;
+}
+
+/* Apply an extension record update: copy the payload that follows the
+   transaction record (u + 1) into the extension's slot of the record for
+   u->uid. If the intro declared a smaller record size than the map's
+   current one, the trailing bytes are zeroed. Returns 1 on success or if
+   the uid no longer exists, -1 on corruption. */
+int
+mail_index_sync_ext_rec_update(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_rec_update *u)
+{
+ struct mail_index_view *view = ctx->view;
+ struct mail_index_record *rec;
+ const struct mail_index_ext *ext;
+ void *old_data;
+ uint32_t seq;
+
+ i_assert(ctx->cur_ext_map_idx != (uint32_t)-1);
+ i_assert(!ctx->cur_ext_ignore);
+
+ if (u->uid == 0 || u->uid >= view->map->hdr.next_uid) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension record update for invalid uid=%u", u->uid);
+ return -1;
+ }
+
+ /* uid was already expunged — nothing to update */
+ if (!mail_index_lookup_seq(view, u->uid, &seq))
+ return 1;
+
+ ext = array_idx(&view->map->extensions, ctx->cur_ext_map_idx);
+ i_assert(ext->record_offset + ctx->cur_ext_record_size <=
+ view->map->hdr.record_size);
+
+ rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
+ old_data = PTR_OFFSET(rec, ext->record_offset);
+
+ /* @UNSAFE */
+ memcpy(old_data, u + 1, ctx->cur_ext_record_size);
+ if (ctx->cur_ext_record_size < ext->record_size) {
+ /* zero-pad: map's record slot is larger than what the
+    transaction carried */
+ memset(PTR_OFFSET(old_data, ctx->cur_ext_record_size), 0,
+ ext->record_size - ctx->cur_ext_record_size);
+ }
+ return 1;
+}
+
+/* Apply an atomic increment to an integer counter stored in the current
+   extension's record slot for u->uid. Supports 1/2/4/8 byte counters.
+   min_value/max_value bracket the original value so that applying the
+   signed diff can neither underflow below zero nor overflow the counter
+   width; out-of-range originals mark the index corrupted. Returns 1 on
+   success or if the uid no longer exists, -1 on corruption. */
+int
+mail_index_sync_ext_atomic_inc(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_atomic_inc *u)
+{
+ struct mail_index_view *view = ctx->view;
+ struct mail_index_record *rec;
+ const struct mail_index_ext *ext;
+ void *data;
+ uint32_t seq;
+ uint64_t min_value, max_value, orig_num;
+
+ i_assert(ctx->cur_ext_map_idx != (uint32_t)-1);
+ i_assert(!ctx->cur_ext_ignore);
+
+ if (u->uid == 0 || u->uid >= view->map->hdr.next_uid) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension record inc for invalid uid=%u", u->uid);
+ return -1;
+ }
+
+ /* uid was already expunged — nothing to increment */
+ if (!mail_index_lookup_seq(view, u->uid, &seq))
+ return 1;
+
+ ext = array_idx(&view->map->extensions, ctx->cur_ext_map_idx);
+ i_assert(ext->record_offset + ctx->cur_ext_record_size <=
+ view->map->hdr.record_size);
+
+ rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
+ data = PTR_OFFSET(rec, ext->record_offset);
+
+ /* a negative diff requires orig >= |diff| so the counter can't go
+    below zero */
+ min_value = u->diff >= 0 ? 0 : (uint64_t)(-(int64_t)u->diff);
+
+ /* largest value representable in the counter's byte width; the 8-byte
+    case is special-cased to avoid a 64-bit shift by 64 (UB) */
+ max_value = ctx->cur_ext_record_size == 8 ? (uint64_t)-1 :
+ ((uint64_t)1 << (ctx->cur_ext_record_size*8)) - 1;
+ if (u->diff <= 0) {
+ /* skip */
+ } else if (max_value >= (uint32_t)u->diff) {
+ /* a positive diff requires orig <= max - diff to avoid overflow */
+ max_value -= u->diff;
+ } else {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension record inc diff=%d larger than max value=%u "
+ "(uid=%u)", u->diff, (unsigned int)max_value, u->uid);
+ return -1;
+ }
+
+ /* apply the diff only when the original value is inside the safe
+    range; either way orig_num records it for the checks below */
+ switch (ctx->cur_ext_record_size) {
+ case 1: {
+ uint8_t *num = data;
+
+ orig_num = *num;
+ if (orig_num >= min_value && orig_num <= max_value)
+ *num += u->diff;
+ break;
+ }
+ case 2: {
+ uint16_t *num = data;
+ orig_num = *num;
+ if (orig_num >= min_value && orig_num <= max_value)
+ *num += u->diff;
+ break;
+ }
+ case 4: {
+ uint32_t *num = data;
+ orig_num = *num;
+ if (orig_num >= min_value && orig_num <= max_value)
+ *num += u->diff;
+ break;
+ }
+ case 8: {
+ uint64_t *num = data;
+ orig_num = *num;
+ if (orig_num >= min_value && orig_num <= max_value)
+ *num += u->diff;
+ break;
+ }
+ default:
+ mail_index_sync_set_corrupted(ctx,
+ "Extension record inc with invalid size=%u",
+ ctx->cur_ext_record_size);
+ return -1;
+ }
+ if (orig_num < min_value) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension record inc drops number below zero "
+ "(uid=%u, diff=%d, orig=%"PRIu64")",
+ u->uid, u->diff, orig_num);
+ return -1;
+ } else if (orig_num > max_value) {
+ mail_index_sync_set_corrupted(ctx,
+ "Extension record inc overflows number "
+ "(uid=%u, diff=%d, orig=%"PRIu64")",
+ u->uid, u->diff, orig_num);
+ return -1;
+ }
+ return 1;
+}
diff --git a/src/lib-index/mail-index-sync-keywords.c b/src/lib-index/mail-index-sync-keywords.c
new file mode 100644
index 0000000..2c45156
--- /dev/null
+++ b/src/lib-index/mail-index-sync-keywords.c
@@ -0,0 +1,347 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "buffer.h"
+#include "mail-index-modseq.h"
+#include "mail-index-view-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-transaction-log.h"
+
+/* Look up a keyword's index within the map's keyword list (the file-local
+   numbering), going via the index-wide keyword id. Returns TRUE with
+   *idx_r set if found, FALSE if the keyword isn't in this map. */
+static bool
+keyword_lookup(struct mail_index_sync_map_ctx *ctx,
+ const char *keyword_name, unsigned int *idx_r)
+{
+ struct mail_index_map *map = ctx->view->map;
+ const unsigned int *idx_map;
+ unsigned int i, count, keyword_idx;
+
+ if (array_is_created(&map->keyword_idx_map) &&
+ mail_index_keyword_lookup(ctx->view->index, keyword_name,
+ &keyword_idx)) {
+ /* FIXME: slow. maybe create index -> file mapping as well */
+ idx_map = array_get(&map->keyword_idx_map, &count);
+ for (i = 0; i < count; i++) {
+ if (idx_map[i] == keyword_idx) {
+ *idx_r = i;
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+}
+
+/* Build a new keywords extension header buffer based on the existing one,
+   with room reserved for `new_count` additional keyword records. Returns a
+   t_buffer (NULL if the current header has no keywords, signalling the
+   caller to build a fresh header). On success fills in:
+     *keywords_count_r   - total keyword count after the additions
+     *rec_offset_r       - buffer offset where the new kw_rec(s) go
+     *name_offset_root_r - buffer offset where the new name(s) go
+     *name_offset_r      - name-area-relative offset for the new name(s) */
+static buffer_t *
+keywords_get_header_buf(struct mail_index_map *map,
+ const struct mail_index_ext *ext,
+ unsigned int new_count, unsigned int *keywords_count_r,
+ size_t *rec_offset_r, size_t *name_offset_root_r,
+ size_t *name_offset_r)
+{
+ buffer_t *buf;
+ const struct mail_index_keyword_header *kw_hdr;
+ const struct mail_index_keyword_header_rec *kw_rec;
+ const char *name;
+ struct mail_index_keyword_header new_kw_hdr;
+ uint32_t offset;
+
+ /* header layout: kw_hdr, then keywords_count kw_recs, then the
+    NUL-terminated names area */
+ kw_hdr = MAIL_INDEX_MAP_HDR_OFFSET(map, ext->hdr_offset);
+ kw_rec = (const void *)(kw_hdr + 1);
+ name = (const char *)(kw_rec + kw_hdr->keywords_count);
+
+ if (kw_hdr->keywords_count == 0)
+ return NULL;
+
+ i_assert((size_t)(name - (const char *)kw_hdr) < ext->hdr_size);
+
+ new_kw_hdr = *kw_hdr;
+ new_kw_hdr.keywords_count += new_count;
+ *keywords_count_r = new_kw_hdr.keywords_count;
+
+ /* end of the used names area = last name's offset + its length + NUL */
+ offset = kw_rec[kw_hdr->keywords_count-1].name_offset;
+ offset += strlen(name + offset) + 1;
+
+ buf = t_buffer_create(512);
+ buffer_append(buf, &new_kw_hdr, sizeof(new_kw_hdr));
+ buffer_append(buf, kw_rec, sizeof(*kw_rec) * kw_hdr->keywords_count);
+ *rec_offset_r = buf->used;
+ /* copy the names past a gap left for the new kw_rec entries */
+ buffer_write(buf, buf->used + sizeof(*kw_rec) * new_count,
+ name, offset);
+ *name_offset_root_r = buf->used;
+ *name_offset_r = offset;
+ return buf;
+}
+
+/* (Re-)register the keywords extension via a synthesized extension intro,
+   sizing the per-record bitmask to hold keywords_count bits. Pass
+   ext_map_idx=(uint32_t)-1 to create the extension (the intro then carries
+   the extension name). Panics on failure since this is an internal update
+   that must not fail. */
+static void keywords_ext_register(struct mail_index_sync_map_ctx *ctx,
+ uint32_t ext_map_idx, uint32_t reset_id,
+ uint32_t hdr_size, uint32_t keywords_count)
+{
+ buffer_t ext_intro_buf;
+ struct mail_transaction_ext_intro *u;
+ unsigned char ext_intro_data[sizeof(*u) +
+ sizeof(MAIL_INDEX_EXT_KEYWORDS)-1];
+
+ i_assert(keywords_count > 0);
+
+ /* build the intro in a stack buffer — it has a known max size */
+ buffer_create_from_data(&ext_intro_buf, ext_intro_data,
+ sizeof(ext_intro_data));
+
+ u = buffer_append_space_unsafe(&ext_intro_buf, sizeof(*u));
+ u->ext_id = ext_map_idx;
+ u->reset_id = reset_id;
+ u->hdr_size = hdr_size;
+ /* one bit per keyword, rounded up to whole bytes */
+ u->record_size = (keywords_count + CHAR_BIT - 1) / CHAR_BIT;
+ if ((u->record_size % 4) != 0) {
+ /* since we aren't properly aligned anyway,
+ reserve one extra byte for future */
+ u->record_size++;
+ }
+ u->record_align = 1;
+
+ if (ext_map_idx == (uint32_t)-1) {
+ u->name_size = strlen(MAIL_INDEX_EXT_KEYWORDS);
+ buffer_append(&ext_intro_buf, MAIL_INDEX_EXT_KEYWORDS,
+ u->name_size);
+ }
+
+ /* internal_update lets the intro handler skip caller-visible state
+    changes it would otherwise make */
+ ctx->internal_update = TRUE;
+ if (mail_index_sync_ext_intro(ctx, u) < 0)
+ i_panic("Keyword extension growing failed");
+ ctx->internal_update = FALSE;
+}
+
+/* Add a new keyword to the keywords extension header, creating or growing
+   the extension as needed, and return the new keyword's file-local index
+   in *keyword_idx_r. The header is rewritten atomically (private in-memory
+   map) so a crash can't leave it half-written. */
+static void
+keywords_header_add(struct mail_index_sync_map_ctx *ctx,
+ const char *keyword_name, unsigned int *keyword_idx_r)
+{
+ struct mail_index_map *map;
+ const struct mail_index_ext *ext = NULL;
+ struct mail_index_keyword_header *kw_hdr;
+ struct mail_index_keyword_header_rec kw_rec;
+ uint32_t ext_map_idx;
+ buffer_t *buf = NULL;
+ size_t keyword_len, rec_offset, name_offset, name_offset_root;
+ unsigned int keywords_count;
+
+ /* if we crash in the middle of writing the header, the
+ keywords are more or less corrupted. avoid that by
+ making sure the header is updated atomically. */
+ map = mail_index_sync_get_atomic_map(ctx);
+
+ if (!mail_index_map_lookup_ext(map, MAIL_INDEX_EXT_KEYWORDS,
+ &ext_map_idx))
+ ext_map_idx = (uint32_t)-1;
+ else {
+ /* update existing header */
+ ext = array_idx(&map->extensions, ext_map_idx);
+ buf = keywords_get_header_buf(map, ext, 1, &keywords_count,
+ &rec_offset, &name_offset_root,
+ &name_offset);
+ }
+
+ if (buf == NULL) {
+ /* create new / replace broken header */
+ const unsigned int initial_keywords_count = 1;
+
+ buf = t_buffer_create(512);
+ kw_hdr = buffer_append_space_unsafe(buf, sizeof(*kw_hdr));
+ kw_hdr->keywords_count = initial_keywords_count;
+
+ keywords_count = kw_hdr->keywords_count;
+ rec_offset = buf->used;
+ name_offset_root = rec_offset +
+ initial_keywords_count * sizeof(kw_rec);
+ name_offset = 0;
+ }
+
+ /* add the keyword */
+ i_zero(&kw_rec);
+ kw_rec.name_offset = name_offset;
+
+ keyword_len = strlen(keyword_name) + 1;
+ buffer_write(buf, rec_offset, &kw_rec, sizeof(kw_rec));
+ buffer_write(buf, name_offset_root, keyword_name, keyword_len);
+
+ /* advance the offsets — only relevant if more than one keyword were
+    being added at once */
+ rec_offset += sizeof(kw_rec);
+ kw_rec.name_offset += keyword_len;
+ name_offset_root += keyword_len;
+
+ /* keep the extension header 32-bit aligned */
+ if ((buf->used % 4) != 0)
+ buffer_append_zero(buf, 4 - (buf->used % 4));
+
+ if (ext == NULL || buf->used > ext->hdr_size ||
+ (uint32_t)ext->record_size * CHAR_BIT < keywords_count) {
+ /* if we need to grow the buffer, add some padding */
+ buffer_append_zero(buf, 128);
+ keywords_ext_register(ctx, ext_map_idx,
+ ext == NULL ? 0 : ext->reset_id,
+ buf->used, keywords_count);
+
+ /* map may have changed */
+ map = ctx->view->map;
+
+ if (!mail_index_map_lookup_ext(map, MAIL_INDEX_EXT_KEYWORDS,
+ &ext_map_idx))
+ i_unreached();
+ ext = array_idx(&map->extensions, ext_map_idx);
+
+ i_assert(ext->hdr_size == buf->used);
+ }
+
+ buffer_copy(map->hdr_copy_buf, ext->hdr_offset, buf, 0, buf->used);
+ i_assert(map->hdr_copy_buf->used == map->hdr.header_size);
+
+ /* refresh map->keyword_idx_map from the header we just wrote */
+ if (mail_index_map_parse_keywords(map) < 0)
+ i_panic("Keyword update corrupted keywords header");
+
+ /* the new keyword is always the last one */
+ *keyword_idx_r = keywords_count - 1;
+ i_assert(*keyword_idx_r / CHAR_BIT < ext->record_size);
+}
+
+/* Set or clear one keyword's bit in the keyword bitmask of every record in
+   the uid range [uid1, uid2]. Also records the change in modseq tracking.
+   Returns 1 (also when no uids in the range exist any more). */
+static int
+keywords_update_records(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_index_ext *ext,
+ unsigned int keyword_idx, enum modify_type type,
+ uint32_t uid1, uint32_t uid2)
+{
+ struct mail_index_view *view = ctx->view;
+ struct mail_index_record *rec;
+ unsigned char *data, data_mask;
+ unsigned int data_offset;
+ uint32_t seq1, seq2;
+
+ i_assert(keyword_idx != UINT_MAX);
+
+ if (!mail_index_lookup_seq_range(view, uid1, uid2, &seq1, &seq2))
+ return 1;
+
+ mail_index_modseq_update_keyword(ctx->modseq_ctx, keyword_idx,
+ seq1, seq2);
+
+ /* locate the keyword's byte and bit within the extension data */
+ data_offset = keyword_idx / CHAR_BIT;
+ data_mask = 1 << (keyword_idx % CHAR_BIT);
+
+ i_assert(data_offset < ext->record_size);
+ data_offset += ext->record_offset;
+
+ i_assert(data_offset >= MAIL_INDEX_RECORD_MIN_SIZE);
+
+ switch (type) {
+ case MODIFY_ADD:
+ for (; seq1 <= seq2; seq1++) {
+ rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq1);
+ data = PTR_OFFSET(rec, data_offset);
+ *data |= data_mask;
+ }
+ break;
+ case MODIFY_REMOVE:
+ data_mask = (unsigned char)~data_mask;
+ for (; seq1 <= seq2; seq1++) {
+ rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq1);
+ data = PTR_OFFSET(rec, data_offset);
+ *data &= data_mask;
+ }
+ break;
+ default:
+ i_unreached();
+ }
+ return 1;
+}
+
+/* Apply a keyword update transaction: resolve (or create) the keyword in
+   the keywords extension, grow the per-record bitmask if needed, then
+   set/clear the bit for every uid range carried in the transaction. The
+   record layout is: header, keyword name, padding to 4 bytes, then uid
+   range pairs until hdr->size. Returns 1 on success, <=0 on failure. */
+int mail_index_sync_keywords(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_header *hdr,
+ const struct mail_transaction_keyword_update *rec)
+{
+ struct mail_index_view *view = ctx->view;
+ const char *keyword_name;
+ const struct mail_index_ext *ext;
+ const uint32_t *uid, *end;
+ uint32_t seqset_offset, ext_map_idx;
+ unsigned int keyword_idx;
+ int ret;
+
+ i_assert(rec->name_size > 0);
+
+ /* uid ranges start after the name, padded to 32-bit alignment */
+ seqset_offset = sizeof(*rec) + rec->name_size;
+ if ((seqset_offset % 4) != 0)
+ seqset_offset += 4 - (seqset_offset % 4);
+ i_assert(seqset_offset < hdr->size);
+
+ uid = CONST_PTR_OFFSET(rec, seqset_offset);
+ end = CONST_PTR_OFFSET(rec, hdr->size);
+
+ /* the name follows the fixed part of the record, not NUL-terminated */
+ keyword_name = t_strndup(rec + 1, rec->name_size);
+ if (!keyword_lookup(ctx, keyword_name, &keyword_idx))
+ keywords_header_add(ctx, keyword_name, &keyword_idx);
+
+ /* if the keyword wasn't found, the "keywords" extension was created.
+ if it was found, the record size should already be correct, but
+ in case it isn't just fix it ourself. */
+ if (!mail_index_map_lookup_ext(view->map, MAIL_INDEX_EXT_KEYWORDS,
+ &ext_map_idx))
+ i_unreached();
+
+ ext = array_idx(&view->map->extensions, ext_map_idx);
+ if (keyword_idx / CHAR_BIT >= ext->record_size) {
+ if (rec->modify_type == MODIFY_REMOVE) {
+ /* nothing to do */
+ return 1;
+ }
+
+ /* grow the record size */
+ keywords_ext_register(ctx, ext_map_idx, ext->reset_id,
+ ext->hdr_size,
+ array_count(&view->map->keyword_idx_map));
+ /* re-lookup: the map/extension may have been replaced */
+ if (!mail_index_map_lookup_ext(view->map,
+ MAIL_INDEX_EXT_KEYWORDS,
+ &ext_map_idx))
+ i_unreached();
+ ext = array_idx(&view->map->extensions, ext_map_idx);
+ }
+
+ /* apply to each (uid1, uid2) pair */
+ while (uid+2 <= end) {
+ ret = keywords_update_records(ctx, ext, keyword_idx,
+ rec->modify_type,
+ uid[0], uid[1]);
+ if (ret <= 0)
+ return ret;
+
+ uid += 2;
+ }
+
+ return 1;
+}
+
+/* Apply a keyword reset transaction: clear the whole keyword bitmask of
+   every record in each uid range carried in the transaction. A no-op if
+   the keywords extension doesn't exist. Returns 1. */
+int
+mail_index_sync_keywords_reset(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_header *hdr,
+ const struct mail_transaction_keyword_reset *r)
+{
+ struct mail_index_map *map = ctx->view->map;
+ struct mail_index_record *rec;
+ const struct mail_index_ext *ext;
+ const struct mail_transaction_keyword_reset *end;
+ uint32_t ext_map_idx, seq1, seq2;
+
+ if (!mail_index_map_lookup_ext(map, MAIL_INDEX_EXT_KEYWORDS,
+ &ext_map_idx)) {
+ /* nothing to do */
+ return 1;
+ }
+
+ ext = array_idx(&map->extensions, ext_map_idx);
+ /* the transaction body is an array of keyword_reset records */
+ end = CONST_PTR_OFFSET(r, hdr->size);
+ for (; r != end; r++) {
+ if (!mail_index_lookup_seq_range(ctx->view, r->uid1, r->uid2,
+ &seq1, &seq2))
+ continue;
+
+ mail_index_modseq_reset_keywords(ctx->modseq_ctx, seq1, seq2);
+ for (; seq1 <= seq2; seq1++) {
+ rec = MAIL_INDEX_REC_AT_SEQ(map, seq1);
+ memset(PTR_OFFSET(rec, ext->record_offset),
+ 0, ext->record_size);
+ }
+ }
+ return 1;
+}
diff --git a/src/lib-index/mail-index-sync-private.h b/src/lib-index/mail-index-sync-private.h
new file mode 100644
index 0000000..094c83d
--- /dev/null
+++ b/src/lib-index/mail-index-sync-private.h
@@ -0,0 +1,104 @@
+#ifndef MAIL_INDEX_SYNC_PRIVATE_H
+#define MAIL_INDEX_SYNC_PRIVATE_H
+
+#include "mail-index-private.h"
+#include "mail-transaction-log.h"
+
+/* Inclusive UID range. */
+struct uid_range {
+ uint32_t uid1, uid2;
+};
+ARRAY_DEFINE_TYPE(uid_range, struct uid_range);
+
+struct mail_index_sync_list {
+ const ARRAY_TYPE(uid_range) *array;
+ /* current position within the array */
+ unsigned int idx;
+ unsigned int keyword_idx:31;
+ bool keyword_remove:1;
+};
+
+/* Per-extension expunge callback registration. */
+struct mail_index_expunge_handler {
+ mail_index_expunge_handler_t *handler;
+ void *context;
+ void **sync_context;
+ /* offset of the extension's data within a record; passed to handler */
+ uint32_t record_offset;
+};
+
+/* State for replaying transaction log records into an index map. */
+struct mail_index_sync_map_ctx {
+ struct mail_index_view *view;
+ struct mail_index_modseq_sync *modseq_ctx;
+ /* extension set by the latest ext intro; (uint32_t)-1 = none */
+ uint32_t cur_ext_map_idx;
+ /* record size declared by the latest ext intro (may be smaller
+    than the map's current record size for the extension) */
+ uint32_t cur_ext_record_size;
+
+ /* log position of the latest ext intro, used to rewind syncing
+    back over the intro after an extension reset */
+ uint32_t ext_intro_seq;
+ uoff_t ext_intro_offset, ext_intro_end_offset;
+
+ ARRAY(struct mail_index_expunge_handler) expunge_handlers;
+ ARRAY(void *) extra_contexts;
+ buffer_t *unknown_extensions;
+
+ enum mail_index_sync_handler_type type;
+
+ bool sync_handlers_initialized:1;
+ bool expunge_handlers_set:1;
+ bool expunge_handlers_used:1;
+ /* latest ext intro was stale/broken; skip its follow-up records */
+ bool cur_ext_ignore:1;
+ bool internal_update:1; /* used by keywords for ext_intro */
+ bool errors:1;
+};
+
+extern struct mail_transaction_map_functions mail_index_map_sync_funcs;
+
+void mail_index_sync_map_init(struct mail_index_sync_map_ctx *sync_map_ctx,
+ struct mail_index_view *view,
+ enum mail_index_sync_handler_type type);
+void mail_index_sync_map_deinit(struct mail_index_sync_map_ctx *sync_map_ctx);
+bool mail_index_sync_map_want_index_reopen(struct mail_index_map *map,
+ enum mail_index_sync_handler_type type);
+int mail_index_sync_map(struct mail_index_map **_map,
+ enum mail_index_sync_handler_type type,
+ const char **reason_r);
+
+/* Dispatch a single transaction log record to its handler. */
+int mail_index_sync_record(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_header *hdr,
+ const void *data);
+
+/* Return a map that is private to this sync and fully in memory, so it
+   can be modified without other views/processes seeing partial state. */
+struct mail_index_map *
+mail_index_sync_get_atomic_map(struct mail_index_sync_map_ctx *ctx);
+
+void mail_index_sync_init_expunge_handlers(struct mail_index_sync_map_ctx *ctx);
+void
+mail_index_sync_deinit_expunge_handlers(struct mail_index_sync_map_ctx *ctx);
+void mail_index_sync_init_handlers(struct mail_index_sync_map_ctx *ctx);
+void mail_index_sync_deinit_handlers(struct mail_index_sync_map_ctx *ctx);
+
+/* Handlers for the individual transaction record types. All return 1 on
+   success and -1 after marking the index corrupted. */
+int mail_index_sync_ext_intro(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_intro *u);
+int mail_index_sync_ext_reset(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_reset *u);
+int mail_index_sync_ext_hdr_update(struct mail_index_sync_map_ctx *ctx,
+ uint32_t offset, uint32_t size,
+ const void *data);
+int
+mail_index_sync_ext_rec_update(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_rec_update *u);
+int
+mail_index_sync_ext_atomic_inc(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_ext_atomic_inc *u);
+
+int mail_index_sync_keywords(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_header *hdr,
+ const struct mail_transaction_keyword_update *rec);
+int
+mail_index_sync_keywords_reset(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_header *hdr,
+ const struct mail_transaction_keyword_reset *r);
+
+/* Log a corruption error for the record being synced and mark ctx->errors. */
+void mail_index_sync_set_corrupted(struct mail_index_sync_map_ctx *ctx,
+ const char *fmt, ...)
+ ATTR_FORMAT(2, 3) ATTR_COLD;
+
+#ifdef DEBUG
+void mail_index_map_check(struct mail_index_map *map);
+#endif
+
+#endif
diff --git a/src/lib-index/mail-index-sync-update.c b/src/lib-index/mail-index-sync-update.c
new file mode 100644
index 0000000..0c37995
--- /dev/null
+++ b/src/lib-index/mail-index-sync-update.c
@@ -0,0 +1,1087 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "array.h"
+#include "mmap-util.h"
+#include "mail-index-modseq.h"
+#include "mail-index-view-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-transaction-log.h"
+#include "mail-transaction-log-private.h"
+
+/* If we have less than this many bytes to sync from log file, don't bother
+ reading the main index */
+#define MAIL_INDEX_SYNC_MIN_READ_INDEX_SIZE 2048
+
+/* Record in the map header how far into the transaction log this map has
+   been synced. With eol=FALSE the position of the previous record is
+   stored; with eol=TRUE (end of log) the head file's sequence is stored
+   and the tail offset reset if the file changed. */
+static void
+mail_index_sync_update_log_offset(struct mail_index_sync_map_ctx *ctx,
+ struct mail_index_map *map, bool eol)
+{
+ uint32_t prev_seq;
+ uoff_t prev_offset;
+
+ mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
+ &prev_seq, &prev_offset);
+ if (prev_seq == 0) {
+ /* handling lost changes in view syncing */
+ return;
+ }
+
+ if (!eol) {
+ if (prev_offset == ctx->ext_intro_end_offset &&
+ prev_seq == ctx->ext_intro_seq) {
+ /* previous transaction was an extension introduction.
+ we probably came here from
+ mail_index_sync_ext_reset(). if there are any more
+ views which want to continue syncing it needs the
+ intro. so back up a bit more.
+
+ don't do this in case the last transaction in the
+ log is the extension intro, so we don't keep trying
+ to sync it over and over again. */
+ prev_offset = ctx->ext_intro_offset;
+ }
+ map->hdr.log_file_seq = prev_seq;
+ } else {
+ i_assert(ctx->view->index->log->head->hdr.file_seq == prev_seq);
+ if (map->hdr.log_file_seq != prev_seq) {
+ map->hdr.log_file_seq = prev_seq;
+ map->hdr.log_file_tail_offset = 0;
+ }
+ }
+ map->hdr.log_file_head_offset = prev_offset;
+}
+
+/* Replace the view's map with `map`, first saving the current sync
+   position into the old map. Unless this is a view-only sync, the index's
+   own map pointer is updated as well. */
+static void mail_index_sync_replace_map(struct mail_index_sync_map_ctx *ctx,
+ struct mail_index_map *map)
+{
+ struct mail_index_view *view = ctx->view;
+
+ i_assert(view->map != map);
+
+ mail_index_sync_update_log_offset(ctx, view->map, FALSE);
+ mail_index_unmap(&view->map);
+ view->map = map;
+
+ if (ctx->type != MAIL_INDEX_SYNC_HANDLER_VIEW)
+ view->index->map = map;
+
+ /* modseq tracking caches pointers into the old map */
+ mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
+}
+
+/* Ensure the view's map is private to this sync (refcount 1) and backed by
+   memory rather than mmap(), so it can be modified safely. The rec_map may
+   still be shared — see mail_index_sync_get_atomic_map() for that. */
+static struct mail_index_map *
+mail_index_sync_move_to_private_memory(struct mail_index_sync_map_ctx *ctx)
+{
+ struct mail_index_map *map = ctx->view->map;
+
+ if (map->refcount > 1) {
+ /* Multiple views point to this map. Make a copy of the map
+ (but not rec_map). */
+ map = mail_index_map_clone(map);
+ mail_index_sync_replace_map(ctx, map);
+ i_assert(ctx->view->map == map);
+ }
+
+ if (!MAIL_INDEX_MAP_IS_IN_MEMORY(ctx->view->map)) {
+ /* map points to mmap()ed area, copy it into memory. */
+ mail_index_map_move_to_memory(ctx->view->map);
+ mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
+ }
+ return map;
+}
+
+/* Return the view's map after making both the map and its rec_map private
+   in-memory copies, so record data can be modified without affecting any
+   other view or process. */
+struct mail_index_map *
+mail_index_sync_get_atomic_map(struct mail_index_sync_map_ctx *ctx)
+{
+ /* First make sure we have a private map with rec_map pointing to
+ memory. */
+ (void)mail_index_sync_move_to_private_memory(ctx);
+ /* Next make sure the rec_map is also private to us. */
+ mail_index_record_map_move_to_private(ctx->view->map);
+ mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
+ return ctx->view->map;
+}
+
+/* Adjust the header's seen/deleted message counters for one record whose
+   flags change from old_flags to new_flags. Also bumps the corresponding
+   lowwater uid when a counter reaches its terminal value. Returns 0 on
+   success, -1 with *error_r set when a counter would go inconsistent. */
+static int
+mail_index_header_update_counts(struct mail_index_header *hdr,
+ uint8_t old_flags, uint8_t new_flags,
+ const char **error_r)
+{
+ if (((old_flags ^ new_flags) & MAIL_SEEN) != 0) {
+ /* different seen-flag */
+ if ((old_flags & MAIL_SEEN) != 0) {
+ if (hdr->seen_messages_count == 0) {
+ *error_r = "Seen counter wrong";
+ return -1;
+ }
+ hdr->seen_messages_count--;
+ } else {
+ if (hdr->seen_messages_count >= hdr->messages_count) {
+ *error_r = "Seen counter wrong";
+ return -1;
+ }
+
+ /* everything is seen now — no unseen mails below next_uid */
+ if (++hdr->seen_messages_count == hdr->messages_count)
+ hdr->first_unseen_uid_lowwater = hdr->next_uid;
+ }
+ }
+
+ if (((old_flags ^ new_flags) & MAIL_DELETED) != 0) {
+ /* different deleted-flag */
+ if ((old_flags & MAIL_DELETED) == 0) {
+ hdr->deleted_messages_count++;
+ if (hdr->deleted_messages_count > hdr->messages_count) {
+ *error_r = "Deleted counter wrong";
+ return -1;
+ }
+ } else {
+ if (hdr->deleted_messages_count == 0 ||
+ hdr->deleted_messages_count > hdr->messages_count) {
+ *error_r = "Deleted counter wrong";
+ return -1;
+ }
+
+ /* no deleted mails left below next_uid */
+ if (--hdr->deleted_messages_count == 0)
+ hdr->first_deleted_uid_lowwater = hdr->next_uid;
+ }
+ }
+ return 0;
+}
+
+/* Apply a flag-change counter update to every map sharing this rec_map
+   that already contains the uid. A counter mismatch marks the index
+   corrupted via mail_index_sync_set_corrupted(). */
+static void
+mail_index_sync_header_update_counts_all(struct mail_index_sync_map_ctx *ctx,
+ uint32_t uid,
+ uint8_t old_flags, uint8_t new_flags)
+{
+ struct mail_index_map *const *maps;
+ const char *error;
+ unsigned int i, count;
+
+ maps = array_get(&ctx->view->map->rec_map->maps, &count);
+ for (i = 0; i < count; i++) {
+ /* this map hasn't seen the uid yet — its counters don't
+    include it */
+ if (uid >= maps[i]->hdr.next_uid)
+ continue;
+
+ if (mail_index_header_update_counts(&maps[i]->hdr,
+ old_flags, new_flags,
+ &error) < 0)
+ mail_index_sync_set_corrupted(ctx, "%s", error);
+ }
+}
+
+/* Apply a flag-change counter update to the view's own map only, after
+   validating that the uid is within the map's known range. Errors mark
+   the index corrupted. */
+static void
+mail_index_sync_header_update_counts(struct mail_index_sync_map_ctx *ctx,
+ uint32_t uid, uint8_t old_flags,
+ uint8_t new_flags)
+{
+ const char *error;
+
+ if (uid >= ctx->view->map->hdr.next_uid) {
+ mail_index_sync_set_corrupted(ctx, "uid %u >= next_uid %u",
+ uid, ctx->view->map->hdr.next_uid);
+ } else {
+ if (mail_index_header_update_counts(&ctx->view->map->hdr,
+ old_flags, new_flags,
+ &error) < 0)
+ mail_index_sync_set_corrupted(ctx, "%s", error);
+ }
+}
+
+/* Lower the first-unseen / first-deleted lowwater uids in every map that
+   shares this rec_map, based on one record's uid and current flags. */
+static void
+mail_index_header_update_lowwaters(struct mail_index_sync_map_ctx *ctx,
+ uint32_t uid, enum mail_flags flags)
+{
+ struct mail_index_map *const *maps;
+ unsigned int i, count;
+
+ maps = array_get(&ctx->view->map->rec_map->maps, &count);
+ for (i = 0; i < count; i++) {
+ if ((flags & MAIL_SEEN) == 0 &&
+ uid < maps[i]->hdr.first_unseen_uid_lowwater)
+ maps[i]->hdr.first_unseen_uid_lowwater = uid;
+ if ((flags & MAIL_DELETED) != 0 &&
+ uid < maps[i]->hdr.first_deleted_uid_lowwater)
+ maps[i]->hdr.first_deleted_uid_lowwater = uid;
+ }
+}
+
+/* Invoke every registered expunge handler for each record in the sequence
+   range [seq1, seq2], passing it a pointer to the handler's extension data
+   within the record. */
+static void
+sync_expunge_call_handlers(struct mail_index_sync_map_ctx *ctx,
+ uint32_t seq1, uint32_t seq2)
+{
+ const struct mail_index_expunge_handler *eh;
+ struct mail_index_record *rec;
+ uint32_t seq;
+
+ array_foreach(&ctx->expunge_handlers, eh) {
+ for (seq = seq1; seq <= seq2; seq++) {
+ rec = MAIL_INDEX_REC_AT_SEQ(ctx->view->map, seq);
+ eh->handler(ctx, PTR_OFFSET(rec, eh->record_offset),
+ eh->sync_context);
+ }
+ }
+}
+
+/* Lazily set up expunge handlers and report whether any exist. Handlers
+   are only used when syncing the index file itself, not views. */
+static bool
+sync_expunge_handlers_init(struct mail_index_sync_map_ctx *ctx)
+{
+ /* call expunge handlers only when syncing index file */
+ if (ctx->type != MAIL_INDEX_SYNC_HANDLER_FILE)
+ return FALSE;
+
+ if (!ctx->expunge_handlers_set)
+ mail_index_sync_init_expunge_handlers(ctx);
+
+ if (!array_is_created(&ctx->expunge_handlers))
+ return FALSE;
+ return TRUE;
+}
+
+/* Expunge the given (sorted, non-overlapping) sequence ranges from the
+   map: run expunge handlers, update flag counters and modseqs, then
+   compact the record array in place by memmove()ing each surviving run of
+   records down over the removed ones. */
+static void
+sync_expunge_range(struct mail_index_sync_map_ctx *ctx, const ARRAY_TYPE(seq_range) *seqs)
+{
+ struct mail_index_map *map;
+ const struct seq_range *range;
+ unsigned int i, count;
+ uint32_t dest_seq1, prev_seq2, orig_rec_count;
+
+ range = array_get(seqs, &count);
+ if (count == 0)
+ return;
+
+ /* Get a private in-memory rec_map, which we can modify. */
+ map = mail_index_sync_get_atomic_map(ctx);
+
+ /* call the expunge handlers first */
+ if (sync_expunge_handlers_init(ctx)) {
+ for (i = 0; i < count; i++) {
+ sync_expunge_call_handlers(ctx,
+ range[i].seq1, range[i].seq2);
+ }
+ }
+
+ /* dest_seq1 = next free slot; prev_seq2 = end of the last expunged
+    range. Sequences below refer to the original, pre-compaction map. */
+ prev_seq2 = 0;
+ dest_seq1 = 1;
+ orig_rec_count = map->rec_map->records_count;
+ for (i = 0; i < count; i++) {
+ uint32_t seq1 = range[i].seq1;
+ uint32_t seq2 = range[i].seq2;
+ struct mail_index_record *rec;
+ uint32_t seq_count, seq;
+
+ i_assert(seq1 > prev_seq2);
+
+ /* drop the expunged mails from the seen/deleted counters */
+ for (seq = seq1; seq <= seq2; seq++) {
+ rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
+ mail_index_sync_header_update_counts(ctx, rec->uid, rec->flags, 0);
+ }
+
+ if (prev_seq2+1 <= seq1-1) {
+ /* @UNSAFE: move (prev_seq2+1) .. (seq1-1) to its
+ final location in the map if necessary */
+ uint32_t move_count = (seq1-1) - (prev_seq2+1) + 1;
+ if (prev_seq2+1-1 != dest_seq1-1)
+ memmove(MAIL_INDEX_REC_AT_SEQ(map, dest_seq1),
+ MAIL_INDEX_REC_AT_SEQ(map, prev_seq2+1),
+ move_count * map->hdr.record_size);
+ dest_seq1 += move_count;
+ }
+ seq_count = seq2 - seq1 + 1;
+ map->rec_map->records_count -= seq_count;
+ map->hdr.messages_count -= seq_count;
+ mail_index_modseq_expunge(ctx->modseq_ctx, seq1, seq2);
+ prev_seq2 = seq2;
+ }
+ /* Final stragglers */
+ if (orig_rec_count > prev_seq2) {
+ uint32_t final_move_count = orig_rec_count - prev_seq2;
+ memmove(MAIL_INDEX_REC_AT_SEQ(map, dest_seq1),
+ MAIL_INDEX_REC_AT_SEQ(map, prev_seq2+1),
+ final_move_count * map->hdr.record_size);
+ }
+}
+
+/* Reserve space in the rec_map's buffer for one more record and return a
+   pointer to it. Refreshes the records pointer since growing the buffer
+   may reallocate it. Does not increment records_count. */
+static void *sync_append_record(struct mail_index_map *map)
+{
+ size_t append_pos;
+ void *ret;
+
+ append_pos = map->rec_map->records_count * map->hdr.record_size;
+ ret = buffer_get_space_unsafe(map->rec_map->buffer, append_pos,
+ map->hdr.record_size);
+ map->rec_map->records =
+ buffer_get_modifiable_data(map->rec_map->buffer, NULL);
+ return ret;
+}
+
+/* Return TRUE if the record currently being synced lies within the log
+   range of the transaction that was just committed (tracked in
+   index->sync_commit_result). Used to count modseq changes that the
+   committing transaction itself made. */
+static bool sync_update_ignored_change(struct mail_index_sync_map_ctx *ctx)
+{
+ struct mail_index_transaction_commit_result *result =
+ ctx->view->index->sync_commit_result;
+ uint32_t prev_log_seq;
+ uoff_t prev_log_offset, trans_start_offset, trans_end_offset;
+
+ if (result == NULL)
+ return FALSE;
+
+ /* we'll return TRUE if this modseq change was written within the
+ transaction that was just committed */
+ mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
+ &prev_log_seq, &prev_log_offset);
+ if (prev_log_seq != result->log_file_seq)
+ return FALSE;
+
+ /* commit_size covers [start, end) of the committed transaction */
+ trans_end_offset = result->log_file_offset;
+ trans_start_offset = trans_end_offset - result->commit_size;
+ if (prev_log_offset < trans_start_offset ||
+ prev_log_offset >= trans_end_offset)
+ return FALSE;
+
+ return TRUE;
+}
+
+/* Apply a batch of modseq updates: for each entry raise the message's
+   modseq to at least the given 64-bit value (split into high/low 32-bit
+   halves in the transaction record). uid==0 entries are accepted but
+   skipped. Returns 1 on success, -1 if modseqs weren't enabled yet. */
+static int
+sync_modseq_update(struct mail_index_sync_map_ctx *ctx,
+ const struct mail_transaction_modseq_update *u,
+ unsigned int size)
+{
+ struct mail_index_view *view = ctx->view;
+ const struct mail_transaction_modseq_update *end;
+ uint32_t seq;
+ uint64_t min_modseq;
+ int ret;
+
+ end = CONST_PTR_OFFSET(u, size);
+ for (; u < end; u++) {
+ if (u->uid == 0)
+ seq = 0;
+ else if (!mail_index_lookup_seq(view, u->uid, &seq))
+ continue;
+
+ min_modseq = ((uint64_t)u->modseq_high32 << 32) |
+ u->modseq_low32;
+
+ ret = seq == 0 ? 1 :
+ mail_index_modseq_set(view, seq, min_modseq);
+ if (ret < 0) {
+ mail_index_sync_set_corrupted(ctx,
+ "modseqs updated before they were enabled");
+ return -1;
+ }
+ /* ret == 0: change had no effect; count it if it came from
+    the transaction we just committed ourselves */
+ if (ret == 0 && sync_update_ignored_change(ctx))
+ view->index->sync_commit_result->ignored_modseq_changes++;
+ }
+ return 1;
+}
+
+static int sync_append(const struct mail_index_record *rec,
+ struct mail_index_sync_map_ctx *ctx)
+{
+ struct mail_index_view *view = ctx->view;
+ struct mail_index_map *map = view->map;
+ const struct mail_index_record *old_rec;
+ enum mail_flags new_flags;
+ void *dest;
+
+ if (rec->uid < map->hdr.next_uid) {
+ mail_index_sync_set_corrupted(ctx,
+ "Append with UID %u, but next_uid = %u",
+ rec->uid, map->hdr.next_uid);
+ return -1;
+ }
+
+ /* We'll need to append a new record. If map currently points to
+ mmap()ed index, it first needs to be moved to memory since we can't
+ write past the mmap()ed memory area. */
+ map = mail_index_sync_move_to_private_memory(ctx);
+
+ if (rec->uid <= map->rec_map->last_appended_uid) {
+ i_assert(map->hdr.messages_count < map->rec_map->records_count);
+ /* the flags may have changed since it was added to map.
+ use the updated flags already, so flag counters won't get
+ broken. */
+ old_rec = MAIL_INDEX_MAP_IDX(map, map->hdr.messages_count);
+ i_assert(old_rec->uid == rec->uid);
+ new_flags = old_rec->flags;
+ } else {
+ /* don't rely on buffer->used being at the correct position.
+ at least expunges can move it */
+ dest = sync_append_record(map);
+ memcpy(dest, rec, sizeof(*rec));
+ memset(PTR_OFFSET(dest, sizeof(*rec)), 0,
+ map->hdr.record_size - sizeof(*rec));
+ map->rec_map->records_count++;
+ map->rec_map->last_appended_uid = rec->uid;
+ new_flags = rec->flags;
+
+ mail_index_modseq_append(ctx->modseq_ctx,
+ map->rec_map->records_count);
+ }
+
+ map->hdr.messages_count++;
+ map->hdr.next_uid = rec->uid+1;
+
+ if ((new_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0 &&
+ (view->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) == 0)
+ map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
+
+ mail_index_header_update_lowwaters(ctx, rec->uid, new_flags);
+ mail_index_sync_header_update_counts(ctx, rec->uid, 0, new_flags);
+ return 1;
+}
+
+/* Apply a flag update transaction to all records in the [uid1, uid2]
+   range. Always returns 1; UIDs outside the view are silently skipped. */
+static int sync_flag_update(const struct mail_transaction_flag_update *u,
+			    struct mail_index_sync_map_ctx *ctx)
+{
+	struct mail_index_view *view = ctx->view;
+	struct mail_index_record *rec;
+	uint8_t flag_mask, old_flags;
+	uint32_t seq, seq1, seq2;
+
+	if (!mail_index_lookup_seq_range(view, u->uid1, u->uid2, &seq1, &seq2))
+		return 1;
+
+	/* internal updates (e.g. dirtiness) don't bump modseqs */
+	if (!MAIL_TRANSACTION_FLAG_UPDATE_IS_INTERNAL(u)) {
+		mail_index_modseq_update_flags(ctx->modseq_ctx,
+					       u->add_flags | u->remove_flags,
+					       seq1, seq2);
+	}
+
+	if ((u->add_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0 &&
+	    (view->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) == 0)
+		view->map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
+
+	/* mask that keeps everything except the removed flags */
+	flag_mask = (unsigned char)~u->remove_flags;
+
+	if (((u->add_flags | u->remove_flags) &
+	     (MAIL_SEEN | MAIL_DELETED)) == 0) {
+		/* we're not modifying any counted/lowwatered flags */
+		for (seq = seq1; seq <= seq2; seq++) {
+			rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
+			rec->flags = (rec->flags & flag_mask) | u->add_flags;
+		}
+	} else {
+		/* seen/deleted counters and lowwater marks need updating
+		   per record */
+		for (seq = seq1; seq <= seq2; seq++) {
+			rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
+
+			old_flags = rec->flags;
+			rec->flags = (rec->flags & flag_mask) | u->add_flags;
+
+			mail_index_header_update_lowwaters(ctx, rec->uid,
+							   rec->flags);
+			mail_index_sync_header_update_counts_all(ctx, rec->uid,
+								 old_flags,
+								 rec->flags);
+		}
+	}
+	return 1;
+}
+
+/* Apply a raw header update to both the header copy buffer and the parsed
+   struct mail_index_header. Returns 1 on success, -1 if the update falls
+   outside the base header. */
+static int sync_header_update(const struct mail_transaction_header_update *u,
+			      struct mail_index_sync_map_ctx *ctx)
+{
+#define MAIL_INDEX_HEADER_UPDATE_FIELD_IN_RANGE(u, field) \
+	((u)->offset <= offsetof(struct mail_index_header, field) && \
+	 (u)->offset + (u)->size > offsetof(struct mail_index_header, field))
+	struct mail_index_map *map = ctx->view->map;
+	/* remember values that must not be changed by a raw update */
+	uint32_t orig_log_file_tail_offset = map->hdr.log_file_tail_offset;
+	uint32_t orig_next_uid = map->hdr.next_uid;
+
+	if (u->offset >= map->hdr.base_header_size ||
+	    u->offset + u->size > map->hdr.base_header_size) {
+		mail_index_sync_set_corrupted(ctx,
+			"Header update outside range: %u + %u > %u",
+			u->offset, u->size, map->hdr.base_header_size);
+		return -1;
+	}
+
+	/* the update payload follows the transaction record (u + 1) */
+	buffer_write(map->hdr_copy_buf, u->offset, u + 1, u->size);
+	i_assert(map->hdr_copy_buf->used == map->hdr.header_size);
+
+	/* @UNSAFE */
+	if ((uint32_t)(u->offset + u->size) <= sizeof(map->hdr)) {
+		memcpy(PTR_OFFSET(&map->hdr, u->offset),
+		       u + 1, u->size);
+	} else if (u->offset < sizeof(map->hdr)) {
+		memcpy(PTR_OFFSET(&map->hdr, u->offset),
+		       u + 1, sizeof(map->hdr) - u->offset);
+	}
+
+	if (map->hdr.next_uid < orig_next_uid) {
+		/* next_uid update tried to shrink its value. this can happen
+		   in some race conditions with e.g. with dsync, so just
+		   silently ignore it. */
+		map->hdr.next_uid = orig_next_uid;
+	}
+
+	/* the tail offset updates are intended for internal transaction
+	   log handling. we'll update the offset in the header only when
+	   the sync is finished. */
+	map->hdr.log_file_tail_offset = orig_log_file_tail_offset;
+	return 1;
+}
+
+/* Dispatch a single transaction log record to its type-specific sync
+   handler, applying it to ctx->view's map. Returns >0 on success, 0 if the
+   record was skipped/stopped early, -1 on corruption. */
+static int
+mail_index_sync_record_real(struct mail_index_sync_map_ctx *ctx,
+			    const struct mail_transaction_header *hdr,
+			    const void *data)
+{
+	int ret = 0;
+
+	switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
+	case MAIL_TRANSACTION_APPEND: {
+		const struct mail_index_record *rec, *end;
+
+		end = CONST_PTR_OFFSET(data, hdr->size);
+		for (rec = data; rec < end; rec++) {
+			ret = sync_append(rec, ctx);
+			if (ret <= 0)
+				break;
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_EXPUNGE:
+	case MAIL_TRANSACTION_EXPUNGE|MAIL_TRANSACTION_EXPUNGE_PROT: {
+		const struct mail_transaction_expunge *rec = data, *end;
+		ARRAY_TYPE(seq_range) seqs;
+		uint32_t seq1, seq2;
+
+		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
+			/* this is simply a request for expunge */
+			break;
+		}
+		/* collect all UID ranges into seqs, then expunge in one go */
+		t_array_init(&seqs, 64);
+		end = CONST_PTR_OFFSET(data, hdr->size);
+		for (; rec != end; rec++) {
+			if (mail_index_lookup_seq_range(ctx->view,
+					rec->uid1, rec->uid2, &seq1, &seq2))
+				seq_range_array_add_range(&seqs, seq1, seq2);
+		}
+		sync_expunge_range(ctx, &seqs);
+		break;
+	}
+	case MAIL_TRANSACTION_EXPUNGE_GUID:
+	case MAIL_TRANSACTION_EXPUNGE_GUID|MAIL_TRANSACTION_EXPUNGE_PROT: {
+		const struct mail_transaction_expunge_guid *rec = data, *end;
+		ARRAY_TYPE(seq_range) seqs;
+		uint32_t seq;
+
+		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
+			/* this is simply a request for expunge */
+			break;
+		}
+		t_array_init(&seqs, 64);
+		end = CONST_PTR_OFFSET(data, hdr->size);
+		for (; rec != end; rec++) {
+			i_assert(rec->uid != 0);
+
+			if (mail_index_lookup_seq(ctx->view, rec->uid, &seq))
+				seq_range_array_add(&seqs, seq);
+		}
+
+		sync_expunge_range(ctx, &seqs);
+		break;
+	}
+	case MAIL_TRANSACTION_FLAG_UPDATE: {
+		const struct mail_transaction_flag_update *rec, *end;
+
+		end = CONST_PTR_OFFSET(data, hdr->size);
+		for (rec = data; rec < end; rec++) {
+			ret = sync_flag_update(rec, ctx);
+			if (ret <= 0)
+				break;
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_HEADER_UPDATE: {
+		const struct mail_transaction_header_update *rec;
+		unsigned int i;
+
+		/* variable-sized records, each padded to 32 bits */
+		for (i = 0; i < hdr->size; ) {
+			rec = CONST_PTR_OFFSET(data, i);
+			ret = sync_header_update(rec, ctx);
+			if (ret <= 0)
+				break;
+
+			i += sizeof(*rec) + rec->size;
+			if ((i % 4) != 0)
+				i += 4 - (i % 4);
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_EXT_INTRO: {
+		const struct mail_transaction_ext_intro *rec = data;
+		unsigned int i;
+		uint32_t prev_seq;
+		uoff_t prev_offset;
+
+		/* remember where this intro lives in the log, so later
+		   extension records can refer back to it */
+		mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
+						       &prev_seq, &prev_offset);
+		ctx->ext_intro_seq = prev_seq;
+		ctx->ext_intro_offset = prev_offset;
+		ctx->ext_intro_end_offset =
+			prev_offset + hdr->size + sizeof(*hdr);
+
+		for (i = 0; i < hdr->size; ) {
+			if (i + sizeof(*rec) > hdr->size) {
+				/* should be just extra padding */
+				break;
+			}
+
+			rec = CONST_PTR_OFFSET(data, i);
+			/* name_size checked by _log_view_next() */
+			i_assert(i + sizeof(*rec) + rec->name_size <= hdr->size);
+
+			ret = mail_index_sync_ext_intro(ctx, rec);
+			if (ret <= 0)
+				break;
+
+			i += sizeof(*rec) + rec->name_size;
+			if ((i % 4) != 0)
+				i += 4 - (i % 4);
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_EXT_RESET: {
+		struct mail_transaction_ext_reset rec;
+
+		/* old versions have only new_reset_id */
+		if (hdr->size < sizeof(uint32_t)) {
+			mail_index_sync_set_corrupted(ctx,
+				"ext reset: invalid record size");
+			ret = -1;
+			break;
+		}
+		/* copy into a zeroed local so missing trailing fields read
+		   as zero with old record layouts */
+		i_zero(&rec);
+		memcpy(&rec, data, I_MIN(hdr->size, sizeof(rec)));
+		ret = mail_index_sync_ext_reset(ctx, &rec);
+		break;
+	}
+	case MAIL_TRANSACTION_EXT_HDR_UPDATE: {
+		const struct mail_transaction_ext_hdr_update *rec;
+		unsigned int i;
+
+		for (i = 0; i < hdr->size; ) {
+			rec = CONST_PTR_OFFSET(data, i);
+
+			if (i + sizeof(*rec) > hdr->size ||
+			    i + sizeof(*rec) + rec->size > hdr->size) {
+				mail_index_sync_set_corrupted(ctx,
+					"ext hdr update: invalid record size");
+				ret = -1;
+				break;
+			}
+
+			ret = mail_index_sync_ext_hdr_update(ctx, rec->offset,
+							     rec->size, rec + 1);
+			if (ret <= 0)
+				break;
+
+			i += sizeof(*rec) + rec->size;
+			if ((i % 4) != 0)
+				i += 4 - (i % 4);
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_EXT_HDR_UPDATE32: {
+		/* same as EXT_HDR_UPDATE, but with 32-bit offset/size fields */
+		const struct mail_transaction_ext_hdr_update32 *rec;
+		unsigned int i;
+
+		for (i = 0; i < hdr->size; ) {
+			rec = CONST_PTR_OFFSET(data, i);
+
+			if (i + sizeof(*rec) > hdr->size ||
+			    i + sizeof(*rec) + rec->size > hdr->size) {
+				mail_index_sync_set_corrupted(ctx,
+					"ext hdr update: invalid record size");
+				ret = -1;
+				break;
+			}
+
+			ret = mail_index_sync_ext_hdr_update(ctx, rec->offset,
+							     rec->size, rec + 1);
+			if (ret <= 0)
+				break;
+
+			i += sizeof(*rec) + rec->size;
+			if ((i % 4) != 0)
+				i += 4 - (i % 4);
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_EXT_REC_UPDATE: {
+		const struct mail_transaction_ext_rec_update *rec;
+		unsigned int i, record_size;
+
+		/* requires a preceding EXT_INTRO to know which extension
+		   and record size to use */
+		if (ctx->cur_ext_map_idx == (uint32_t)-1) {
+			mail_index_sync_set_corrupted(ctx,
+				"Extension record updated "
+				"without intro prefix");
+			ret = -1;
+			break;
+		}
+
+		if (ctx->cur_ext_ignore) {
+			ret = 1;
+			break;
+		}
+
+		/* the record is padded to 32bits in the transaction log */
+		record_size = (sizeof(*rec) + ctx->cur_ext_record_size + 3) & ~3U;
+
+		for (i = 0; i < hdr->size; i += record_size) {
+			rec = CONST_PTR_OFFSET(data, i);
+
+			if (i + record_size > hdr->size) {
+				mail_index_sync_set_corrupted(ctx,
+					"ext rec update: invalid record size");
+				ret = -1;
+				break;
+			}
+
+			ret = mail_index_sync_ext_rec_update(ctx, rec);
+			if (ret <= 0)
+				break;
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_EXT_ATOMIC_INC: {
+		const struct mail_transaction_ext_atomic_inc *rec, *end;
+
+		if (ctx->cur_ext_map_idx == (uint32_t)-1) {
+			mail_index_sync_set_corrupted(ctx,
+				"Extension record updated "
+				"without intro prefix");
+			ret = -1;
+			break;
+		}
+
+		if (ctx->cur_ext_ignore) {
+			ret = 1;
+			break;
+		}
+
+		end = CONST_PTR_OFFSET(data, hdr->size);
+		for (rec = data; rec < end; rec++) {
+			ret = mail_index_sync_ext_atomic_inc(ctx, rec);
+			if (ret <= 0)
+				break;
+		}
+		break;
+	}
+	case MAIL_TRANSACTION_KEYWORD_UPDATE: {
+		const struct mail_transaction_keyword_update *rec = data;
+
+		ret = mail_index_sync_keywords(ctx, hdr, rec);
+		break;
+	}
+	case MAIL_TRANSACTION_KEYWORD_RESET: {
+		const struct mail_transaction_keyword_reset *rec = data;
+
+		ret = mail_index_sync_keywords_reset(ctx, hdr, rec);
+		break;
+	}
+	case MAIL_TRANSACTION_MODSEQ_UPDATE: {
+		const struct mail_transaction_modseq_update *rec = data;
+
+		ret = sync_modseq_update(ctx, rec, hdr->size);
+		break;
+	}
+	case MAIL_TRANSACTION_INDEX_DELETED:
+		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
+			/* next sync finishes the deletion */
+			ctx->view->index->index_delete_requested = TRUE;
+		} else {
+			/* transaction log reading handles this */
+		}
+		break;
+	case MAIL_TRANSACTION_INDEX_UNDELETED:
+		ctx->view->index->index_delete_requested = FALSE;
+		break;
+	case MAIL_TRANSACTION_BOUNDARY:
+		break;
+	case MAIL_TRANSACTION_ATTRIBUTE_UPDATE:
+		break;
+	default:
+		mail_index_sync_set_corrupted(ctx,
+			"Unknown transaction record type 0x%x",
+			(hdr->type & MAIL_TRANSACTION_TYPE_MASK));
+		ret = -1;
+		break;
+	}
+	return ret;
+}
+
+/* Public wrapper for mail_index_sync_record_real() that runs it inside its
+   own data-stack frame (t_array_init() etc. allocations are freed at T_END). */
+int mail_index_sync_record(struct mail_index_sync_map_ctx *ctx,
+			   const struct mail_transaction_header *hdr,
+			   const void *data)
+{
+	int ret;
+
+	T_BEGIN {
+		ret = mail_index_sync_record_real(ctx, hdr, data);
+	} T_END;
+	return ret;
+}
+
+/* Initialize a sync-map context for the given view. Starts a modseq sync
+   and registers the extension sync handlers. Must be paired with
+   mail_index_sync_map_deinit(). */
+void mail_index_sync_map_init(struct mail_index_sync_map_ctx *sync_map_ctx,
+			      struct mail_index_view *view,
+			      enum mail_index_sync_handler_type type)
+{
+	i_zero(sync_map_ctx);
+	sync_map_ctx->view = view;
+	/* -1 = no extension intro seen yet */
+	sync_map_ctx->cur_ext_map_idx = (uint32_t)-1;
+	sync_map_ctx->type = type;
+	sync_map_ctx->modseq_ctx = mail_index_modseq_sync_begin(sync_map_ctx);
+
+	mail_index_sync_init_handlers(sync_map_ctx);
+}
+
+/* Release everything allocated by mail_index_sync_map_init(). The modseq
+   context must already have been ended by the caller
+   (mail_index_modseq_sync_end()). */
+void mail_index_sync_map_deinit(struct mail_index_sync_map_ctx *sync_map_ctx)
+{
+	i_assert(sync_map_ctx->modseq_ctx == NULL);
+
+	buffer_free(&sync_map_ctx->unknown_extensions);
+	if (sync_map_ctx->expunge_handlers_used)
+		mail_index_sync_deinit_expunge_handlers(sync_map_ctx);
+	mail_index_sync_deinit_handlers(sync_map_ctx);
+}
+
+/* Re-set MAIL_INDEX_HDR_FLAG_HAVE_DIRTY if any record in the map still has
+   the dirty flag. Called after a sync that temporarily cleared the header
+   flag; a no-op if the flag was already re-set during the sync or if dirty
+   tracking is disabled. */
+static void mail_index_sync_update_hdr_dirty_flag(struct mail_index_map *map)
+{
+	const struct mail_index_record *rec;
+	uint32_t seq;
+
+	if ((map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0 ||
+	    (map->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) != 0)
+		return;
+
+	/* do we have dirty flags anymore? */
+	for (seq = 1; seq <= map->rec_map->records_count; seq++) {
+		rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
+		if ((rec->flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0) {
+			map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
+			break;
+		}
+	}
+}
+
+#ifdef DEBUG
+/* Debug-build consistency check: verify UIDs are strictly increasing and
+   that the seen/deleted counters and lowwater UIDs in the header match the
+   actual records. Aborts via i_assert() on mismatch. */
+void mail_index_map_check(struct mail_index_map *map)
+{
+	const struct mail_index_header *hdr = &map->hdr;
+	unsigned int del = 0, seen = 0;
+	uint32_t seq, prev_uid = 0;
+
+	/* escape hatch for tests that intentionally corrupt the index */
+	if (getenv("DEBUG_IGNORE_INDEX_CORRUPTION") != NULL)
+		return;
+
+	i_assert(hdr->messages_count <= map->rec_map->records_count);
+	for (seq = 1; seq <= hdr->messages_count; seq++) {
+		const struct mail_index_record *rec;
+
+		rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
+		i_assert(rec->uid > prev_uid);
+		prev_uid = rec->uid;
+
+		if ((rec->flags & MAIL_DELETED) != 0) {
+			i_assert(rec->uid >= hdr->first_deleted_uid_lowwater);
+			del++;
+		}
+		if ((rec->flags & MAIL_SEEN) != 0)
+			seen++;
+		else
+			i_assert(rec->uid >= hdr->first_unseen_uid_lowwater);
+	}
+	i_assert(del == hdr->deleted_messages_count);
+	i_assert(seen == hdr->seen_messages_count);
+}
+#endif
+
+/* Decide whether syncing from the current map would read so much of the
+   transaction log that simply reopening the index file is likely cheaper.
+   Returns TRUE if the index should be reopened instead of synced. */
+bool mail_index_sync_map_want_index_reopen(struct mail_index_map *map,
+					   enum mail_index_sync_handler_type type)
+{
+	struct mail_index *index = map->index;
+
+	/* no log opened yet - must reopen */
+	if (index->log->head == NULL)
+		return TRUE;
+
+	uoff_t start_offset = type == MAIL_INDEX_SYNC_HANDLER_FILE ?
+		map->hdr.log_file_tail_offset : map->hdr.log_file_head_offset;
+	/* don't check this if mmap is disabled, because reopening
+	   index causes sync to get lost. */
+	if ((index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) == 0) {
+		uoff_t log_size, index_size;
+
+		if (index->fd == -1 &&
+		    index->log->head->hdr.prev_file_seq != 0) {
+			/* we don't know the index's size, so use the
+			   smallest index size we're willing to read */
+			index_size = MAIL_INDEX_SYNC_MIN_READ_INDEX_SIZE;
+		} else {
+			index_size = map->hdr.header_size +
+				map->rec_map->records_count *
+				map->hdr.record_size;
+		}
+
+		/* this isn't necessary correct currently, but it should be
+		   close enough */
+		log_size = index->log->head->last_size;
+		/* reopen when the unread log tail is larger than the whole
+		   index file */
+		if (log_size > start_offset &&
+		    log_size - start_offset > index_size)
+			return TRUE;
+	}
+	return FALSE;
+}
+
+/* Bring *_map up to date by replaying transaction log records from the
+   map's recorded log position to the log head. On success *_map may point
+   to a new (possibly fsck'd) map. Returns 1 on success, 0 if the log
+   position was lost (*reason_r explains why), -1 on I/O failure. */
+int mail_index_sync_map(struct mail_index_map **_map,
+			enum mail_index_sync_handler_type type,
+			const char **reason_r)
+{
+	struct mail_index_map *map = *_map;
+	struct mail_index *index = map->index;
+	struct mail_index_view *view;
+	struct mail_index_sync_map_ctx sync_map_ctx;
+	const struct mail_transaction_header *thdr;
+	const void *tdata;
+	uint32_t prev_seq;
+	uoff_t start_offset, prev_offset;
+	const char *reason, *error;
+	int ret;
+	bool had_dirty, reset;
+
+	i_assert(index->log->head != NULL);
+	i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);
+
+	/* FILE syncs start from the tail offset, view syncs from the head */
+	start_offset = type == MAIL_INDEX_SYNC_HANDLER_FILE ?
+		map->hdr.log_file_tail_offset : map->hdr.log_file_head_offset;
+
+	view = mail_index_view_open_with_map(index, map);
+	ret = mail_transaction_log_view_set(view->log_view,
+					    map->hdr.log_file_seq, start_offset,
+					    (uint32_t)-1, UOFF_T_MAX,
+					    &reset, &reason);
+	if (ret <= 0) {
+		mail_index_view_close(&view);
+		if (ret < 0) {
+			/* I/O failure */
+			return -1;
+		}
+		/* the seq/offset is probably broken */
+		*reason_r = t_strdup_printf(
+			"Lost log for seq=%u offset=%"PRIuUOFF_T": %s "
+			"(initial_mapped=%d)",
+			map->hdr.log_file_seq, start_offset, reason,
+			index->initial_mapped ? 1 : 0);
+		return 0;
+	}
+
+	mail_transaction_log_get_head(index->log, &prev_seq, &prev_offset);
+	if (prev_seq != map->hdr.log_file_seq ||
+	    prev_offset - map->hdr.log_file_tail_offset >
+	    		index->optimization_set.index.rewrite_min_log_bytes) {
+		/* we're reading more from log than we would have preferred.
+		   remember that we probably want to rewrite index soon. */
+		index->index_min_write = TRUE;
+	}
+
+	/* view referenced the map. avoid unnecessary map cloning by
+	   unreferencing the map while view exists. */
+	map->refcount--;
+
+	/* clear the dirty flag for the duration of the sync; it's restored
+	   by mail_index_sync_update_hdr_dirty_flag() afterwards */
+	had_dirty = (map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0;
+	if (had_dirty)
+		map->hdr.flags &= ENUM_NEGATE(MAIL_INDEX_HDR_FLAG_HAVE_DIRTY);
+
+	mail_transaction_log_view_get_prev_pos(view->log_view,
+					       &prev_seq, &prev_offset);
+
+	mail_index_sync_map_init(&sync_map_ctx, view, type);
+	if (reset) {
+		/* Reset the entire index. Leave only indexid and
+		   log_file_seq. */
+		mail_transaction_log_view_get_prev_pos(view->log_view,
+						       &prev_seq, &prev_offset);
+		map = mail_index_map_alloc(index);
+		if ((index->map->hdr.flags & MAIL_INDEX_HDR_FLAG_FSCKD) != 0)
+			map->hdr.flags |= MAIL_INDEX_HDR_FLAG_FSCKD;
+		map->hdr.log_file_seq = prev_seq;
+		map->hdr.log_file_tail_offset = 0;
+		mail_index_sync_replace_map(&sync_map_ctx, map);
+	}
+	map = NULL;
+
+	/* FIXME: when transaction sync lock is removed, we'll need to handle
+	   the case when a transaction is committed while mailbox is being
+	   synced ([synced transactions][new transaction][ext transaction]).
+	   this means int_offset contains [synced] and ext_offset contains
+	   all */
+	while ((ret = mail_transaction_log_view_next(view->log_view, &thdr,
+						     &tdata)) > 0) {
+		mail_transaction_log_view_get_prev_pos(view->log_view,
+						       &prev_seq, &prev_offset);
+
+		if (LOG_IS_BEFORE(prev_seq, prev_offset,
+				  view->map->hdr.log_file_seq,
+				  view->map->hdr.log_file_head_offset)) {
+			/* this has been synced already. */
+			i_assert(type == MAIL_INDEX_SYNC_HANDLER_FILE);
+			continue;
+		}
+
+		/* we'll just skip over broken entries */
+		(void)mail_index_sync_record(&sync_map_ctx, thdr, tdata);
+	}
+	map = view->map;
+
+	if (had_dirty)
+		mail_index_sync_update_hdr_dirty_flag(map);
+	mail_index_modseq_sync_end(&sync_map_ctx.modseq_ctx);
+
+	mail_index_sync_update_log_offset(&sync_map_ctx, view->map, TRUE);
+
+#ifdef DEBUG
+	mail_index_map_check(map);
+#endif
+	i_assert(map->hdr.indexid == index->indexid || map->hdr.indexid == 0);
+
+	/* transaction log tracks internally the current tail offset.
+	   besides using header updates, it also updates the offset to skip
+	   over following external transactions to avoid extra unneeded log
+	   reading. */
+	i_assert(map->hdr.log_file_seq == index->log->head->hdr.file_seq);
+	if (map->hdr.log_file_tail_offset < index->log->head->max_tail_offset) {
+		map->hdr.log_file_tail_offset =
+			index->log->head->max_tail_offset;
+	}
+
+	/* flush the parsed header back into the copy buffer / mmap area */
+	buffer_write(map->hdr_copy_buf, 0, &map->hdr, sizeof(map->hdr));
+	if (!MAIL_INDEX_MAP_IS_IN_MEMORY(map)) {
+		memcpy(map->rec_map->mmap_base, map->hdr_copy_buf->data,
+		       map->hdr_copy_buf->used);
+	}
+
+	/* restore refcount before closing the view. this is necessary also
+	   if map got cloned, because view closing would otherwise destroy it */
+	map->refcount++;
+	mail_index_sync_map_deinit(&sync_map_ctx);
+	mail_index_view_close(&view);
+
+	i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);
+
+	if (mail_index_map_check_header(map, &error) <= 0) {
+		mail_index_set_error(index,
+			"Synchronization corrupted index header %s: %s",
+			index->filepath, error);
+		(void)mail_index_fsck(index);
+		map = index->map;
+	} else if (sync_map_ctx.errors) {
+		/* make sure the index looks valid now */
+		(void)mail_index_fsck(index);
+		map = index->map;
+	}
+
+	*_map = map;
+	return ret < 0 ? -1 : 1;
+}
diff --git a/src/lib-index/mail-index-sync.c b/src/lib-index/mail-index-sync.c
new file mode 100644
index 0000000..6322ee1
--- /dev/null
+++ b/src/lib-index/mail-index-sync.c
@@ -0,0 +1,1062 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "mail-index-view-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-index-transaction-private.h"
+#include "mail-transaction-log-private.h"
+#include "mail-cache-private.h"
+
+#include <stdio.h>
+
+/* State for one mail_index_sync_begin()..commit/rollback cycle. */
+struct mail_index_sync_ctx {
+	struct mail_index *index;
+	struct mail_index_view *view;
+	/* sync_trans accumulates the internal changes read from the log;
+	   ext_trans is the external transaction handed to the caller. */
+	struct mail_index_transaction *sync_trans, *ext_trans;
+	struct mail_index_transaction_commit_result *sync_commit_result;
+	enum mail_index_sync_flags flags;
+	char *reason;
+
+	/* transaction log record currently being processed */
+	const struct mail_transaction_header *hdr;
+	const void *data;
+
+	/* expunge/flag/keyword change arrays, sorted for the caller */
+	ARRAY(struct mail_index_sync_list) sync_list;
+	uint32_t next_uid;
+
+	bool no_warning:1;
+	bool seen_external_expunges:1;
+	bool seen_nonexternal_transactions:1;
+	bool fully_synced:1;
+};
+
+/* Replay an expunge transaction record into ctx->sync_trans, one
+   mail_index_expunge() call per UID in each [uid1, uid2] range. */
+static void mail_index_sync_add_expunge(struct mail_index_sync_ctx *ctx)
+{
+	const struct mail_transaction_expunge *e = ctx->data;
+	size_t i, size = ctx->hdr->size / sizeof(*e);
+	uint32_t uid;
+
+	for (i = 0; i < size; i++) {
+		for (uid = e[i].uid1; uid <= e[i].uid2; uid++)
+			mail_index_expunge(ctx->sync_trans, uid);
+	}
+}
+
+/* Replay a GUID-expunge transaction record into ctx->sync_trans. */
+static void mail_index_sync_add_expunge_guid(struct mail_index_sync_ctx *ctx)
+{
+	const struct mail_transaction_expunge_guid *e = ctx->data;
+	size_t i, size = ctx->hdr->size / sizeof(*e);
+
+	for (i = 0; i < size; i++) {
+		mail_index_expunge_guid(ctx->sync_trans, e[i].uid,
+					e[i].guid_128);
+	}
+}
+
+/* Replay a flag-update transaction record into ctx->sync_trans, splitting
+   each entry into separate add and remove range updates. */
+static void mail_index_sync_add_flag_update(struct mail_index_sync_ctx *ctx)
+{
+	const struct mail_transaction_flag_update *u = ctx->data;
+	size_t i, size = ctx->hdr->size / sizeof(*u);
+
+	for (i = 0; i < size; i++) {
+		if (u[i].add_flags != 0) {
+			mail_index_update_flags_range(ctx->sync_trans,
+						      u[i].uid1, u[i].uid2,
+						      MODIFY_ADD,
+						      u[i].add_flags);
+		}
+		if (u[i].remove_flags != 0) {
+			mail_index_update_flags_range(ctx->sync_trans,
+						      u[i].uid1, u[i].uid2,
+						      MODIFY_REMOVE,
+						      u[i].remove_flags);
+		}
+	}
+}
+
+/* Replay a keyword-update transaction record into ctx->sync_trans. The
+   record layout is: header, keyword name (name_size bytes), padding to a
+   32-bit boundary, then a list of (uid1, uid2) pairs. */
+static void mail_index_sync_add_keyword_update(struct mail_index_sync_ctx *ctx)
+{
+	const struct mail_transaction_keyword_update *u = ctx->data;
+	const char *keyword_names[2];
+	struct mail_keywords *keywords;
+	const uint32_t *uids;
+	uint32_t uid;
+	size_t uidset_offset, i, size;
+
+	i_assert(u->name_size > 0);
+
+	/* skip over the name and its 32-bit alignment padding */
+	uidset_offset = sizeof(*u) + u->name_size;
+	if ((uidset_offset % 4) != 0)
+		uidset_offset += 4 - (uidset_offset % 4);
+	uids = CONST_PTR_OFFSET(u, uidset_offset);
+
+	keyword_names[0] = t_strndup(u + 1, u->name_size);
+	keyword_names[1] = NULL;
+	keywords = mail_index_keywords_create(ctx->index, keyword_names);
+
+	size = (ctx->hdr->size - uidset_offset) / sizeof(uint32_t);
+	for (i = 0; i < size; i += 2) {
+		/* FIXME: mail_index_update_keywords_range() */
+		for (uid = uids[i]; uid <= uids[i+1]; uid++) {
+			mail_index_update_keywords(ctx->sync_trans, uid,
+						   u->modify_type, keywords);
+		}
+	}
+
+	mail_index_keywords_unref(&keywords);
+}
+
+/* Replay a keyword-reset transaction record: replace each UID's keywords
+   with an empty keyword set in ctx->sync_trans. */
+static void mail_index_sync_add_keyword_reset(struct mail_index_sync_ctx *ctx)
+{
+	const struct mail_transaction_keyword_reset *u = ctx->data;
+	size_t i, size = ctx->hdr->size / sizeof(*u);
+	struct mail_keywords *keywords;
+	uint32_t uid;
+
+	/* NULL keyword list = empty set */
+	keywords = mail_index_keywords_create(ctx->index, NULL);
+	for (i = 0; i < size; i++) {
+		for (uid = u[i].uid1; uid <= u[i].uid2; uid++) {
+			mail_index_update_keywords(ctx->sync_trans, uid,
+						   MODIFY_REPLACE, keywords);
+		}
+	}
+	mail_index_keywords_unref(&keywords);
+}
+
+/* Dispatch the current log record (ctx->hdr/ctx->data) to the matching
+   mail_index_sync_add_*() helper. Returns TRUE if the record type is one
+   the mailbox-sync caller needs to see, FALSE for internal-only types. */
+static bool mail_index_sync_add_transaction(struct mail_index_sync_ctx *ctx)
+{
+	switch (ctx->hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
+	case MAIL_TRANSACTION_EXPUNGE:
+		mail_index_sync_add_expunge(ctx);
+		break;
+	case MAIL_TRANSACTION_EXPUNGE_GUID:
+		mail_index_sync_add_expunge_guid(ctx);
+		break;
+	case MAIL_TRANSACTION_FLAG_UPDATE:
+		mail_index_sync_add_flag_update(ctx);
+		break;
+	case MAIL_TRANSACTION_KEYWORD_UPDATE:
+		mail_index_sync_add_keyword_update(ctx);
+		break;
+	case MAIL_TRANSACTION_KEYWORD_RESET:
+		mail_index_sync_add_keyword_reset(ctx);
+		break;
+	default:
+		return FALSE;
+	}
+	return TRUE;
+}
+
+/* Turn every dirty record in the view into a MODIFY_REPLACE flag update in
+   ctx->sync_trans, so the backend gets a chance to flush the dirtiness. */
+static void mail_index_sync_add_dirty_updates(struct mail_index_sync_ctx *ctx)
+{
+	struct mail_transaction_flag_update update;
+	const struct mail_index_record *rec;
+	uint32_t seq, messages_count;
+
+	/* NOTE(review): 'update' is zeroed but apparently unused below --
+	   looks like a leftover; confirm against upstream before removing */
+	i_zero(&update);
+
+	messages_count = mail_index_view_get_messages_count(ctx->view);
+	for (seq = 1; seq <= messages_count; seq++) {
+		rec = mail_index_lookup(ctx->view, seq);
+		if ((rec->flags & MAIL_INDEX_MAIL_FLAG_DIRTY) == 0)
+			continue;
+
+		mail_index_update_flags(ctx->sync_trans, rec->uid,
+					MODIFY_REPLACE, rec->flags);
+	}
+}
+
+/* Read all non-external transactions from the log into ctx->sync_trans,
+   then build ctx->sync_list: one entry per expunge/flag/keyword change
+   array so callers can iterate all changes in sorted order. Returns the
+   last mail_transaction_log_view_next() result (<0 on error). */
+static int
+mail_index_sync_read_and_sort(struct mail_index_sync_ctx *ctx)
+{
+	struct mail_index_transaction *sync_trans = ctx->sync_trans;
+	struct mail_index_sync_list *synclist;
+	const struct mail_index_transaction_keyword_update *keyword_updates;
+	unsigned int i, keyword_count;
+	int ret;
+
+	if ((ctx->view->map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0 &&
+	    (ctx->flags & MAIL_INDEX_SYNC_FLAG_FLUSH_DIRTY) != 0 &&
+	    (ctx->view->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) == 0) {
+		/* show dirty flags as flag updates */
+		mail_index_sync_add_dirty_updates(ctx);
+	}
+
+	/* read all transactions from log into a transaction in memory.
+	   skip the external ones, they're already synced to mailbox and
+	   included in our view */
+	while ((ret = mail_transaction_log_view_next(ctx->view->log_view,
+						     &ctx->hdr,
+						     &ctx->data)) > 0) {
+		if ((ctx->hdr->type & MAIL_TRANSACTION_EXTERNAL) != 0) {
+			if ((ctx->hdr->type & (MAIL_TRANSACTION_EXPUNGE |
+					       MAIL_TRANSACTION_EXPUNGE_GUID)) != 0)
+				ctx->seen_external_expunges = TRUE;
+			continue;
+		}
+
+		T_BEGIN {
+			if (mail_index_sync_add_transaction(ctx)) {
+				/* update tail_offset if needed */
+				ctx->seen_nonexternal_transactions = TRUE;
+			} else {
+				/* this is an internal change. we don't
+				   necessarily need to update tail_offset, so
+				   avoid the extra write caused by it. */
+			}
+		} T_END;
+	}
+
+	/* create an array containing all expunge, flag and keyword update
+	   arrays so we can easily go through all of the changes. */
+	keyword_count = !array_is_created(&sync_trans->keyword_updates) ? 0 :
+		array_count(&sync_trans->keyword_updates);
+	i_array_init(&ctx->sync_list, keyword_count + 2);
+
+	if (array_is_created(&sync_trans->expunges)) {
+		mail_index_transaction_sort_expunges(sync_trans);
+		synclist = array_append_space(&ctx->sync_list);
+		synclist->array = (void *)&sync_trans->expunges;
+	}
+
+	if (array_is_created(&sync_trans->updates)) {
+		synclist = array_append_space(&ctx->sync_list);
+		synclist->array = (void *)&sync_trans->updates;
+	}
+
+	/* one sync_list entry per keyword add-set and remove-set */
+	keyword_updates = keyword_count == 0 ? NULL :
+		array_front(&sync_trans->keyword_updates);
+	for (i = 0; i < keyword_count; i++) {
+		if (array_is_created(&keyword_updates[i].add_seq)) {
+			synclist = array_append_space(&ctx->sync_list);
+			synclist->array =
+				(const void *)&keyword_updates[i].add_seq;
+			synclist->keyword_idx = i;
+		}
+		if (array_is_created(&keyword_updates[i].remove_seq)) {
+			synclist = array_append_space(&ctx->sync_list);
+			synclist->array =
+				(const void *)&keyword_updates[i].remove_seq;
+			synclist->keyword_idx = i;
+			synclist->keyword_remove = TRUE;
+		}
+	}
+
+	return ret;
+}
+
+/* Return TRUE if a sync up to (log_file_seq, log_file_offset) would do any
+   work. Only meaningful when REQUIRE_CHANGES is set; otherwise syncing is
+   always wanted. (uint32_t)-1 as log_file_seq means "up to log head". */
+static bool
+mail_index_need_sync(struct mail_index *index, enum mail_index_sync_flags flags,
+		     uint32_t log_file_seq, uoff_t log_file_offset)
+{
+	const struct mail_index_header *hdr = &index->map->hdr;
+	if ((flags & MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES) == 0)
+		return TRUE;
+
+	/* sync only if there's something to do */
+	if (hdr->first_recent_uid < hdr->next_uid &&
+	    (flags & MAIL_INDEX_SYNC_FLAG_DROP_RECENT) != 0)
+		return TRUE;
+
+	if ((hdr->flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0 &&
+	    (flags & MAIL_INDEX_SYNC_FLAG_FLUSH_DIRTY) != 0 &&
+	    (index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) == 0)
+		return TRUE;
+
+	if (log_file_seq == (uint32_t)-1) {
+		/* we want to sync up to transaction log's head */
+		mail_transaction_log_get_head(index->log,
+					      &log_file_seq, &log_file_offset);
+	}
+	/* tail position in the index is behind the wanted position */
+	if ((hdr->log_file_tail_offset < log_file_offset &&
+	     hdr->log_file_seq == log_file_seq) ||
+	    hdr->log_file_seq < log_file_seq)
+		return TRUE;
+
+	if (index->need_recreate != NULL)
+		return TRUE;
+
+	/* already synced */
+	const char *reason;
+	return mail_cache_need_purge(index->cache, &reason);
+}
+
+/* Point the view's log view at the range from the given start position up
+   to the current log head. Returns 1 on success, 0 if the log range can't
+   be set (desync; error is logged), -1 on I/O error. */
+static int
+mail_index_sync_set_log_view(struct mail_index_view *view,
+			     uint32_t start_file_seq, uoff_t start_file_offset)
+{
+	uint32_t log_seq;
+	uoff_t log_offset;
+	const char *reason;
+	bool reset;
+	int ret;
+
+	mail_transaction_log_get_head(view->index->log, &log_seq, &log_offset);
+
+	ret = mail_transaction_log_view_set(view->log_view,
+					    start_file_seq, start_file_offset,
+					    log_seq, log_offset, &reset, &reason);
+	if (ret < 0)
+		return -1;
+	if (ret == 0) {
+		/* either corrupted or the file was deleted for
+		   some reason. either way, we can't go forward */
+		mail_index_set_error(view->index,
+			"Unexpected transaction log desync with index %s: %s",
+			view->index->filepath, reason);
+		return 0;
+	}
+	return 1;
+}
+
+/* Begin syncing the index up to the transaction log head. Convenience
+   wrapper around mail_index_sync_begin_to() with "sync everything". Can
+   return 0 only when REQUIRE_CHANGES is set. */
+int mail_index_sync_begin(struct mail_index *index,
+			  struct mail_index_sync_ctx **ctx_r,
+			  struct mail_index_view **view_r,
+			  struct mail_index_transaction **trans_r,
+			  enum mail_index_sync_flags flags)
+{
+	int ret;
+
+	ret = mail_index_sync_begin_to(index, ctx_r, view_r, trans_r,
+				       (uint32_t)-1, UOFF_T_MAX, flags);
+	i_assert(ret != 0 ||
+		 (flags & MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES) != 0);
+	return ret;
+}
+
+/* First phase of beginning a sync: map the index, check whether there's
+   anything to sync, and take the transaction log sync lock. Returns 1 with
+   the log locked, 0 if nothing to sync (lock released), -1 on error.
+   Recurses once after taking the lock when REQUIRE_CHANGES was set. */
+static int
+mail_index_sync_begin_init(struct mail_index *index,
+			   enum mail_index_sync_flags flags,
+			   uint32_t log_file_seq, uoff_t log_file_offset)
+{
+	const struct mail_index_header *hdr;
+	uint32_t seq;
+	uoff_t offset;
+	bool locked = FALSE;
+	int ret;
+
+	/* if we require changes, don't lock transaction log yet. first check
+	   if there's anything to sync. */
+	if ((flags & MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES) == 0) {
+		if (mail_transaction_log_sync_lock(index->log, "syncing",
+						   &seq, &offset) < 0)
+			return -1;
+		locked = TRUE;
+	}
+
+	/* The view must contain what we expect the mailbox to look like
+	   currently. That allows the backend to update external flag
+	   changes (etc.) if the view doesn't match the mailbox.
+
+	   We'll update the view to contain everything that exist in the
+	   transaction log except for expunges. They're synced in
+	   mail_index_sync_commit(). */
+	if ((ret = mail_index_map(index, MAIL_INDEX_SYNC_HANDLER_HEAD)) <= 0) {
+		if (ret == 0) {
+			if (locked)
+				mail_transaction_log_sync_unlock(index->log, "sync init failure");
+			return -1;
+		}
+
+		/* let's try again */
+		if (mail_index_map(index, MAIL_INDEX_SYNC_HANDLER_HEAD) <= 0) {
+			if (locked)
+				mail_transaction_log_sync_unlock(index->log, "sync init failure");
+			return -1;
+		}
+	}
+
+	if (!mail_index_need_sync(index, flags, log_file_seq, log_file_offset) &&
+	    !index->index_deleted && index->need_recreate == NULL) {
+		if (locked)
+			mail_transaction_log_sync_unlock(index->log, "syncing determined unnecessary");
+		return 0;
+	}
+
+	if (!locked) {
+		/* it looks like we have something to sync. lock the file and
+		   check again. */
+		flags &= ENUM_NEGATE(MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES);
+		return mail_index_sync_begin_init(index, flags, log_file_seq,
+						  log_file_offset);
+	}
+
+	if (index->index_deleted &&
+	    (flags & MAIL_INDEX_SYNC_FLAG_DELETING_INDEX) == 0) {
+		/* index is already deleted. we can't sync. */
+		if (locked)
+			mail_transaction_log_sync_unlock(index->log, "syncing detected deleted index");
+		return -1;
+	}
+
+	hdr = &index->map->hdr;
+	if (hdr->log_file_tail_offset > hdr->log_file_head_offset ||
+	    hdr->log_file_seq > seq ||
+	    (hdr->log_file_seq == seq && hdr->log_file_tail_offset > offset)) {
+		/* broken sync positions. fix them. */
+		mail_index_set_error(index,
+			"broken sync positions in index file %s",
+			index->filepath);
+		mail_index_fsck_locked(index);
+	}
+	return 1;
+}
+
+/* Second phase of beginning a sync: open the sync view, read and sort the
+   pending log transactions, and create the external transaction handed to
+   the caller. Returns 1 on success, 0 with *retry_r=TRUE when the caller
+   should retry after an fsck, -1 on error. */
+static int
+mail_index_sync_begin_to2(struct mail_index *index,
+			  struct mail_index_sync_ctx **ctx_r,
+			  struct mail_index_view **view_r,
+			  struct mail_index_transaction **trans_r,
+			  uint32_t log_file_seq, uoff_t log_file_offset,
+			  enum mail_index_sync_flags flags, bool *retry_r)
+{
+	const struct mail_index_header *hdr;
+	struct mail_index_sync_ctx *ctx;
+	struct mail_index_view *sync_view;
+	enum mail_index_transaction_flags trans_flags;
+	int ret;
+
+	i_assert(!index->syncing);
+
+	*retry_r = FALSE;
+
+	if (index->map != NULL &&
+	    (index->map->hdr.flags & MAIL_INDEX_HDR_FLAG_CORRUPTED) != 0) {
+		/* index is corrupted and need to be reopened */
+		return -1;
+	}
+
+	/* syncing to an explicit position implies there are changes */
+	if (log_file_seq != (uint32_t)-1)
+		flags |= MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES;
+
+	ret = mail_index_sync_begin_init(index, flags, log_file_seq,
+					 log_file_offset);
+	if (ret <= 0)
+		return ret;
+
+	hdr = &index->map->hdr;
+
+	ctx = i_new(struct mail_index_sync_ctx, 1);
+	ctx->index = index;
+	ctx->flags = flags;
+
+	ctx->view = mail_index_view_open(index);
+
+	/* sync_trans is attached to a dummy view; it only collects changes */
+	sync_view = mail_index_dummy_view_open(index);
+	ctx->sync_trans = mail_index_transaction_begin(sync_view,
+					MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
+	mail_index_view_close(&sync_view);
+
+	/* set before any rollbacks are called */
+	index->syncing = TRUE;
+
+	/* we wish to see all the changes from last mailbox sync position to
+	   the end of the transaction log */
+	ret = mail_index_sync_set_log_view(ctx->view, hdr->log_file_seq,
+					   hdr->log_file_tail_offset);
+	if (ret < 0) {
+		mail_index_sync_rollback(&ctx);
+		return -1;
+	}
+	if (ret == 0) {
+		/* if a log file is missing, there's nothing we can do except
+		   to skip over it. fix the problem with fsck and try again. */
+		mail_index_fsck_locked(index);
+		mail_index_sync_rollback(&ctx);
+		*retry_r = TRUE;
+		return 0;
+	}
+
+	/* we need to have all the transactions sorted to optimize
+	   caller's mailbox access patterns */
+	if (mail_index_sync_read_and_sort(ctx) < 0) {
+		mail_index_sync_rollback(&ctx);
+		return -1;
+	}
+
+	/* create the transaction after the view has been updated with
+	   external transactions and marked as sync view */
+	trans_flags = MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL;
+	if ((ctx->flags & MAIL_INDEX_SYNC_FLAG_AVOID_FLAG_UPDATES) != 0)
+		trans_flags |= MAIL_INDEX_TRANSACTION_FLAG_AVOID_FLAG_UPDATES;
+	if ((ctx->flags & MAIL_INDEX_SYNC_FLAG_FSYNC) != 0)
+		trans_flags |= MAIL_INDEX_TRANSACTION_FLAG_FSYNC;
+	ctx->ext_trans = mail_index_transaction_begin(ctx->view, trans_flags);
+	ctx->ext_trans->sync_transaction = TRUE;
+	ctx->ext_trans->commit_deleted_index =
+		(flags & (MAIL_INDEX_SYNC_FLAG_DELETING_INDEX |
+			  MAIL_INDEX_SYNC_FLAG_TRY_DELETING_INDEX)) != 0;
+
+	*ctx_r = ctx;
+	*view_r = ctx->view;
+	*trans_r = ctx->ext_trans;
+	return 1;
+}
+
+/* Public entry point: begin syncing the index up to the given transaction
+   log position (log_file_seq == (uint32_t)-1 means "to the end").
+   If the first attempt ran fsck over a missing log file it is retried
+   exactly once. */
+int mail_index_sync_begin_to(struct mail_index *index,
+			     struct mail_index_sync_ctx **ctx_r,
+			     struct mail_index_view **view_r,
+			     struct mail_index_transaction **trans_r,
+			     uint32_t log_file_seq, uoff_t log_file_offset,
+			     enum mail_index_sync_flags flags)
+{
+	bool retry;
+	int ret;
+
+	i_assert(index->open_count > 0);
+
+	ret = mail_index_sync_begin_to2(index, ctx_r, view_r, trans_r,
+					log_file_seq, log_file_offset,
+					flags, &retry);
+	if (retry) {
+		/* fsck was run; one retry is expected to be enough */
+		ret = mail_index_sync_begin_to2(index, ctx_r, view_r, trans_r,
+						log_file_seq, log_file_offset,
+						flags, &retry);
+	}
+	return ret;
+}
+
+/* Returns TRUE if this sync contains at least one expunge to process. */
+bool mail_index_sync_has_expunges(struct mail_index_sync_ctx *ctx)
+{
+	return array_is_created(&ctx->sync_trans->expunges) &&
+		array_count(&ctx->sync_trans->expunges) > 0;
+}
+
+/* Scan the transaction log from the view's last synced position and return
+   TRUE if there is anything a sync would still need to handle (with
+   expunges_only, only expunge records count). Errors also return TRUE so
+   the actual sync gets to report them. */
+static bool mail_index_sync_view_have_any(struct mail_index_view *view,
+					  enum mail_index_sync_flags flags,
+					  bool expunges_only)
+{
+	const struct mail_transaction_header *hdr;
+	const void *data;
+	uint32_t log_seq;
+	uoff_t log_offset;
+	const char *reason;
+	bool reset;
+	int ret;
+
+	/* dropping \Recent flags is itself a pending change */
+	if (view->map->hdr.first_recent_uid < view->map->hdr.next_uid &&
+	    (flags & MAIL_INDEX_SYNC_FLAG_DROP_RECENT) != 0)
+		return TRUE;
+
+	/* flushing dirty flags is a pending change, unless dirtiness is
+	   disabled for this index */
+	if ((view->map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0 &&
+	    (flags & MAIL_INDEX_SYNC_FLAG_FLUSH_DIRTY) != 0 &&
+	    (view->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) == 0)
+		return TRUE;
+
+	mail_transaction_log_get_head(view->index->log, &log_seq, &log_offset);
+	if (mail_transaction_log_view_set(view->log_view,
+					  view->map->hdr.log_file_seq,
+					  view->map->hdr.log_file_tail_offset,
+					  log_seq, log_offset,
+					  &reset, &reason) <= 0) {
+		/* let the actual syncing handle the error */
+		return TRUE;
+	}
+
+	/* walk the unsynced records; external transactions don't require
+	   syncing, so they're skipped */
+	while ((ret = mail_transaction_log_view_next(view->log_view,
+						     &hdr, &data)) > 0) {
+		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) != 0)
+			continue;
+
+		switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
+		case MAIL_TRANSACTION_EXPUNGE:
+		case MAIL_TRANSACTION_EXPUNGE_GUID:
+			return TRUE;
+		case MAIL_TRANSACTION_EXT_REC_UPDATE:
+		case MAIL_TRANSACTION_EXT_ATOMIC_INC:
+			/* extension record updates aren't exactly needed
+			   to be synced, but cache syncing relies on tail
+			   offsets being updated. */
+		case MAIL_TRANSACTION_FLAG_UPDATE:
+		case MAIL_TRANSACTION_KEYWORD_UPDATE:
+		case MAIL_TRANSACTION_KEYWORD_RESET:
+		case MAIL_TRANSACTION_INDEX_DELETED:
+		case MAIL_TRANSACTION_INDEX_UNDELETED:
+			if (!expunges_only)
+				return TRUE;
+			break;
+		default:
+			break;
+		}
+	}
+	/* ret < 0 is a log read error: report it as "have something" */
+	return ret < 0;
+}
+
+/* Returns TRUE if a sync with the given flags would have anything to do. */
+bool mail_index_sync_have_any(struct mail_index *index,
+			      enum mail_index_sync_flags flags)
+{
+	struct mail_index_view *view;
+	bool ret;
+
+	view = mail_index_view_open(index);
+	ret = mail_index_sync_view_have_any(view, flags, FALSE);
+	mail_index_view_close(&view);
+	return ret;
+}
+
+/* Returns TRUE if there are any unsynced (non-external) expunges in the
+   transaction log. */
+bool mail_index_sync_have_any_expunges(struct mail_index *index)
+{
+	struct mail_index_view *view;
+	bool ret;
+
+	view = mail_index_view_open(index);
+	ret = mail_index_sync_view_have_any(view, 0, TRUE);
+	mail_index_view_close(&view);
+	return ret;
+}
+
+/* Return the log range this sync covers: from the last synced position
+   (falling back to the end of the head file's header when the tail offset
+   is still 0) up to the current log head. */
+void mail_index_sync_get_offsets(struct mail_index_sync_ctx *ctx,
+				 uint32_t *seq1_r, uoff_t *offset1_r,
+				 uint32_t *seq2_r, uoff_t *offset2_r)
+{
+	*seq1_r = ctx->view->map->hdr.log_file_seq;
+	*offset1_r = ctx->view->map->hdr.log_file_tail_offset != 0 ?
+		ctx->view->map->hdr.log_file_tail_offset :
+		ctx->view->index->log->head->hdr.hdr_size;
+	mail_transaction_log_get_head(ctx->view->index->log, seq2_r, offset2_r);
+}
+
+/* Fill a sync record from an expunge-guid log record (uid1 == uid2). */
+static void
+mail_index_sync_get_expunge(struct mail_index_sync_rec *rec,
+			    const struct mail_transaction_expunge_guid *exp)
+{
+	rec->type = MAIL_INDEX_SYNC_TYPE_EXPUNGE;
+	rec->uid1 = exp->uid;
+	rec->uid2 = exp->uid;
+	memcpy(rec->guid_128, exp->guid_128, sizeof(rec->guid_128));
+}
+
+/* Fill a sync record from a flag update covering the UID range. */
+static void
+mail_index_sync_get_update(struct mail_index_sync_rec *rec,
+			   const struct mail_index_flag_update *update)
+{
+	rec->type = MAIL_INDEX_SYNC_TYPE_FLAGS;
+	rec->uid1 = update->uid1;
+	rec->uid2 = update->uid2;
+
+	rec->add_flags = update->add_flags;
+	rec->remove_flags = update->remove_flags;
+}
+
+/* Fill a sync record from a keyword add/remove range; the direction comes
+   from the owning sync_list entry. */
+static void
+mail_index_sync_get_keyword_update(struct mail_index_sync_rec *rec,
+				   const struct uid_range *range,
+				   struct mail_index_sync_list *sync_list)
+{
+	rec->type = !sync_list->keyword_remove ?
+		MAIL_INDEX_SYNC_TYPE_KEYWORD_ADD :
+		MAIL_INDEX_SYNC_TYPE_KEYWORD_REMOVE;
+	rec->uid1 = range->uid1;
+	rec->uid2 = range->uid2;
+	rec->keyword_idx = sync_list->keyword_idx;
+}
+
+/* Return the next change to sync, merging expunges, flag updates and
+   keyword updates in ascending UID order across all sync_list arrays.
+   Returns FALSE when everything has been returned (and marks the context
+   fully synced). */
+bool mail_index_sync_next(struct mail_index_sync_ctx *ctx,
+			  struct mail_index_sync_rec *sync_rec)
+{
+	struct mail_index_transaction *sync_trans = ctx->sync_trans;
+	struct mail_index_sync_list *sync_list;
+	const struct uid_range *uid_range = NULL;
+	unsigned int i, count, next_i;
+	uint32_t next_found_uid;
+
+	next_i = UINT_MAX;
+	next_found_uid = (uint32_t)-1;
+
+	/* FIXME: replace with a priority queue so we don't have to go
+	   through the whole list constantly. and remember to make sure that
+	   keyword resets are sent before adds! */
+	/* FIXME: pretty ugly to do this for expunges, which isn't even a
+	   seq_range. */
+	sync_list = array_get_modifiable(&ctx->sync_list, &count);
+	for (i = 0; i < count; i++) {
+		if (!array_is_created(sync_list[i].array) ||
+		    sync_list[i].idx == array_count(sync_list[i].array))
+			continue;
+
+		uid_range = array_idx(sync_list[i].array, sync_list[i].idx);
+		if (uid_range->uid1 == ctx->next_uid) {
+			/* use this one. */
+			break;
+		}
+		if (uid_range->uid1 < next_found_uid) {
+			next_i = i;
+			next_found_uid = uid_range->uid1;
+		}
+	}
+
+	if (i == count) {
+		/* no list continued exactly at next_uid; jump to the lowest
+		   UID that was seen, if any */
+		if (next_i == UINT_MAX) {
+			/* nothing left in sync_list */
+			ctx->fully_synced = TRUE;
+			return FALSE;
+		}
+		ctx->next_uid = next_found_uid;
+		i = next_i;
+		uid_range = array_idx(sync_list[i].array, sync_list[i].idx);
+	}
+
+	/* identify which transaction array this entry came from; expunge
+	   and flag-update entries are stored with their own record types */
+	if (sync_list[i].array == (void *)&sync_trans->expunges) {
+		mail_index_sync_get_expunge(sync_rec,
+			(const struct mail_transaction_expunge_guid *)uid_range);
+	} else if (sync_list[i].array == (void *)&sync_trans->updates) {
+		mail_index_sync_get_update(sync_rec,
+			(const struct mail_index_flag_update *)uid_range);
+	} else {
+		mail_index_sync_get_keyword_update(sync_rec, uid_range,
+						   &sync_list[i]);
+	}
+	sync_list[i].idx++;
+	return TRUE;
+}
+
+/* Returns TRUE if mail_index_sync_next() would still return something. */
+bool mail_index_sync_have_more(struct mail_index_sync_ctx *ctx)
+{
+	const struct mail_index_sync_list *sync_list;
+
+	array_foreach(&ctx->sync_list, sync_list) {
+		if (array_is_created(sync_list->array) &&
+		    sync_list->idx != array_count(sync_list->array))
+			return TRUE;
+	}
+	return FALSE;
+}
+
+/* Store where the caller wants the commit result written; it is picked up
+   during mail_index_sync_commit(). */
+void mail_index_sync_set_commit_result(struct mail_index_sync_ctx *ctx,
+				       struct mail_index_transaction_commit_result *result)
+{
+	ctx->sync_commit_result = result;
+}
+
+/* Restart iteration: the next mail_index_sync_next() call starts again from
+   the beginning of every sync_list array. */
+void mail_index_sync_reset(struct mail_index_sync_ctx *ctx)
+{
+	struct mail_index_sync_list *sync_list;
+
+	ctx->next_uid = 0;
+	array_foreach_modifiable(&ctx->sync_list, sync_list)
+		sync_list->idx = 0;
+}
+
+/* Suppress the long-lock warning normally logged when the sync lock is
+   released. */
+void mail_index_sync_no_warning(struct mail_index_sync_ctx *ctx)
+{
+	ctx->no_warning = TRUE;
+}
+
+/* Set a human-readable reason for this sync; used as the lock reason when
+   unlocking. Replaces any previously set reason. */
+void mail_index_sync_set_reason(struct mail_index_sync_ctx *ctx,
+				const char *reason)
+{
+	i_free(ctx->reason);
+	ctx->reason = i_strdup(reason);
+}
+
+/* Common teardown for both commit and rollback: release the log sync lock
+   (with a reason string unless warnings were suppressed), close the view,
+   roll back the internal sync transaction and free the context. */
+static void mail_index_sync_end(struct mail_index_sync_ctx **_ctx)
+{
+	struct mail_index_sync_ctx *ctx = *_ctx;
+	const char *lock_reason;
+
+	i_assert(ctx->index->syncing);
+
+	*_ctx = NULL;
+
+	ctx->index->syncing = FALSE;
+	if (ctx->no_warning)
+		lock_reason = NULL;
+	else if (ctx->reason != NULL)
+		lock_reason = ctx->reason;
+	else
+		lock_reason = "Mailbox was synchronized";
+	mail_transaction_log_sync_unlock(ctx->index->log, lock_reason);
+
+	mail_index_view_close(&ctx->view);
+	mail_index_transaction_rollback(&ctx->sync_trans);
+	if (array_is_created(&ctx->sync_list))
+		array_free(&ctx->sync_list);
+	i_free(ctx->reason);
+	i_free(ctx);
+}
+
+/* After a successful sync, record how far the mailbox has been synced in
+   the transaction log, and decide whether the new tail offset must also be
+   written out as part of the external transaction. */
+static void
+mail_index_sync_update_mailbox_offset(struct mail_index_sync_ctx *ctx)
+{
+	const struct mail_index_header *hdr = &ctx->index->map->hdr;
+	uint32_t seq;
+	uoff_t offset;
+
+	if (!ctx->fully_synced) {
+		/* Everything wasn't synced. This usually means that syncing
+		   was used for locking and nothing was synced. Don't update
+		   tail offset. */
+		return;
+	}
+	/* All changes were synced. During the syncing other transactions may
+	   have been created and committed as well. They're expected to be
+	   external transactions. These could be at least:
+	   - mdbox finishing expunges
+	   - mdbox writing to dovecot.map.index (requires tail offset updates)
+	   - sdbox appending messages
+
+	   If any expunges were committed, tail_offset must not be updated
+	   before mail_index_map(MAIL_INDEX_SYNC_HANDLER_FILE) is called.
+	   Otherwise expunge handlers won't be called for them.
+
+	   We'll require MAIL_INDEX_SYNC_FLAG_UPDATE_TAIL_OFFSET flag for the
+	   few places that actually require tail_offset to include the
+	   externally committed transactions. Otherwise tail_offset is updated
+	   only up to what was just synced. */
+	if ((ctx->flags & MAIL_INDEX_SYNC_FLAG_UPDATE_TAIL_OFFSET) != 0)
+		mail_transaction_log_get_head(ctx->index->log, &seq, &offset);
+	else {
+		mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
+						       &seq, &offset);
+	}
+	mail_transaction_log_set_mailbox_sync_pos(ctx->index->log, seq, offset);
+
+	/* If tail offset has changed, make sure it gets written to
+	   transaction log. do this only if we're required to make changes.
+
+	   avoid writing a new tail offset if all the transactions were
+	   external, because that wouldn't effectively change the tail offset.
+	   except e.g. mdbox map requires this to happen, so do it
+	   optionally. Also update the tail if we've been calling any expunge
+	   handlers, so they won't be called multiple times. That could cause
+	   at least cache file's [deleted_]record_count to shrink too much. */
+	if ((hdr->log_file_seq != seq || hdr->log_file_tail_offset < offset) &&
+	    (ctx->seen_external_expunges ||
+	     ctx->seen_nonexternal_transactions ||
+	     (ctx->flags & MAIL_INDEX_SYNC_FLAG_UPDATE_TAIL_OFFSET) != 0)) {
+		ctx->ext_trans->log_updates = TRUE;
+		ctx->ext_trans->tail_offset_changed = TRUE;
+	}
+}
+
+/* Decide whether dovecot.index should be rewritten after this sync:
+   because it points to an already-rotated .log, because too many log bytes
+   have accumulated since the last rewrite, or because something explicitly
+   requested recreation. *reason_r is set (possibly to tdata-scoped memory)
+   whenever TRUE is returned.
+   NOTE(review): log_diff is an unsigned subtraction — assumes the in-memory
+   tail offset is never behind the on-disk one, else it wraps; confirm. */
+static bool mail_index_sync_want_index_write(struct mail_index *index, const char **reason_r)
+{
+	uint32_t log_diff;
+
+	if (index->main_index_hdr_log_file_seq != 0 &&
+	    index->main_index_hdr_log_file_seq != index->map->hdr.log_file_seq) {
+		/* dovecot.index points to an old .log file. we were supposed
+		   to rewrite the dovecot.index when rotating the log, so
+		   we shouldn't usually get here. */
+		*reason_r = "points to old .log file";
+		return TRUE;
+	}
+
+	log_diff = index->map->hdr.log_file_tail_offset -
+		index->main_index_hdr_log_file_tail_offset;
+	/* NOTE(review): the "%u..%u" range below prints the newer tail offset
+	   before the older one, which reads reversed — confirm intended
+	   argument order. */
+	if (log_diff > index->optimization_set.index.rewrite_max_log_bytes) {
+		*reason_r = t_strdup_printf(
+			".log read %u..%u > rewrite_max_log_bytes %"PRIuUOFF_T,
+			index->map->hdr.log_file_tail_offset,
+			index->main_index_hdr_log_file_tail_offset,
+			index->optimization_set.index.rewrite_max_log_bytes);
+		return TRUE;
+	}
+	/* index_min_write lowers the threshold, e.g. when the index is known
+	   to be worth rewriting soon anyway */
+	if (index->index_min_write &&
+	    log_diff > index->optimization_set.index.rewrite_min_log_bytes) {
+		*reason_r = t_strdup_printf(
+			".log read %u..%u > rewrite_min_log_bytes %"PRIuUOFF_T,
+			index->map->hdr.log_file_tail_offset,
+			index->main_index_hdr_log_file_tail_offset,
+			index->optimization_set.index.rewrite_min_log_bytes);
+		return TRUE;
+	}
+
+	if (index->need_recreate != NULL) {
+		*reason_r = t_strdup_printf("Need to recreate index: %s",
+					    index->need_recreate);
+		return TRUE;
+	}
+	return FALSE;
+}
+
+/* Finish a sync: commit the external transaction, re-map the index so
+   expunge handlers run, optionally purge the cache, and rewrite
+   dovecot.index / rotate the log when warranted. Returns 0 on success,
+   -1 on error. The context is always freed. */
+int mail_index_sync_commit(struct mail_index_sync_ctx **_ctx)
+{
+	struct mail_index_sync_ctx *ctx = *_ctx;
+	struct mail_index *index = ctx->index;
+	const char *reason = NULL;
+	uint32_t next_uid;
+	bool want_rotate, index_undeleted, delete_index;
+	int ret = 0, ret2;
+
+	index_undeleted = ctx->ext_trans->index_undeleted;
+	delete_index = index->index_delete_requested && !index_undeleted &&
+		(ctx->flags & (MAIL_INDEX_SYNC_FLAG_DELETING_INDEX |
+			       MAIL_INDEX_SYNC_FLAG_TRY_DELETING_INDEX)) != 0;
+	if (delete_index) {
+		/* finish this sync by marking the index deleted */
+		mail_index_set_deleted(ctx->ext_trans);
+	} else if (index->index_deleted && !index_undeleted &&
+		   (ctx->flags & MAIL_INDEX_SYNC_FLAG_TRY_DELETING_INDEX) == 0) {
+		/* another process just marked the index deleted.
+		   finish the sync, but return error. */
+		mail_index_set_error_nolog(index, "Index is marked deleted");
+		ret = -1;
+	}
+
+	mail_index_sync_update_mailbox_offset(ctx);
+
+	/* everything up to next_uid loses its \Recent status */
+	if ((ctx->flags & MAIL_INDEX_SYNC_FLAG_DROP_RECENT) != 0) {
+		next_uid = mail_index_transaction_get_next_uid(ctx->ext_trans);
+		if (index->map->hdr.first_recent_uid < next_uid) {
+			mail_index_update_header(ctx->ext_trans,
+				offsetof(struct mail_index_header,
+					 first_recent_uid),
+				&next_uid, sizeof(next_uid), FALSE);
+		}
+	}
+	if (index->hdr_log2_rotate_time_delayed_update != 0) {
+		/* We checked whether .log.2 should be deleted in this same
+		   sync. It resulted in wanting to change the log2_rotate_time
+		   in the header. Do it here as part of the other changes. */
+		uint32_t log2_rotate_time =
+			index->hdr_log2_rotate_time_delayed_update;
+
+		mail_index_update_header(ctx->ext_trans,
+			offsetof(struct mail_index_header, log2_rotate_time),
+			&log2_rotate_time, sizeof(log2_rotate_time), TRUE);
+		index->hdr_log2_rotate_time_delayed_update = 0;
+	}
+
+	ret2 = mail_index_transaction_commit(&ctx->ext_trans);
+	if (ret2 < 0) {
+		mail_index_sync_end(&ctx);
+		return -1;
+	}
+
+	if (delete_index)
+		index->index_deleted = TRUE;
+	else if (index_undeleted) {
+		index->index_deleted = FALSE;
+		index->index_delete_requested = FALSE;
+	}
+
+	/* refresh the mapping with newly committed external transactions
+	   and the synced expunges. sync using file handler here so that the
+	   expunge handlers get called. */
+	index->sync_commit_result = ctx->sync_commit_result;
+	if (mail_index_map(ctx->index, MAIL_INDEX_SYNC_HANDLER_FILE) <= 0)
+		ret = -1;
+	index->sync_commit_result = NULL;
+
+	/* The previously called expunged handlers will update cache's
+	   record_count and deleted_record_count. That also has a side effect
+	   of updating whether cache needs to be purged. */
+	if (ret == 0 && mail_cache_need_purge(index->cache, &reason) &&
+	    !mail_cache_transactions_have_changes(index->cache)) {
+		if (mail_cache_purge(index->cache,
+				     index->cache->need_purge_file_seq,
+				     reason) < 0) {
+			/* can't really do anything if it fails */
+		}
+		/* Make sure the newly committed cache record offsets are
+		   updated to the current index. This is important if the
+		   dovecot.index gets recreated below, because rotation of
+		   dovecot.index.log also re-maps the index to make sure
+		   everything is up-to-date. But if it wasn't,
+		   mail_index_write() will just assert-crash because
+		   log_file_head_offset changed. */
+		if (mail_index_map(ctx->index, MAIL_INDEX_SYNC_HANDLER_FILE) <= 0)
+			ret = -1;
+	}
+
+	/* Log rotation is allowed only if everything was synced. Note that
+	   tail_offset might not equal head_offset here, because
+	   mail_index_sync_update_mailbox_offset() doesn't always update
+	   tail_offset to skip over other committed external transactions.
+	   However, it's still safe to do the rotation because external
+	   transactions don't require syncing. */
+	want_rotate = ctx->fully_synced &&
+		mail_transaction_log_want_rotate(index->log, &reason);
+	if (ret == 0 &&
+	    (want_rotate || mail_index_sync_want_index_write(index, &reason))) {
+		i_free(index->need_recreate);
+		index->index_min_write = FALSE;
+		mail_index_write(index, want_rotate, reason);
+	}
+	mail_index_sync_end(_ctx);
+	return ret;
+}
+
+/* Abort a sync: roll back the external transaction (if it was created yet)
+   and release all sync resources. */
+void mail_index_sync_rollback(struct mail_index_sync_ctx **ctx)
+{
+	if ((*ctx)->ext_trans != NULL)
+		mail_index_transaction_rollback(&(*ctx)->ext_trans);
+	mail_index_sync_end(ctx);
+}
+
+/* Apply a flag-update sync record to a flags byte: clear remove_flags,
+   then set add_flags. */
+void mail_index_sync_flags_apply(const struct mail_index_sync_rec *sync_rec,
+				 uint8_t *flags)
+{
+	i_assert(sync_rec->type == MAIL_INDEX_SYNC_TYPE_FLAGS);
+
+	*flags = (*flags & ENUM_NEGATE(sync_rec->remove_flags)) | sync_rec->add_flags;
+}
+
+/* Apply a keyword add/remove sync record to a keyword index list.
+   Returns TRUE if the list was actually modified (the keyword wasn't
+   already present for add, or was present for remove). */
+bool mail_index_sync_keywords_apply(const struct mail_index_sync_rec *sync_rec,
+				    ARRAY_TYPE(keyword_indexes) *keywords)
+{
+	const unsigned int *keyword_indexes;
+	unsigned int idx = sync_rec->keyword_idx;
+	unsigned int i, count;
+
+	keyword_indexes = array_get(keywords, &count);
+	switch (sync_rec->type) {
+	case MAIL_INDEX_SYNC_TYPE_KEYWORD_ADD:
+		for (i = 0; i < count; i++) {
+			if (keyword_indexes[i] == idx)
+				return FALSE;
+		}
+
+		array_push_back(keywords, &idx);
+		return TRUE;
+	case MAIL_INDEX_SYNC_TYPE_KEYWORD_REMOVE:
+		for (i = 0; i < count; i++) {
+			if (keyword_indexes[i] == idx) {
+				array_delete(keywords, i, 1);
+				return TRUE;
+			}
+		}
+		return FALSE;
+	default:
+		i_unreached();
+		return FALSE;
+	}
+}
+
+/* Record a log synchronization error at the current log-view position.
+   Marks the map sync as failed, schedules a dovecot.index rewrite (the
+   first reason wins and takes ownership of the string), and logs the error
+   unless the position is before the last fsck — those are already known. */
+void mail_index_sync_set_corrupted(struct mail_index_sync_map_ctx *ctx,
+				   const char *fmt, ...)
+{
+	va_list va;
+	uint32_t seq;
+	uoff_t offset;
+	char *reason, *reason_free = NULL;
+
+	va_start(va, fmt);
+	reason = reason_free = i_strdup_vprintf(fmt, va);
+	va_end(va);
+
+	ctx->errors = TRUE;
+	/* make sure we don't get to this same error again by updating the
+	   dovecot.index */
+	if (ctx->view->index->need_recreate == NULL) {
+		ctx->view->index->need_recreate = reason;
+		reason_free = NULL;
+	}
+
+	mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
+					       &seq, &offset);
+
+	if (seq < ctx->view->index->fsck_log_head_file_seq ||
+	    (seq == ctx->view->index->fsck_log_head_file_seq &&
+	     offset < ctx->view->index->fsck_log_head_file_offset)) {
+		/* be silent */
+	} else {
+		mail_index_set_error(ctx->view->index,
+				     "Log synchronization error at "
+				     "seq=%u,offset=%"PRIuUOFF_T" for %s: %s",
+				     seq, offset, ctx->view->index->filepath,
+				     reason);
+	}
+	i_free(reason_free);
+}
diff --git a/src/lib-index/mail-index-transaction-export.c b/src/lib-index/mail-index-transaction-export.c
new file mode 100644
index 0000000..2aced67
--- /dev/null
+++ b/src/lib-index/mail-index-transaction-export.c
@@ -0,0 +1,533 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "mail-index-private.h"
+#include "mail-index-modseq.h"
+#include "mail-transaction-log-private.h"
+#include "mail-index-transaction-private.h"
+
+/* State shared by the export helpers: the transaction being exported and
+   the log append context receiving the serialized records. */
+struct mail_index_export_context {
+	struct mail_index_transaction *trans;
+	struct mail_transaction_log_append_ctx *append_ctx;
+};
+
+/* Append one serialized record of the given type to the log append
+   context. */
+static void
+log_append_buffer(struct mail_index_export_context *ctx,
+		  const buffer_t *buf, enum mail_transaction_type type)
+{
+	mail_transaction_log_append_add(ctx->append_ctx, type,
+					buf->data, buf->used);
+}
+
+/* Convert in-memory flag updates to the on-disk flag-update record layout
+   (8-bit flags plus the modseq-increment bit) and append them as a single
+   MAIL_TRANSACTION_FLAG_UPDATE record. No-op if there are no updates. */
+static void log_append_flag_updates(struct mail_index_export_context *ctx,
+				    struct mail_index_transaction *t)
+{
+	ARRAY(struct mail_transaction_flag_update) log_updates;
+	const struct mail_index_flag_update *updates;
+	struct mail_transaction_flag_update *log_update;
+	unsigned int i, count;
+
+	updates = array_get(&t->updates, &count);
+	if (count == 0)
+		return;
+
+	i_array_init(&log_updates, count);
+
+	for (i = 0; i < count; i++) {
+		log_update = array_append_space(&log_updates);
+		log_update->uid1 = updates[i].uid1;
+		log_update->uid2 = updates[i].uid2;
+		/* only the low 8 bits are real flags; the rest are internal */
+		log_update->add_flags = updates[i].add_flags & 0xff;
+		log_update->remove_flags = updates[i].remove_flags & 0xff;
+		if ((updates[i].add_flags & MAIL_INDEX_MAIL_FLAG_UPDATE_MODSEQ) != 0)
+			log_update->modseq_inc_flag = 1;
+	}
+	log_append_buffer(ctx, log_updates.arr.buffer,
+			  MAIL_TRANSACTION_FLAG_UPDATE);
+	array_free(&log_updates);
+}
+
+/* Build a header-update record from the pre- or post-header change mask:
+   each contiguous run of masked bytes becomes one {offset, size, data}
+   entry. Returns a t_buffer (data-stack allocated). */
+static const buffer_t *
+log_get_hdr_update_buffer(struct mail_index_transaction *t, bool prepend)
+{
+	buffer_t *buf;
+	const unsigned char *data, *mask;
+	struct mail_transaction_header_update u;
+	uint16_t offset;
+	int state = 0;
+
+	i_zero(&u);
+
+	data = prepend ? t->pre_hdr_change : t->post_hdr_change;
+	mask = prepend ? t->pre_hdr_mask : t->post_hdr_mask;
+
+	buf = t_buffer_create(256);
+	/* iterate one past the end so a run ending at the last byte is
+	   flushed too */
+	for (offset = 0; offset <= sizeof(t->pre_hdr_change); offset++) {
+		if (offset < sizeof(t->pre_hdr_change) && mask[offset] != 0) {
+			if (state == 0) {
+				/* start of a modified run */
+				u.offset = offset;
+				state++;
+			}
+		} else {
+			if (state > 0) {
+				/* flush the run that just ended */
+				u.size = offset - u.offset;
+				buffer_append(buf, &u, sizeof(u));
+				buffer_append(buf, data + u.offset, u.size);
+				state = 0;
+			}
+		}
+	}
+	return buf;
+}
+
+/* Returns the number of extension header bytes actually touched by this
+   update, i.e. one past the highest masked byte (0 if nothing is masked). */
+static unsigned int
+ext_hdr_update_get_size(const struct mail_index_transaction_ext_hdr_update *hu)
+{
+	unsigned int i;
+
+	for (i = hu->alloc_size; i > 0; i--) {
+		if (hu->mask[i-1] != 0)
+			return i;
+	}
+	return 0;
+}
+
+/* Append an extension-intro record for ext_id. The intro either reuses a
+   pending resize struct or is generated fresh; it names the extension only
+   when it doesn't exist in the current map yet. reset_id != 0 means the
+   extension is about to be reset in this same transaction. The final
+   header size chosen for the extension is returned in *hdr_size_r. */
+static void log_append_ext_intro(struct mail_index_export_context *ctx,
+				 uint32_t ext_id, uint32_t reset_id,
+				 unsigned int *hdr_size_r)
+{
+	struct mail_index_transaction *t = ctx->trans;
+	const struct mail_index_registered_ext *rext;
+	const struct mail_index_ext *ext;
+	struct mail_transaction_ext_intro *intro, *resizes;
+	buffer_t *buf;
+	uint32_t idx;
+	unsigned int count;
+
+	i_assert(ext_id != (uint32_t)-1);
+
+	if (t->reset ||
+	    !mail_index_map_get_ext_idx(t->view->index->map, ext_id, &idx)) {
+		/* new extension */
+		idx = (uint32_t)-1;
+	}
+
+	rext = array_idx(&t->view->index->extensions, ext_id);
+	if (!array_is_created(&t->ext_resizes)) {
+		resizes = NULL;
+		count = 0;
+	} else {
+		resizes = array_get_modifiable(&t->ext_resizes, &count);
+	}
+
+	buf = t_buffer_create(128);
+	if (ext_id < count && resizes[ext_id].name_size != 0) {
+		/* we're resizing the extension. use the resize struct. */
+		intro = &resizes[ext_id];
+
+		if (idx != (uint32_t)-1) {
+			intro->ext_id = idx;
+			intro->name_size = 0;
+		} else {
+			intro->ext_id = (uint32_t)-1;
+			intro->name_size = strlen(rext->name);
+		}
+		buffer_append(buf, intro, sizeof(*intro));
+	} else {
+		/* generate a new intro structure */
+		intro = buffer_append_space_unsafe(buf, sizeof(*intro));
+		intro->ext_id = idx;
+		intro->record_size = rext->record_size;
+		intro->record_align = rext->record_align;
+		if (idx == (uint32_t)-1) {
+			intro->hdr_size = rext->hdr_size;
+			intro->name_size = strlen(rext->name);
+		} else {
+			ext = array_idx(&t->view->index->map->extensions, idx);
+			intro->hdr_size = ext->hdr_size;
+			intro->name_size = 0;
+		}
+		intro->flags = MAIL_TRANSACTION_EXT_INTRO_FLAG_NO_SHRINK;
+
+		/* handle increasing header size automatically */
+		if (array_is_created(&t->ext_hdr_updates) &&
+		    ext_id < array_count(&t->ext_hdr_updates)) {
+			const struct mail_index_transaction_ext_hdr_update *hu;
+			unsigned int hdr_update_size;
+
+			hu = array_idx(&t->ext_hdr_updates, ext_id);
+			hdr_update_size = ext_hdr_update_get_size(hu);
+			if (intro->hdr_size < hdr_update_size)
+				intro->hdr_size = hdr_update_size;
+		}
+	}
+	i_assert(intro->record_size != 0 || intro->hdr_size != 0);
+	if (reset_id != 0) {
+		/* we're going to reset this extension in this transaction */
+		intro->reset_id = reset_id;
+	} else if (idx != (uint32_t)-1) {
+		/* use the existing reset_id */
+		const struct mail_index_ext *map_ext =
+			array_idx(&t->view->index->map->extensions, idx);
+		intro->reset_id = map_ext->reset_id;
+	} else {
+		/* new extension, reset_id defaults to 0 */
+	}
+	buffer_append(buf, rext->name, intro->name_size);
+	/* records are 32-bit aligned in the log */
+	if ((buf->used % 4) != 0)
+		buffer_append_zero(buf, 4 - (buf->used % 4));
+
+	if (ctx->append_ctx->new_highest_modseq == 0 &&
+	    strcmp(rext->name, MAIL_INDEX_MODSEQ_EXT_NAME) == 0) {
+		/* modseq tracking started */
+		ctx->append_ctx->new_highest_modseq = 1;
+	}
+
+	log_append_buffer(ctx, buf, MAIL_TRANSACTION_EXT_INTRO);
+	*hdr_size_r = intro->hdr_size;
+}
+
+/* Append an extension-header-update record built from the change mask,
+   one {offset, size, data} entry per contiguous masked run. Uses the
+   32-bit offset variant when the header area is >= 64 kB, since the
+   16-bit struct couldn't address it. */
+static void
+log_append_ext_hdr_update(struct mail_index_export_context *ctx,
+			const struct mail_index_transaction_ext_hdr_update *hdr,
+			unsigned int ext_hdr_size)
+{
+	buffer_t *buf;
+	const unsigned char *data, *mask;
+	struct mail_transaction_ext_hdr_update u;
+	struct mail_transaction_ext_hdr_update32 u32;
+	size_t offset;
+	bool started = FALSE, use_32 = hdr->alloc_size >= 65536;
+
+	i_zero(&u);
+	i_zero(&u32);
+
+	data = hdr->data;
+	mask = hdr->mask;
+
+	buf = buffer_create_dynamic(default_pool, 256);
+	/* iterate one past the end so a run ending at the last byte is
+	   flushed too */
+	for (offset = 0; offset <= hdr->alloc_size; offset++) {
+		if (offset < hdr->alloc_size && mask[offset] != 0) {
+			if (!started) {
+				u32.offset = offset;
+				started = TRUE;
+			}
+		} else {
+			if (started) {
+				u32.size = offset - u32.offset;
+				if (use_32)
+					buffer_append(buf, &u32, sizeof(u32));
+				else {
+					/* narrow to the 16-bit struct */
+					u.offset = u32.offset;
+					u.size = u32.size;
+					buffer_append(buf, &u, sizeof(u));
+				}
+				i_assert(u32.offset + u32.size <= ext_hdr_size);
+				buffer_append(buf, data + u32.offset, u32.size);
+				started = FALSE;
+			}
+		}
+	}
+	/* records are 32-bit aligned in the log */
+	if (buf->used % 4 != 0)
+		buffer_append_zero(buf, 4 - buf->used % 4);
+	log_append_buffer(ctx, buf, use_32 ? MAIL_TRANSACTION_EXT_HDR_UPDATE32 :
+			  MAIL_TRANSACTION_EXT_HDR_UPDATE);
+	buffer_free(&buf);
+}
+
+/* Walk every extension touched by the transaction (resizes, resets and
+   header updates) and append the needed intro / reset / header-update
+   records in that order. */
+static void
+mail_transaction_log_append_ext_intros(struct mail_index_export_context *ctx)
+{
+	struct mail_index_transaction *t = ctx->trans;
+	const struct mail_transaction_ext_intro *resize;
+	const struct mail_index_transaction_ext_hdr_update *hdrs;
+	struct mail_transaction_ext_reset ext_reset;
+	unsigned int resize_count, ext_count = 0;
+	unsigned int hdrs_count, reset_id_count, reset_count, hdr_size;
+	uint32_t ext_id, reset_id;
+	const struct mail_transaction_ext_reset *reset;
+	const uint32_t *reset_ids;
+	buffer_t reset_buf;
+
+	/* ext_count becomes the max over all of the per-extension arrays */
+	if (!array_is_created(&t->ext_resizes)) {
+		resize = NULL;
+		resize_count = 0;
+	} else {
+		resize = array_get(&t->ext_resizes, &resize_count);
+		if (ext_count < resize_count)
+			ext_count = resize_count;
+	}
+
+	if (!array_is_created(&t->ext_reset_ids)) {
+		reset_ids = NULL;
+		reset_id_count = 0;
+	} else {
+		reset_ids = array_get(&t->ext_reset_ids, &reset_id_count);
+	}
+
+	if (!array_is_created(&t->ext_resets)) {
+		reset = NULL;
+		reset_count = 0;
+	} else {
+		reset = array_get(&t->ext_resets, &reset_count);
+		if (ext_count < reset_count)
+			ext_count = reset_count;
+	}
+
+	if (!array_is_created(&t->ext_hdr_updates)) {
+		hdrs = NULL;
+		hdrs_count = 0;
+	} else {
+		hdrs = array_get(&t->ext_hdr_updates, &hdrs_count);
+		if (ext_count < hdrs_count)
+			ext_count = hdrs_count;
+	}
+
+	/* reset_buf aliases ext_reset so the current value is always what
+	   gets appended */
+	i_zero(&ext_reset);
+	buffer_create_from_data(&reset_buf, &ext_reset, sizeof(ext_reset));
+	buffer_set_used_size(&reset_buf, sizeof(ext_reset));
+
+	for (ext_id = 0; ext_id < ext_count; ext_id++) {
+		if (ext_id < reset_count)
+			ext_reset = reset[ext_id];
+		else
+			ext_reset.new_reset_id = 0;
+		if ((ext_id < resize_count && resize[ext_id].name_size > 0) ||
+		    ext_reset.new_reset_id != 0 ||
+		    (ext_id < hdrs_count && hdrs[ext_id].alloc_size > 0)) {
+			if (ext_reset.new_reset_id != 0) {
+				/* we're going to reset this extension
+				   immediately after the intro */
+				reset_id = 0;
+			} else {
+				reset_id = ext_id < reset_id_count ?
+					reset_ids[ext_id] : 0;
+			}
+			log_append_ext_intro(ctx, ext_id, reset_id, &hdr_size);
+		} else {
+			hdr_size = 0;
+		}
+		if (ext_reset.new_reset_id != 0) {
+			i_assert(ext_id < reset_id_count &&
+				 ext_reset.new_reset_id == reset_ids[ext_id]);
+			log_append_buffer(ctx, &reset_buf,
+					  MAIL_TRANSACTION_EXT_RESET);
+		}
+		if (ext_id < hdrs_count && hdrs[ext_id].alloc_size > 0) {
+			T_BEGIN {
+				log_append_ext_hdr_update(ctx, &hdrs[ext_id],
+							  hdr_size);
+			} T_END;
+		}
+	}
+}
+
+/* Append per-extension record updates (or atomic increments): for each
+   extension with pending updates, emit its intro followed by one record of
+   the given transaction type containing the whole update array. */
+static void log_append_ext_recs(struct mail_index_export_context *ctx,
+				const ARRAY_TYPE(seq_array_array) *arr,
+				enum mail_transaction_type type)
+{
+	struct mail_index_transaction *t = ctx->trans;
+	const ARRAY_TYPE(seq_array) *updates;
+	const uint32_t *reset_ids;
+	unsigned int ext_id, count, reset_id_count, hdr_size;
+	uint32_t reset_id;
+
+	if (!array_is_created(&t->ext_reset_ids)) {
+		reset_ids = NULL;
+		reset_id_count = 0;
+	} else {
+		reset_ids = array_get_modifiable(&t->ext_reset_ids,
+						 &reset_id_count);
+	}
+
+	updates = array_get(arr, &count);
+	for (ext_id = 0; ext_id < count; ext_id++) {
+		if (!array_is_created(&updates[ext_id]))
+			continue;
+
+		reset_id = ext_id < reset_id_count ? reset_ids[ext_id] : 0;
+		log_append_ext_intro(ctx, ext_id, reset_id, &hdr_size);
+
+		log_append_buffer(ctx, updates[ext_id].arr.buffer, type);
+	}
+}
+
+/* Serialize one keyword update record: header, keyword name (padded to
+   32-bit alignment), then the UID range buffer. tmp_buf is reused between
+   calls. */
+static void
+log_append_keyword_update(struct mail_index_export_context *ctx,
+			  buffer_t *tmp_buf, enum modify_type modify_type,
+			  const char *keyword, const buffer_t *uid_buffer)
+{
+	struct mail_transaction_keyword_update kt_hdr;
+
+	i_assert(uid_buffer->used > 0);
+
+	i_zero(&kt_hdr);
+	kt_hdr.modify_type = modify_type;
+	kt_hdr.name_size = strlen(keyword);
+
+	buffer_set_used_size(tmp_buf, 0);
+	buffer_append(tmp_buf, &kt_hdr, sizeof(kt_hdr));
+	buffer_append(tmp_buf, keyword, kt_hdr.name_size);
+	if ((tmp_buf->used % 4) != 0)
+		buffer_append_zero(tmp_buf, 4 - (tmp_buf->used % 4));
+	buffer_append(tmp_buf, uid_buffer->data, uid_buffer->used);
+
+	log_append_buffer(ctx, tmp_buf, MAIL_TRANSACTION_KEYWORD_UPDATE);
+}
+
+/* Append add/remove records for every keyword that has pending sequence
+   changes. Returns TRUE if anything was written. */
+static bool
+log_append_keyword_updates(struct mail_index_export_context *ctx)
+{
+	const struct mail_index_transaction_keyword_update *updates;
+	const char *const *keywords;
+	buffer_t *tmp_buf;
+	unsigned int i, count, keywords_count;
+	bool changed = FALSE;
+
+	tmp_buf = t_buffer_create(64);
+
+	keywords = array_get_modifiable(&ctx->trans->view->index->keywords,
+					&keywords_count);
+	/* keyword_updates is indexed by keyword index, so it can't be
+	   larger than the keyword list */
+	updates = array_get_modifiable(&ctx->trans->keyword_updates, &count);
+	i_assert(count <= keywords_count);
+
+	for (i = 0; i < count; i++) {
+		if (array_is_created(&updates[i].add_seq) &&
+		    array_count(&updates[i].add_seq) > 0) {
+			changed = TRUE;
+			log_append_keyword_update(ctx, tmp_buf,
+					MODIFY_ADD, keywords[i],
+					updates[i].add_seq.arr.buffer);
+		}
+		if (array_is_created(&updates[i].remove_seq) &&
+		    array_count(&updates[i].remove_seq) > 0) {
+			changed = TRUE;
+			log_append_keyword_update(ctx, tmp_buf,
+					MODIFY_REMOVE, keywords[i],
+					updates[i].remove_seq.arr.buffer);
+		}
+	}
+	return changed;
+}
+
+/* Serialize every pending change in the transaction into log records on
+   append_ctx, in a fixed order (intros/resizes first, appends and updates,
+   modseq updates and expunges near the end). *changes_r receives a bitmask
+   of the change categories that were written, and append_ctx's fsync/tail
+   bookkeeping is updated from the transaction. */
+void mail_index_transaction_export(struct mail_index_transaction *t,
+				   struct mail_transaction_log_append_ctx *append_ctx,
+				   enum mail_index_transaction_change *changes_r)
+{
+	/* payload for the (UN)DELETED marker records, which carry no data
+	   but must stay 32-bit aligned */
+	static uint8_t null4[4] = { 0, 0, 0, 0 };
+	enum mail_index_fsync_mask change_mask = 0;
+	struct mail_index_export_context ctx;
+
+	*changes_r = 0;
+
+	i_zero(&ctx);
+	ctx.trans = t;
+	ctx.append_ctx = append_ctx;
+
+	if (t->index_undeleted) {
+		i_assert(!t->index_deleted);
+		mail_transaction_log_append_add(ctx.append_ctx,
+			MAIL_TRANSACTION_INDEX_UNDELETED, &null4, 4);
+	}
+
+	/* send all extension introductions and resizes before appends
+	   to avoid resize overhead as much as possible */
+	mail_transaction_log_append_ext_intros(&ctx);
+
+	if (t->pre_hdr_changed) {
+		log_append_buffer(&ctx, log_get_hdr_update_buffer(t, TRUE),
+				  MAIL_TRANSACTION_HEADER_UPDATE);
+	}
+
+	if (append_ctx->output->used > 0)
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_OTHERS;
+
+	if (t->attribute_updates != NULL) {
+		buffer_append_c(t->attribute_updates, '\0');
+		/* need to have 32bit alignment */
+		if (t->attribute_updates->used % 4 != 0) {
+			buffer_append_zero(t->attribute_updates,
+					   4 - t->attribute_updates->used % 4);
+		}
+		/* append the timestamp and value lengths */
+		buffer_append(t->attribute_updates,
+			      t->attribute_updates_suffix->data,
+			      t->attribute_updates_suffix->used);
+		i_assert(t->attribute_updates->used % 4 == 0);
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_ATTRIBUTE;
+		log_append_buffer(&ctx, t->attribute_updates,
+				  MAIL_TRANSACTION_ATTRIBUTE_UPDATE);
+	}
+	if (array_is_created(&t->appends)) {
+		change_mask |= MAIL_INDEX_FSYNC_MASK_APPENDS;
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_APPEND;
+		log_append_buffer(&ctx, t->appends.arr.buffer,
+				  MAIL_TRANSACTION_APPEND);
+	}
+
+	if (array_is_created(&t->updates)) {
+		change_mask |= MAIL_INDEX_FSYNC_MASK_FLAGS;
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_FLAGS;
+		log_append_flag_updates(&ctx, t);
+	}
+
+	if (array_is_created(&t->ext_rec_updates)) {
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_OTHERS;
+		log_append_ext_recs(&ctx, &t->ext_rec_updates,
+				    MAIL_TRANSACTION_EXT_REC_UPDATE);
+	}
+	if (array_is_created(&t->ext_rec_atomics)) {
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_OTHERS;
+		log_append_ext_recs(&ctx, &t->ext_rec_atomics,
+				    MAIL_TRANSACTION_EXT_ATOMIC_INC);
+	}
+
+	if (array_is_created(&t->keyword_updates)) {
+		if (log_append_keyword_updates(&ctx)) {
+			change_mask |= MAIL_INDEX_FSYNC_MASK_KEYWORDS;
+			*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_KEYWORDS;
+		}
+	}
+	/* keep modseq updates almost last */
+	if (array_is_created(&t->modseq_updates)) {
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_MODSEQ;
+		log_append_buffer(&ctx, t->modseq_updates.arr.buffer,
+				  MAIL_TRANSACTION_MODSEQ_UPDATE);
+	}
+
+	if (array_is_created(&t->expunges)) {
+		/* non-external expunges are only requests, ignore them when
+		   checking fsync_mask */
+		if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL) != 0) {
+			change_mask |= MAIL_INDEX_FSYNC_MASK_EXPUNGES;
+			*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_EXPUNGE;
+		} else {
+			*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_OTHERS;
+		}
+		log_append_buffer(&ctx, t->expunges.arr.buffer,
+				  MAIL_TRANSACTION_EXPUNGE_GUID);
+	}
+
+	if (t->post_hdr_changed) {
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_OTHERS;
+		log_append_buffer(&ctx, log_get_hdr_update_buffer(t, FALSE),
+				  MAIL_TRANSACTION_HEADER_UPDATE);
+	}
+
+	if (t->index_deleted) {
+		i_assert(!t->index_undeleted);
+		*changes_r |= MAIL_INDEX_TRANSACTION_CHANGE_OTHERS;
+		mail_transaction_log_append_add(ctx.append_ctx,
+						MAIL_TRANSACTION_INDEX_DELETED,
+						&null4, 4);
+	}
+
+	/* sanity: output and the change mask must agree on "anything done" */
+	i_assert((append_ctx->output->used > 0) == (*changes_r != 0));
+
+	append_ctx->index_sync_transaction = t->sync_transaction;
+	append_ctx->tail_offset_changed = t->tail_offset_changed;
+	append_ctx->want_fsync =
+		(t->view->index->set.fsync_mask & change_mask) != 0 ||
+		(t->flags & MAIL_INDEX_TRANSACTION_FLAG_FSYNC) != 0;
+}
diff --git a/src/lib-index/mail-index-transaction-finish.c b/src/lib-index/mail-index-transaction-finish.c
new file mode 100644
index 0000000..360e597
--- /dev/null
+++ b/src/lib-index/mail-index-transaction-finish.c
@@ -0,0 +1,350 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "ioloop.h"
+#include "mail-index-private.h"
+#include "mail-index-modseq.h"
+#include "mail-index-transaction-private.h"
+
+int mail_transaction_expunge_guid_cmp(const struct mail_transaction_expunge_guid *e1,
+				      const struct mail_transaction_expunge_guid *e2)
+{
+	/* order expunge records by ascending UID */
+	if (e1->uid > e2->uid)
+		return 1;
+	if (e1->uid < e2->uid)
+		return -1;
+	return 0;
+}
+
+void mail_index_transaction_sort_expunges(struct mail_index_transaction *t)
+{
+	/* sort only when expunges were recorded out of order */
+	if (t->expunges_nonsorted) {
+		array_sort(&t->expunges, mail_transaction_expunge_guid_cmp);
+		t->expunges_nonsorted = FALSE;
+	}
+}
+
+static void
+ext_reset_update_atomic(struct mail_index_transaction *t,
+			uint32_t ext_id, uint32_t expected_reset_id)
+{
+	const struct mail_index_ext *map_ext;
+	struct mail_transaction_ext_reset *reset;
+	uint32_t idx, reset_id;
+
+	/* Figure out what the extension's reset_id becomes after this
+	   transaction, and apply the update only if it matches what the
+	   caller expected. */
+	if (!mail_index_map_get_ext_idx(t->view->index->map, ext_id, &idx)) {
+		/* new extension */
+		reset_id = 1;
+	} else {
+		map_ext = array_idx(&t->view->index->map->extensions, idx);
+		reset_id = map_ext->reset_id + 1;
+	}
+	if (reset_id != expected_reset_id) {
+		/* ignore this extension update */
+		mail_index_ext_set_reset_id(t, ext_id, 0);
+		return;
+	}
+
+	/* NOTE(review): the only caller passes a nonzero expected_reset_id,
+	   so reset_id == 0 appears unreachable past the check above --
+	   presumably kept as a safety net; confirm before removing. */
+	if (reset_id == 0)
+		reset_id++;
+
+	array_idx_set(&t->ext_reset_ids, ext_id, &reset_id);
+
+	/* reseting existing data is optional */
+	if (array_is_created(&t->ext_resets)) {
+		reset = array_idx_modifiable(&t->ext_resets, ext_id);
+		if (reset->new_reset_id == (uint32_t)-1)
+			reset->new_reset_id = reset_id;
+	}
+}
+
+static void
+transaction_update_atomic_reset_ids(struct mail_index_transaction *t)
+{
+	const uint32_t *reset_ids;
+	unsigned int i, ids_count;
+
+	/* Apply every pending atomic reset_id update, skipping slots that
+	   were never set (value 0). */
+	if (!array_is_created(&t->ext_reset_atomic))
+		return;
+
+	reset_ids = array_get(&t->ext_reset_atomic, &ids_count);
+	for (i = 0; i < ids_count; i++) {
+		if (reset_ids[i] == 0)
+			continue;
+		ext_reset_update_atomic(t, i, reset_ids[i]);
+	}
+}
+
+static unsigned int
+mail_transaction_drop_range(struct mail_index_transaction *t,
+			    struct mail_index_flag_update update,
+			    unsigned int update_idx,
+			    ARRAY_TYPE(seq_range) *keeps)
+{
+	const struct seq_range *keep_range;
+	unsigned int i, keep_count;
+
+	/* Replace the flag update at update_idx with copies covering only
+	   the sub-ranges listed in keeps. Returns the index just past the
+	   last update that was kept/re-inserted, i.e. where the caller
+	   should continue iterating. */
+	keep_range = array_get(keeps, &keep_count);
+	if (keep_count == 1 &&
+	    update.uid1 == keep_range[0].seq1 &&
+	    update.uid2 == keep_range[0].seq2) {
+		/* everything is kept */
+		return update_idx + 1;
+	}
+
+	array_delete(&t->updates, update_idx, 1);
+
+	/* add back all the updates we want to keep */
+	for (i = 0; i < keep_count; i++, update_idx++) {
+		update.uid1 = keep_range[i].seq1;
+		update.uid2 = keep_range[i].seq2;
+		array_insert(&t->updates, update_idx, &update, 1);
+	}
+	return update_idx;
+}
+
+static void
+mail_index_transaction_finish_flag_updates(struct mail_index_transaction *t)
+{
+	const struct mail_index_flag_update *updates, *u;
+	const struct mail_index_record *rec;
+	unsigned int i, count;
+	ARRAY_TYPE(seq_range) keeps;
+	uint32_t seq;
+
+	/* Drop flag updates that would change nothing: every add_flag is
+	   already set and every remove_flag already unset in the view.
+	   Only done when drop_unnecessary_flag_updates was requested. */
+	if (!t->drop_unnecessary_flag_updates || !array_is_created(&t->updates))
+		return;
+
+	t_array_init(&keeps, 64);
+	updates = array_get(&t->updates, &count);
+	for (i = 0; i < count; ) {
+		/* first get the list of changes to drop */
+		u = &updates[i];
+		array_clear(&keeps);
+		for (seq = u->uid1; seq <= u->uid2; seq++) {
+			rec = mail_index_lookup(t->view, seq);
+			if ((rec->flags & u->add_flags) != u->add_flags ||
+			    (rec->flags & u->remove_flags) != 0) {
+				/* keep this change */
+				seq_range_array_add(&keeps, seq);
+			}
+		}
+		/* drop_range inserts/removes elements, so the array pointer
+		   must be re-fetched before the next iteration */
+		i = mail_transaction_drop_range(t, updates[i], i, &keeps);
+		updates = array_get(&t->updates, &count);
+	}
+
+	if (array_count(&t->updates) == 0)
+		array_free(&t->updates);
+}
+
+static void
+mail_index_transaction_check_conflicts(struct mail_index_transaction *t)
+{
+	uint32_t seq;
+	bool ret1, ret2;
+
+	/* Cancel flag/keyword updates for mails whose modseq is newer than
+	   the transaction's max_modseq, recording the cancelled sequences
+	   into t->conflict_seqs. */
+	i_assert(t->max_modseq != 0);
+	i_assert(t->conflict_seqs != NULL);
+
+	if (t->max_modseq == mail_index_modseq_get_highest(t->view)) {
+		/* no conflicts possible */
+		return;
+	}
+	if (t->min_flagupdate_seq == 0) {
+		/* no flag updates */
+		return;
+	}
+
+	for (seq = t->min_flagupdate_seq; seq <= t->max_flagupdate_seq; seq++) {
+		if (mail_index_modseq_lookup(t->view, seq) > t->max_modseq) {
+			ret1 = mail_index_cancel_flag_updates(t, seq);
+			ret2 = mail_index_cancel_keyword_updates(t, seq);
+			if (ret1 || ret2) {
+				seq_range_array_add_with_init(t->conflict_seqs,
+							      16, seq);
+			}
+		}
+	}
+	/* cancelling updates may change whether anything needs logging */
+	mail_index_transaction_set_log_updates(t);
+}
+
+static uint32_t
+mail_index_transaction_get_uid(struct mail_index_transaction *t, uint32_t seq)
+{
+	const struct mail_index_record *rec;
+
+	i_assert(seq > 0);
+
+	/* appended messages live inside the transaction, existing ones in
+	   the view's map */
+	if (seq < t->first_new_seq) {
+		i_assert(seq <= t->view->map->hdr.messages_count);
+		rec = MAIL_INDEX_REC_AT_SEQ(t->view->map, seq);
+	} else {
+		rec = mail_index_transaction_lookup(t, seq);
+	}
+	i_assert(rec->uid != 0);
+	return rec->uid;
+}
+
+static void
+mail_index_convert_to_uids(struct mail_index_transaction *t,
+			   ARRAY_TYPE(seq_array) *array)
+{
+	unsigned int i, n;
+	uint32_t *seqp;
+
+	/* rewrite every stored sequence into the corresponding UID */
+	if (!array_is_created(array))
+		return;
+
+	n = array_count(array);
+	for (i = 0; i < n; i++) {
+		seqp = array_idx_modifiable(array, i);
+		*seqp = mail_index_transaction_get_uid(t, *seqp);
+	}
+}
+
+static uint32_t
+get_nonexpunged_uid2(struct mail_index_transaction *t,
+		     uint32_t uid1, uint32_t seq1)
+{
+	/* Walk forward while consecutive sequences still map to consecutive
+	   UIDs, i.e. until the first expunged mail. Returns the last UID of
+	   the contiguous run starting at uid1. */
+	for (;;) {
+		seq1++;
+		if (mail_index_transaction_get_uid(t, seq1) != uid1 + 1)
+			break;
+		uid1++;
+	}
+	return uid1;
+}
+
+void mail_index_transaction_seq_range_to_uid(struct mail_index_transaction *t,
+					     ARRAY_TYPE(seq_range) *array)
+{
+	struct seq_range *range, *new_range;
+	unsigned int i, count;
+	uint32_t uid1, uid2, prev_uid = 0;
+
+	/* Convert each seq1..seq2 range into UID ranges. A sequence range
+	   that contains expunged mails maps to several UID ranges, so new
+	   elements may get inserted while iterating. Works also for arrays
+	   whose elements merely *begin* with struct seq_range — the full
+	   element_size is copied when splitting. */
+	if (!array_is_created(array))
+		return;
+
+	count = array_count(array);
+	for (i = 0; i < count; i++) {
+		range = array_idx_modifiable(array, i);
+
+		uid1 = mail_index_transaction_get_uid(t, range->seq1);
+		uid2 = mail_index_transaction_get_uid(t, range->seq2);
+		i_assert(uid1 > prev_uid);
+		if (uid2 - uid1 == range->seq2 - range->seq1) {
+			/* simple conversion */
+			range->seq1 = uid1;
+			range->seq2 = uid2;
+			prev_uid = uid2;
+		} else {
+			/* remove expunged UIDs */
+			new_range = array_insert_space(array, i);
+			range = array_idx_modifiable(array, i + 1);
+			count++;
+
+			memcpy(new_range, range, array->arr.element_size);
+			new_range->seq1 = uid1;
+			new_range->seq2 = get_nonexpunged_uid2(t, uid1,
+							       range->seq1);
+			i_assert(new_range->seq2 < uid2);
+
+			/* continue the range without the inserted seqs */
+			range->seq1 += new_range->seq2 - new_range->seq1 + 1;
+			prev_uid = new_range->seq2;
+		}
+	}
+}
+
+static void keyword_updates_convert_to_uids(struct mail_index_transaction *t)
+{
+	struct mail_index_transaction_keyword_update *kw;
+
+	/* translate every keyword update's add/remove sequence ranges into
+	   UID ranges */
+	if (!array_is_created(&t->keyword_updates))
+		return;
+
+	array_foreach_modifiable(&t->keyword_updates, kw) {
+		mail_index_transaction_seq_range_to_uid(t, &kw->add_seq);
+		mail_index_transaction_seq_range_to_uid(t, &kw->remove_seq);
+	}
+}
+
+static void expunges_convert_to_uids(struct mail_index_transaction *t)
+{
+	struct mail_transaction_expunge_guid *expunges;
+	unsigned int src, dest, count;
+
+	/* Convert the sorted expunge list's sequences to UIDs and compact
+	   away duplicate entries in-place. */
+	if (!array_is_created(&t->expunges))
+		return;
+
+	mail_index_transaction_sort_expunges(t);
+
+	expunges = array_get_modifiable(&t->expunges, &count);
+	if (count == 0)
+		return;
+
+	/* convert uids and drop duplicates */
+	expunges[0].uid = mail_index_transaction_get_uid(t, expunges[0].uid);
+	for (src = dest = 1; src < count; src++) {
+		/* convert into the next output slot; on a duplicate the slot
+		   is simply overwritten by the following iteration */
+		expunges[dest].uid =
+			mail_index_transaction_get_uid(t, expunges[src].uid);
+		if (expunges[dest-1].uid != expunges[dest].uid) {
+			if (dest != src) {
+				memcpy(expunges[dest].guid_128, expunges[src].guid_128,
+				       sizeof(expunges[dest].guid_128));
+			}
+			dest++;
+		}
+	}
+	array_delete(&t->expunges, dest, count-dest);
+}
+
+static void
+mail_index_transaction_convert_to_uids(struct mail_index_transaction *t)
+{
+	ARRAY_TYPE(seq_array) *update;
+
+	/* Final pass before export: rewrite everything that was stored with
+	   sequences to use UIDs instead. */
+	if (array_is_created(&t->ext_rec_updates)) {
+		array_foreach_modifiable(&t->ext_rec_updates, update)
+			mail_index_convert_to_uids(t, update);
+	}
+	if (array_is_created(&t->ext_rec_atomics)) {
+		array_foreach_modifiable(&t->ext_rec_atomics, update)
+			mail_index_convert_to_uids(t, update);
+	}
+
+	keyword_updates_convert_to_uids(t);
+	expunges_convert_to_uids(t);
+	/* modseq_updates elements start with a uint32_t sequence and
+	   updates elements with a seq_range, so the generic converters can
+	   be used via the casts */
+	mail_index_convert_to_uids(t, (void *)&t->modseq_updates);
+	mail_index_transaction_seq_range_to_uid(t, (void *)&t->updates);
+}
+
+void mail_index_transaction_finish_so_far(struct mail_index_transaction *t)
+{
+	/* Partial finish: sort the appends, drop no-op flag updates and
+	   resolve modseq conflicts. Sequences are NOT yet converted to
+	   UIDs (that happens in mail_index_transaction_finish()). */
+	if (array_is_created(&t->appends))
+		mail_index_transaction_sort_appends(t);
+	mail_index_transaction_finish_flag_updates(t);
+	/* max_modseq != 0 means conflict checking was requested */
+	if (t->max_modseq != 0)
+		mail_index_transaction_check_conflicts(t);
+}
+
+void mail_index_transaction_finish(struct mail_index_transaction *t)
+{
+	/* Fully prepare the transaction for writing to the transaction log. */
+	mail_index_transaction_finish_so_far(t);
+
+	if (array_is_created(&t->appends))
+		mail_index_update_day_headers(t, ioloop_time);
+	if (array_is_created(&t->ext_reset_atomic))
+		transaction_update_atomic_reset_ids(t);
+	/* finally convert all sequences to UIDs before we write them,
+	   but after we've checked and removed conflicts */
+	mail_index_transaction_convert_to_uids(t);
+
+	/* and kind of ugly way to update highest modseq */
+	if (t->min_highest_modseq != 0)
+		mail_index_update_modseq(t, 0, t->min_highest_modseq);
+}
diff --git a/src/lib-index/mail-index-transaction-private.h b/src/lib-index/mail-index-transaction-private.h
new file mode 100644
index 0000000..eb438b2
--- /dev/null
+++ b/src/lib-index/mail-index-transaction-private.h
@@ -0,0 +1,165 @@
+#ifndef MAIL_INDEX_TRANSACTION_PRIVATE_H
+#define MAIL_INDEX_TRANSACTION_PRIVATE_H
+
+#include "seq-range-array.h"
+#include "mail-transaction-log.h"
+
+ARRAY_DEFINE_TYPE(seq_array_array, ARRAY_TYPE(seq_array));
+
+/* Per-keyword lists of sequences whose keyword is added/removed. */
+struct mail_index_transaction_keyword_update {
+	ARRAY_TYPE(seq_range) add_seq;
+	ARRAY_TYPE(seq_range) remove_seq;
+};
+
+/* Pending change to an extension's header area. */
+struct mail_index_transaction_ext_hdr_update {
+	size_t alloc_size;
+	/* mask is in bytes, not bits */
+	unsigned char *mask;
+	unsigned char *data;
+};
+
+/* Reset/commit/rollback virtual functions; modules may override these. */
+struct mail_index_transaction_vfuncs {
+	void (*reset)(struct mail_index_transaction *t);
+	int (*commit)(struct mail_index_transaction *t,
+		      struct mail_index_transaction_commit_result *result_r);
+	void (*rollback)(struct mail_index_transaction *t);
+};
+
+union mail_index_transaction_module_context {
+	struct mail_index_transaction_vfuncs super;
+	struct mail_index_module_register *reg;
+};
+
+/* One flag change applied to the uid1..uid2 range. NOTE(review): while the
+   transaction is uncommitted, uid1/uid2 appear to hold sequences (see the
+   comment at the top of mail-index-transaction-update.c). */
+struct mail_index_flag_update {
+	uint32_t uid1, uid2;
+	uint16_t add_flags;
+	uint16_t remove_flags;
+};
+
+struct mail_index_transaction {
+	/* transactions are kept in a doubly linked list */
+	struct mail_index_transaction *prev, *next;
+	int refcount;
+
+	enum mail_index_transaction_flags flags;
+	struct mail_index_transaction_vfuncs v, *vlast;
+	struct mail_index_view *view;
+	struct mail_index_view *latest_view;
+
+	/* NOTE: If you add anything new, remember to update
+	   mail_index_transaction_reset_v() to reset it. */
+	ARRAY(struct mail_index_record) appends;
+	uint32_t first_new_seq, last_new_seq;
+	uint32_t highest_append_uid;
+	/* lowest/highest sequence that updates flags/keywords */
+	uint32_t min_flagupdate_seq, max_flagupdate_seq;
+
+	ARRAY(struct mail_transaction_modseq_update) modseq_updates;
+	ARRAY(struct mail_transaction_expunge_guid) expunges;
+	ARRAY(struct mail_index_flag_update) updates;
+	size_t last_update_idx;
+
+	/* pending base header changes + byte masks of which bytes changed */
+	unsigned char pre_hdr_change[sizeof(struct mail_index_header)];
+	unsigned char pre_hdr_mask[sizeof(struct mail_index_header)];
+	unsigned char post_hdr_change[sizeof(struct mail_index_header)];
+	unsigned char post_hdr_mask[sizeof(struct mail_index_header)];
+
+	/* per-extension pending changes, indexed by extension id */
+	ARRAY(struct mail_index_transaction_ext_hdr_update) ext_hdr_updates;
+	ARRAY_TYPE(seq_array_array) ext_rec_updates;
+	ARRAY_TYPE(seq_array_array) ext_rec_atomics;
+	ARRAY(struct mail_transaction_ext_intro) ext_resizes;
+	ARRAY(struct mail_transaction_ext_reset) ext_resets;
+	ARRAY(uint32_t) ext_reset_ids;
+	ARRAY(uint32_t) ext_reset_atomic;
+
+	ARRAY(struct mail_index_transaction_keyword_update) keyword_updates;
+	buffer_t *attribute_updates; /* [+-][ps]key\0.. */
+	buffer_t *attribute_updates_suffix; /* <timestamp>[<value len>].. */
+
+	uint64_t min_highest_modseq;
+	uint64_t max_modseq;
+	ARRAY_TYPE(seq_range) *conflict_seqs;
+
+	/* Module-specific contexts. */
+	ARRAY(union mail_index_transaction_module_context *) module_contexts;
+
+	bool no_appends:1;
+
+	bool sync_transaction:1;
+	bool appends_nonsorted:1;
+	bool expunges_nonsorted:1;
+	bool drop_unnecessary_flag_updates:1;
+	bool pre_hdr_changed:1;
+	bool post_hdr_changed:1;
+	bool reset:1;
+	bool index_deleted:1;
+	bool index_undeleted:1;
+	bool commit_deleted_index:1;
+	bool tail_offset_changed:1;
+	/* non-extension updates. flag updates don't change this because
+	   they may be added and removed, so be sure to check that the updates
+	   array is non-empty also. */
+	bool log_updates:1;
+	/* extension updates */
+	bool log_ext_updates:1;
+};
+
+/* TRUE if the transaction contains any change that would be written to the
+   transaction log on commit. */
+#define MAIL_INDEX_TRANSACTION_HAS_CHANGES(t) \
+	((t)->log_updates || (t)->log_ext_updates || \
+	 (array_is_created(&(t)->updates) && array_count(&(t)->updates) > 0) || \
+	 (t)->index_deleted || (t)->index_undeleted)
+
+typedef void hook_mail_index_transaction_created_t(struct mail_index_transaction *t);
+
+void mail_index_transaction_hook_register(hook_mail_index_transaction_created_t *hook);
+void mail_index_transaction_hook_unregister(hook_mail_index_transaction_created_t *hook);
+
+/* Return the in-transaction record for an appended sequence. */
+struct mail_index_record *
+mail_index_transaction_lookup(struct mail_index_transaction *t, uint32_t seq);
+
+void mail_index_transaction_ref(struct mail_index_transaction *t);
+void mail_index_transaction_unref(struct mail_index_transaction **t);
+void mail_index_transaction_reset_v(struct mail_index_transaction *t);
+
+void mail_index_transaction_sort_appends(struct mail_index_transaction *t);
+void mail_index_transaction_sort_expunges(struct mail_index_transaction *t);
+uint32_t mail_index_transaction_get_next_uid(struct mail_index_transaction *t);
+void mail_index_transaction_set_log_updates(struct mail_index_transaction *t);
+void mail_index_update_day_headers(struct mail_index_transaction *t, time_t day_stamp);
+
+/* Binary search for where a flag update for seq belongs in t->updates.
+   (This prototype used to appear twice in this header; the duplicate was
+   removed.) */
+unsigned int
+mail_index_transaction_get_flag_update_pos(struct mail_index_transaction *t,
+					   unsigned int left_idx,
+					   unsigned int right_idx,
+					   uint32_t seq);
+void mail_index_transaction_lookup_latest_keywords(struct mail_index_transaction *t,
+						   uint32_t seq,
+						   ARRAY_TYPE(keyword_indexes) *keywords);
+
+bool mail_index_cancel_flag_updates(struct mail_index_transaction *t,
+				    uint32_t seq);
+bool mail_index_cancel_keyword_updates(struct mail_index_transaction *t,
+				       uint32_t seq);
+
+/* As input the array's each element starts with struct seq_range where
+   uid1..uid2 are actually sequences within the transaction view. This function
+   changes the sequences into UIDs. If the transaction has any appends, they
+   must have already been assigned UIDs. */
+void mail_index_transaction_seq_range_to_uid(struct mail_index_transaction *t,
+					     ARRAY_TYPE(seq_range) *array);
+void mail_index_transaction_finish_so_far(struct mail_index_transaction *t);
+void mail_index_transaction_finish(struct mail_index_transaction *t);
+void mail_index_transaction_export(struct mail_index_transaction *t,
+				   struct mail_transaction_log_append_ctx *append_ctx,
+				   enum mail_index_transaction_change *changes_r);
+int mail_transaction_expunge_guid_cmp(const struct mail_transaction_expunge_guid *e1,
+				      const struct mail_transaction_expunge_guid *e2);
+
+void mail_index_ext_using_reset_id(struct mail_index_transaction *t,
+				   uint32_t ext_id, uint32_t reset_id);
+
+#endif
diff --git a/src/lib-index/mail-index-transaction-sort-appends.c b/src/lib-index/mail-index-transaction-sort-appends.c
new file mode 100644
index 0000000..c2a29d3
--- /dev/null
+++ b/src/lib-index/mail-index-transaction-sort-appends.c
@@ -0,0 +1,184 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "seq-range-array.h"
+#include "mail-index-private.h"
+#include "mail-index-transaction-private.h"
+
+
+struct uid_map {
+	uint32_t idx;	/* original position in the appends array */
+	uint32_t uid;	/* UID assigned to that append */
+};
+
+static int uid_map_cmp(const void *p1, const void *p2)
+{
+	const struct uid_map *m1 = p1, *m2 = p2;
+
+	/* qsort() callback: ascending by UID */
+	if (m1->uid != m2->uid)
+		return m1->uid < m2->uid ? -1 : 1;
+	return 0;
+}
+
+static void
+mail_index_transaction_sort_appends_ext(ARRAY_TYPE(seq_array_array) *updates,
+					uint32_t first_new_seq,
+					const uint32_t *old_to_newseq_map)
+{
+	ARRAY_TYPE(seq_array) *ext_rec_arrays;
+	ARRAY_TYPE(seq_array) *old_array;
+	ARRAY_TYPE(seq_array) new_array;
+	unsigned int ext_count;
+	const uint32_t *ext_rec;
+	uint32_t seq;
+	unsigned int i, j, count;
+
+	/* Rebuild each extension's record-update array so its entries use
+	   the new (post-sort) append sequences. Entries for pre-existing
+	   mails (seq < first_new_seq) are kept unchanged. */
+	if (!array_is_created(updates))
+		return;
+
+	ext_rec_arrays = array_get_modifiable(updates, &count);
+	for (j = 0; j < count; j++) {
+		old_array = &ext_rec_arrays[j];
+		if (!array_is_created(old_array))
+			continue;
+
+		ext_count = array_count(old_array);
+		array_create(&new_array, default_pool,
+			     old_array->arr.element_size, ext_count);
+		for (i = 0; i < ext_count; i++) {
+			/* each element = uint32_t sequence + record data */
+			ext_rec = array_idx(old_array, i);
+
+			seq = *ext_rec < first_new_seq ? *ext_rec :
+				old_to_newseq_map[*ext_rec - first_new_seq];
+			(void)mail_index_seq_array_add(&new_array, seq, ext_rec+1,
+						       old_array->arr.element_size -
+						       sizeof(*ext_rec), NULL);
+		}
+		array_free(old_array);
+		ext_rec_arrays[j] = new_array;
+	}
+}
+
+static void
+sort_appends_seq_range(ARRAY_TYPE(seq_range) *array, uint32_t first_new_seq,
+		       const uint32_t *old_to_newseq_map)
+{
+	struct seq_range *range, temp_range;
+	ARRAY_TYPE(seq_range) old_seqs;
+	uint32_t idx, idx1, idx2;
+	unsigned int i, count;
+
+	/* Remap every sequence >= first_new_seq (appended mails) through
+	   old_to_newseq_map, keeping the array a valid sorted range list. */
+	range = array_get_modifiable(array, &count);
+	for (i = 0; i < count; i++) {
+		if (range[i].seq2 >= first_new_seq)
+			break;
+	}
+	if (i == count) {
+		/* nothing to do */
+		return;
+	}
+
+	/* split off the ranges containing appended sequences; a range that
+	   straddles first_new_seq is cut in two */
+	i_array_init(&old_seqs, count - i);
+	if (range[i].seq1 < first_new_seq) {
+		temp_range.seq1 = first_new_seq;
+		temp_range.seq2 = range[i].seq2;
+		array_push_back(&old_seqs, &temp_range);
+		range[i].seq2 = first_new_seq - 1;
+		i++;
+	}
+	array_append(&old_seqs, &range[i], count - i);
+	array_delete(array, i, count - i);
+
+	/* re-add each remapped sequence; seq_range_array_add() keeps the
+	   result sorted and merged */
+	range = array_get_modifiable(&old_seqs, &count);
+	for (i = 0; i < count; i++) {
+		idx1 = range[i].seq1 - first_new_seq;
+		idx2 = range[i].seq2 - first_new_seq;
+		for (idx = idx1; idx <= idx2; idx++)
+			seq_range_array_add(array, old_to_newseq_map[idx]);
+	}
+	array_free(&old_seqs);
+}
+
+static void
+mail_index_transaction_sort_appends_keywords(struct mail_index_transaction *t,
+					     const uint32_t *old_to_newseq_map)
+{
+	struct mail_index_transaction_keyword_update *kw;
+
+	/* remap the appended sequences inside every keyword update */
+	if (!array_is_created(&t->keyword_updates))
+		return;
+
+	array_foreach_modifiable(&t->keyword_updates, kw) {
+		if (array_is_created(&kw->add_seq)) {
+			sort_appends_seq_range(&kw->add_seq, t->first_new_seq,
+					       old_to_newseq_map);
+		}
+		if (array_is_created(&kw->remove_seq)) {
+			sort_appends_seq_range(&kw->remove_seq,
+					       t->first_new_seq,
+					       old_to_newseq_map);
+		}
+	}
+}
+
+void mail_index_transaction_sort_appends(struct mail_index_transaction *t)
+{
+	struct mail_index_record *recs, *sorted_recs;
+	struct uid_map *new_uid_map;
+	uint32_t *old_to_newseq_map;
+	unsigned int i, count;
+
+	/* Sort appended records by UID and remap every sequence stored
+	   elsewhere in the transaction (extension records, keywords) to the
+	   new positions. */
+	if (!array_is_created(&t->appends))
+		return;
+	recs = array_get_modifiable(&t->appends, &count);
+	i_assert(count > 0);
+
+	if (!t->appends_nonsorted) {
+		/* already sorted; debug builds verify the invariant */
+		i_assert(recs[0].uid != 0);
+#ifdef DEBUG
+		for (i = 1; i < count; i++)
+			i_assert(recs[i-1].uid < recs[i].uid);
+#endif
+		return;
+	}
+
+	/* first make a copy of the UIDs and map them to sequences */
+	new_uid_map = i_new(struct uid_map, count);
+	for (i = 0; i < count; i++) {
+		i_assert(recs[i].uid != 0);
+		new_uid_map[i].idx = i;
+		new_uid_map[i].uid = recs[i].uid;
+	}
+
+	/* now sort the UID map */
+	qsort(new_uid_map, count, sizeof(*new_uid_map), uid_map_cmp);
+
+	/* sort mail records */
+	sorted_recs = i_new(struct mail_index_record, count);
+	sorted_recs[0] = recs[new_uid_map[0].idx];
+	for (i = 1; i < count; i++) {
+		sorted_recs[i] = recs[new_uid_map[i].idx];
+		if (sorted_recs[i].uid == sorted_recs[i-1].uid)
+			i_panic("Duplicate UIDs added in transaction");
+	}
+	buffer_write(t->appends.arr.buffer, 0, sorted_recs,
+		     sizeof(*sorted_recs) * count);
+	i_free(sorted_recs);
+
+	/* old_to_newseq_map[old_seq - first_new_seq] = new sequence */
+	old_to_newseq_map = i_new(uint32_t, count);
+	for (i = 0; i < count; i++)
+		old_to_newseq_map[new_uid_map[i].idx] = i + t->first_new_seq;
+	i_free(new_uid_map);
+
+	mail_index_transaction_sort_appends_ext(&t->ext_rec_updates,
+						t->first_new_seq,
+						old_to_newseq_map);
+	mail_index_transaction_sort_appends_ext(&t->ext_rec_atomics,
+						t->first_new_seq,
+						old_to_newseq_map);
+	mail_index_transaction_sort_appends_keywords(t, old_to_newseq_map);
+	i_free(old_to_newseq_map);
+
+	t->appends_nonsorted = FALSE;
+}
diff --git a/src/lib-index/mail-index-transaction-update.c b/src/lib-index/mail-index-transaction-update.c
new file mode 100644
index 0000000..c7bcbd9
--- /dev/null
+++ b/src/lib-index/mail-index-transaction-update.c
@@ -0,0 +1,1367 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+/* Inside transaction we keep messages stored in sequences in uid fields.
+ Before they're written to transaction log the sequences are changed to
+ UIDs. */
+
+#include "lib.h"
+#include "array.h"
+#include "time-util.h"
+#include "mail-index-private.h"
+#include "mail-index-transaction-private.h"
+
+static bool
+mail_index_transaction_has_ext_changes(struct mail_index_transaction *t);
+
+struct mail_index_record *
+mail_index_transaction_lookup(struct mail_index_transaction *t, uint32_t seq)
+{
+	uint32_t idx;
+
+	/* only sequences appended within this transaction can be looked up */
+	i_assert(seq >= t->first_new_seq && seq <= t->last_new_seq);
+
+	idx = seq - t->first_new_seq;
+	return array_idx_modifiable(&t->appends, idx);
+}
+
+void mail_index_transaction_reset_v(struct mail_index_transaction *t)
+{
+	ARRAY_TYPE(seq_array) *rec;
+	struct mail_index_transaction_ext_hdr_update *ext_hdr;
+
+	/* Free all recorded changes and return the transaction to its
+	   initial empty state. Every field added to the struct must be
+	   reset here (see the NOTE in mail-index-transaction-private.h). */
+	if (array_is_created(&t->ext_rec_updates)) {
+		array_foreach_modifiable(&t->ext_rec_updates, rec) {
+			if (array_is_created(rec))
+				array_free(rec);
+		}
+		array_free(&t->ext_rec_updates);
+	}
+	if (array_is_created(&t->ext_rec_atomics)) {
+		array_foreach_modifiable(&t->ext_rec_atomics, rec) {
+			if (array_is_created(rec))
+				array_free(rec);
+		}
+		array_free(&t->ext_rec_atomics);
+	}
+	if (array_is_created(&t->ext_hdr_updates)) {
+		array_foreach_modifiable(&t->ext_hdr_updates, ext_hdr) {
+			i_free(ext_hdr->data);
+			i_free(ext_hdr->mask);
+		}
+		array_free(&t->ext_hdr_updates);
+	}
+
+	if (array_is_created(&t->keyword_updates)) {
+		struct mail_index_transaction_keyword_update *u;
+
+		array_foreach_modifiable(&t->keyword_updates, u) {
+			if (array_is_created(&u->add_seq))
+				array_free(&u->add_seq);
+			if (array_is_created(&u->remove_seq))
+				array_free(&u->remove_seq);
+		}
+		array_free(&t->keyword_updates);
+	}
+
+	if (array_is_created(&t->appends))
+		array_free(&t->appends);
+	if (array_is_created(&t->modseq_updates))
+		array_free(&t->modseq_updates);
+	if (array_is_created(&t->expunges))
+		array_free(&t->expunges);
+	if (array_is_created(&t->updates))
+		array_free(&t->updates);
+	if (array_is_created(&t->ext_resizes))
+		array_free(&t->ext_resizes);
+	if (array_is_created(&t->ext_resets))
+		array_free(&t->ext_resets);
+	if (array_is_created(&t->ext_reset_ids))
+		array_free(&t->ext_reset_ids);
+	if (array_is_created(&t->ext_reset_atomic))
+		array_free(&t->ext_reset_atomic);
+	buffer_free(&t->attribute_updates);
+	buffer_free(&t->attribute_updates_suffix);
+
+	/* new appends will get sequences after the view's current mails */
+	t->first_new_seq = mail_index_view_get_messages_count(t->view)+1;
+	t->last_new_seq = 0;
+	t->last_update_idx = 0;
+	t->min_flagupdate_seq = 0;
+	t->max_flagupdate_seq = 0;
+	t->min_highest_modseq = 0;
+
+	memset(t->pre_hdr_mask, 0, sizeof(t->pre_hdr_mask));
+	memset(t->post_hdr_mask, 0, sizeof(t->post_hdr_mask));
+
+	t->appends_nonsorted = FALSE;
+	t->expunges_nonsorted = FALSE;
+	t->drop_unnecessary_flag_updates = FALSE;
+	t->pre_hdr_changed = FALSE;
+	t->post_hdr_changed = FALSE;
+	t->reset = FALSE;
+	t->index_deleted = FALSE;
+	t->index_undeleted = FALSE;
+	t->log_updates = FALSE;
+	t->log_ext_updates = FALSE;
+	t->tail_offset_changed = FALSE;
+}
+
+void mail_index_transaction_set_log_updates(struct mail_index_transaction *t)
+{
+	/* Recompute whether the transaction has non-extension changes that
+	   must be written to the transaction log. */
+	/* flag updates aren't included in log_updates */
+	t->log_updates = array_is_created(&t->appends) ||
+		array_is_created(&t->modseq_updates) ||
+		array_is_created(&t->expunges) ||
+		array_is_created(&t->keyword_updates) ||
+		t->attribute_updates != NULL ||
+		t->pre_hdr_changed || t->post_hdr_changed ||
+		t->min_highest_modseq != 0;
+}
+
+void mail_index_update_day_headers(struct mail_index_transaction *t,
+				   time_t day_stamp)
+{
+	struct mail_index_header hdr;
+	const struct mail_index_record *rec;
+	const int max_days = N_ELEMENTS(hdr.day_first_uid);
+	time_t stamp;
+	int i, days;
+
+	/* Update the header's day_stamp / day_first_uid[] fields, which
+	   record the first appended UID for each of the last max_days days.
+	   Caller must have at least one append in the transaction. */
+	hdr = *mail_index_get_header(t->view);
+	rec = array_front(&t->appends);
+
+	stamp = time_to_local_day_start(day_stamp);
+	if ((time_t)hdr.day_stamp >= stamp)
+		return;
+
+	/* get number of days since last message */
+	days = (stamp - hdr.day_stamp) / (3600*24);
+	if (days > max_days)
+		days = max_days;
+
+	/* @UNSAFE: move days forward and fill the missing days with old
+	   day_first_uid[0]. */
+	if (days > 0 && days < max_days)
+		memmove(hdr.day_first_uid + days, hdr.day_first_uid,
+			(max_days - days) * sizeof(hdr.day_first_uid[0]));
+	for (i = 1; i < days; i++)
+		hdr.day_first_uid[i] = hdr.day_first_uid[0];
+
+	hdr.day_stamp = stamp;
+	hdr.day_first_uid[0] = rec->uid;
+
+	/* write only the changed fields into the pending header update */
+	mail_index_update_header(t,
+		offsetof(struct mail_index_header, day_stamp),
+		&hdr.day_stamp, sizeof(hdr.day_stamp), FALSE);
+	mail_index_update_header(t,
+		offsetof(struct mail_index_header, day_first_uid),
+		hdr.day_first_uid, sizeof(hdr.day_first_uid), FALSE);
+}
+
+void mail_index_append(struct mail_index_transaction *t, uint32_t uid,
+		       uint32_t *seq_r)
+{
+	struct mail_index_record *rec;
+
+	/* Append a new message with the given UID (0 = assign the UID later
+	   via mail_index_append_finish_uids*()). The message's transaction-
+	   local sequence is returned in seq_r. */
+	i_assert(!t->no_appends);
+
+	t->log_updates = TRUE;
+
+	if (!array_is_created(&t->appends))
+		i_array_init(&t->appends, 32);
+
+	/* sequence number is visible only inside given view,
+	   so let it generate it */
+	if (t->last_new_seq != 0)
+		*seq_r = ++t->last_new_seq;
+	else
+		*seq_r = t->last_new_seq = t->first_new_seq;
+
+	rec = array_append_space(&t->appends);
+	if (uid != 0) {
+		rec->uid = uid;
+		if (!t->appends_nonsorted &&
+		    t->last_new_seq != t->first_new_seq) {
+			/* if previous record's UID is larger than this one,
+			   we'll have to sort the appends later */
+			rec = mail_index_transaction_lookup(t, *seq_r - 1);
+			if (rec->uid > uid)
+				t->appends_nonsorted = TRUE;
+			else if (rec->uid == uid)
+				i_panic("Duplicate UIDs added in transaction");
+		}
+		if (t->highest_append_uid < uid)
+			t->highest_append_uid = uid;
+	}
+}
+
+void mail_index_append_finish_uids(struct mail_index_transaction *t,
+				   uint32_t first_uid,
+				   ARRAY_TYPE(seq_range) *uids_r)
+{
+	/* Simple wrapper: assign UIDs starting from first_uid with the same
+	   minimum-allowed UID. Note: don't "return" the void call's result —
+	   `return <void expr>;` in a void function is a C constraint
+	   violation (C11 6.8.6.4), even though some compilers accept it. */
+	mail_index_append_finish_uids_full(t, first_uid, first_uid, uids_r);
+}
+
+void mail_index_append_finish_uids_full(struct mail_index_transaction *t,
+					uint32_t min_allowed_uid,
+					uint32_t first_new_uid,
+					ARRAY_TYPE(seq_range) *uids_r)
+{
+	struct mail_index_record *recs;
+	unsigned int i, count;
+	struct seq_range *range;
+	uint32_t next_uid;
+
+	/* Assign UIDs to appended messages that don't yet have a valid one
+	   (uid == 0 or below min_allowed_uid), starting at first_new_uid or
+	   above the highest already-assigned UID. The final UID ranges of
+	   all appends are returned in uids_r. */
+	if (!array_is_created(&t->appends))
+		return;
+
+	i_assert(min_allowed_uid <= first_new_uid);
+	i_assert(first_new_uid < (uint32_t)-1);
+
+	/* first find the highest assigned uid */
+	recs = array_get_modifiable(&t->appends, &count);
+	i_assert(count > 0);
+
+	next_uid = first_new_uid;
+	for (i = 0; i < count; i++) {
+		if (next_uid <= recs[i].uid)
+			next_uid = recs[i].uid + 1;
+	}
+	i_assert(next_uid > 0 && next_uid < (uint32_t)-1);
+
+	/* assign missing uids */
+	for (i = 0; i < count; i++) {
+		if (recs[i].uid == 0 || recs[i].uid < min_allowed_uid) {
+			i_assert(next_uid < (uint32_t)-1);
+			recs[i].uid = next_uid++;
+			if (t->highest_append_uid < recs[i].uid)
+				t->highest_append_uid = recs[i].uid;
+		} else {
+			/* a preassigned UID may now be out of order */
+			t->appends_nonsorted = TRUE;
+		}
+	}
+
+	/* write the saved uids range */
+	array_clear(uids_r);
+	range = array_append_space(uids_r);
+	range->seq1 = range->seq2 = recs[0].uid;
+	for (i = 1; i < count; i++) {
+		if (range->seq2 + 1 == recs[i].uid)
+			range->seq2++;
+		else {
+			range = array_append_space(uids_r);
+			range->seq1 = range->seq2 = recs[i].uid;
+		}
+	}
+}
+
+void mail_index_update_modseq(struct mail_index_transaction *t, uint32_t seq,
+			      uint64_t min_modseq)
+{
+	struct mail_transaction_modseq_update *update;
+
+	/* modseq=1 is the minimum always and it's only for mails that were
+	   created/modified before modseqs were enabled. */
+	if (min_modseq <= 1)
+		return;
+
+	if (!array_is_created(&t->modseq_updates))
+		i_array_init(&t->modseq_updates, 32);
+
+	update = array_append_space(&t->modseq_updates);
+	update->uid = seq;
+	/* the 64-bit modseq is stored split into two 32-bit halves */
+	update->modseq_low32 = min_modseq & 0xffffffff;
+	update->modseq_high32 = min_modseq >> 32;
+
+	t->log_updates = TRUE;
+}
+
+void mail_index_update_highest_modseq(struct mail_index_transaction *t,
+				      uint64_t min_modseq)
+{
+	/* modseq=1 is the minimum always and it's only for mails that were
+	   created/modified before modseqs were enabled. */
+	if (min_modseq <= 1)
+		return;
+
+	/* keep only the highest requested floor for the index's
+	   highest-modseq */
+	if (min_modseq > t->min_highest_modseq)
+		t->min_highest_modseq = min_modseq;
+
+	t->log_updates = TRUE;
+}
+
+static void
+mail_index_revert_ext(ARRAY_TYPE(seq_array_array) *ext_updates,
+		      uint32_t seq)
+{
+	ARRAY_TYPE(seq_array) *ext_array;
+	unsigned int pos;
+
+	/* drop any per-record extension update recorded for this sequence */
+	if (!array_is_created(ext_updates))
+		return;
+
+	array_foreach_modifiable(ext_updates, ext_array) {
+		if (!array_is_created(ext_array))
+			continue;
+		if (mail_index_seq_array_lookup(ext_array, seq, &pos))
+			array_delete(ext_array, pos, 1);
+	}
+}
+
+static void
+mail_index_revert_changes_common(struct mail_index_transaction *t, uint32_t seq)
+{
+	struct mail_index_transaction_keyword_update *kw_update;
+	unsigned int i;
+
+	/* Undo everything recorded for this sequence except flag updates
+	   (the caller handles those separately). */
+	/* remove extension updates */
+	mail_index_revert_ext(&t->ext_rec_updates, seq);
+	mail_index_revert_ext(&t->ext_rec_atomics, seq);
+	t->log_ext_updates = mail_index_transaction_has_ext_changes(t);
+
+	/* remove keywords */
+	if (array_is_created(&t->keyword_updates)) {
+		array_foreach_modifiable(&t->keyword_updates, kw_update) {
+			if (array_is_created(&kw_update->add_seq)) {
+				seq_range_array_remove(&kw_update->add_seq,
+						       seq);
+			}
+			if (array_is_created(&kw_update->remove_seq)) {
+				seq_range_array_remove(&kw_update->remove_seq,
+						       seq);
+			}
+		}
+	}
+	/* remove modseqs */
+	if (array_is_created(&t->modseq_updates) &&
+	    mail_index_seq_array_lookup((void *)&t->modseq_updates, seq, &i))
+		array_delete(&t->modseq_updates, i, 1);
+}
+
+void mail_index_revert_changes(struct mail_index_transaction *t, uint32_t seq)
+{
+	/* Remove every change recorded for this sequence: extension records,
+	   keywords, modseqs and flag updates. */
+	mail_index_revert_changes_common(t, seq);
+	(void)mail_index_cancel_flag_updates(t, seq);
+}
+
+static void
+mail_index_expunge_last_append(struct mail_index_transaction *t, uint32_t seq)
+{
+	/* Only the most recent append can be removed — anything else would
+	   require renumbering the following sequences. */
+	i_assert(seq == t->last_new_seq);
+
+	mail_index_revert_changes_common(t, seq);
+
+	/* and finally remove the append itself */
+	array_delete(&t->appends, seq - t->first_new_seq, 1);
+	t->last_new_seq--;
+	if (t->first_new_seq > t->last_new_seq) {
+		/* no appends left */
+		t->last_new_seq = 0;
+		t->appends_nonsorted = FALSE;
+		array_free(&t->appends);
+	}
+	mail_index_transaction_set_log_updates(t);
+}
+
+void mail_index_expunge(struct mail_index_transaction *t, uint32_t seq)
+{
+	/* expunge without a known GUID: use an all-zero GUID (statics are
+	   zero-initialized) */
+	static guid_128_t null_guid;
+
+	mail_index_expunge_guid(t, seq, null_guid);
+}
+
+void mail_index_expunge_guid(struct mail_index_transaction *t, uint32_t seq,
+			     const guid_128_t guid_128)
+{
+	const struct mail_transaction_expunge_guid *expunges;
+	struct mail_transaction_expunge_guid *expunge;
+	unsigned int count;
+
+	/* Expunge the mail at the given sequence, recording its GUID so the
+	   log readers can verify which mail was meant. */
+	i_assert(seq > 0);
+	if (seq >= t->first_new_seq) {
+		/* we can handle only the last append. otherwise we'd have to
+		   renumber sequences and that gets tricky. for now this is
+		   enough, since we typically want to expunge all the
+		   appends. */
+		mail_index_expunge_last_append(t, seq);
+	} else {
+		t->log_updates = TRUE;
+
+		/* ignore duplicates here. drop them when committing. */
+		if (!array_is_created(&t->expunges))
+			i_array_init(&t->expunges, 64);
+		else if (!t->expunges_nonsorted) {
+			/* usually expunges are added in increasing order. */
+			expunges = array_get(&t->expunges, &count);
+			if (count > 0 && seq < expunges[count-1].uid)
+				t->expunges_nonsorted = TRUE;
+		}
+		/* note: .uid holds a sequence until the transaction is
+		   converted to UIDs at commit time */
+		expunge = array_append_space(&t->expunges);
+		expunge->uid = seq;
+		memcpy(expunge->guid_128, guid_128, sizeof(expunge->guid_128));
+	}
+}
+
+/* Widen the transaction's [min_flagupdate_seq, max_flagupdate_seq] range
+   so that it includes seq1..seq2. A min of 0 means no flag updates yet. */
+static void update_minmax_flagupdate_seq(struct mail_index_transaction *t,
+ uint32_t seq1, uint32_t seq2)
+{
+ if (t->min_flagupdate_seq == 0) {
+ t->min_flagupdate_seq = seq1;
+ t->max_flagupdate_seq = seq2;
+ } else {
+ if (t->min_flagupdate_seq > seq1)
+ t->min_flagupdate_seq = seq1;
+ if (t->max_flagupdate_seq < seq2)
+ t->max_flagupdate_seq = seq2;
+ }
+}
+
+/* Binary-search t->updates within [left_idx, right_idx) and return either
+   the index of the first flag update whose uid1..uid2 range contains @seq,
+   or the index where an update for @seq should be inserted. */
+unsigned int
+mail_index_transaction_get_flag_update_pos(struct mail_index_transaction *t,
+ unsigned int left_idx,
+ unsigned int right_idx,
+ uint32_t seq)
+{
+ const struct mail_index_flag_update *updates;
+ unsigned int idx, count;
+
+ updates = array_get(&t->updates, &count);
+ i_assert(left_idx <= right_idx && right_idx <= count);
+ i_assert(count < INT_MAX);
+
+ /* find the first update with either overlapping range,
+ or the update which will come after our insert */
+ idx = left_idx;
+ while (left_idx < right_idx) {
+ idx = (left_idx + right_idx) / 2;
+
+ if (updates[idx].uid2 < seq)
+ left_idx = idx+1;
+ else if (updates[idx].uid1 > seq)
+ right_idx = idx;
+ else
+ break;
+ }
+ /* if the loop ended with idx pointing below the insert position,
+ step forward to it */
+ if (left_idx > idx)
+ idx++;
+ return idx;
+}
+
+/* Insert flag update @u into the sorted t->updates array at position @idx,
+   splitting and merging overlapping ranges as needed so that the array
+   stays sorted and non-overlapping. Updates t->last_update_idx to point
+   at the update touched last (fast-path hint for sequential callers). */
+static void
+mail_index_insert_flag_update(struct mail_index_transaction *t,
+ struct mail_index_flag_update u,
+ unsigned int idx)
+{
+ struct mail_index_flag_update *updates, tmp_update;
+ unsigned int count, first_idx, max;
+
+ updates = array_get_modifiable(&t->updates, &count);
+
+ /* overlapping ranges, split/merge them */
+ i_assert(idx == 0 || updates[idx-1].uid2 < u.uid1);
+ i_assert(idx == count || updates[idx].uid2 >= u.uid1);
+
+ /* first we'll just add the changes without trying to merge anything */
+ first_idx = idx;
+ for (; idx < count && u.uid2 >= updates[idx].uid1; idx++) {
+ i_assert(u.uid1 <= updates[idx].uid2);
+ if (u.uid1 != updates[idx].uid1 &&
+ (updates[idx].add_flags != u.add_flags ||
+ updates[idx].remove_flags != u.remove_flags)) {
+ if (u.uid1 < updates[idx].uid1) {
+ /* insert new update */
+ tmp_update = u;
+ tmp_update.uid2 = updates[idx].uid1 - 1;
+ } else {
+ /* split existing update from beginning */
+ tmp_update = updates[idx];
+ tmp_update.uid2 = u.uid1 - 1;
+ updates[idx].uid1 = u.uid1;
+ }
+
+ i_assert(tmp_update.uid1 <= tmp_update.uid2);
+ i_assert(updates[idx].uid1 <= updates[idx].uid2);
+
+ array_insert(&t->updates, idx, &tmp_update, 1);
+ updates = array_get_modifiable(&t->updates, &count);
+ idx++;
+ } else if (u.uid1 < updates[idx].uid1) {
+ updates[idx].uid1 = u.uid1;
+ }
+
+ if (u.uid2 < updates[idx].uid2 &&
+ (updates[idx].add_flags != u.add_flags ||
+ updates[idx].remove_flags != u.remove_flags)) {
+ /* split existing update from end */
+ tmp_update = updates[idx];
+ tmp_update.uid2 = u.uid2;
+ updates[idx].uid1 = u.uid2 + 1;
+
+ i_assert(tmp_update.uid1 <= tmp_update.uid2);
+ i_assert(updates[idx].uid1 <= updates[idx].uid2);
+
+ array_insert(&t->updates, idx, &tmp_update, 1);
+ updates = array_get_modifiable(&t->updates, &count);
+ }
+
+ /* merge u's flags into the (now exactly overlapping) update;
+ add and remove masks are kept mutually exclusive */
+ updates[idx].add_flags =
+ (updates[idx].add_flags | u.add_flags) &
+ ENUM_NEGATE(u.remove_flags);
+ updates[idx].remove_flags =
+ (updates[idx].remove_flags | u.remove_flags) &
+ ENUM_NEGATE(u.add_flags);
+ u.uid1 = updates[idx].uid2 + 1;
+
+ if (updates[idx].add_flags == 0 &&
+ updates[idx].remove_flags == 0) {
+ /* we can remove this update completely */
+ array_delete(&t->updates, idx, 1);
+ updates = array_get_modifiable(&t->updates, &count);
+ }
+
+ if (u.uid1 > u.uid2) {
+ /* break here before idx++ so last_update_idx is set
+ correctly */
+ break;
+ }
+ }
+ i_assert(idx <= count);
+
+ if (u.uid1 <= u.uid2) {
+ /* a tail of u remains past all existing updates: insert it */
+ i_assert(idx == 0 || updates[idx-1].uid2 < u.uid1);
+ i_assert(idx == count || updates[idx].uid1 > u.uid2);
+ array_insert(&t->updates, idx, &u, 1);
+ }
+ updates = array_get_modifiable(&t->updates, &count);
+ t->last_update_idx = idx == count ? count-1 : idx;
+
+ /* merge everything */
+ idx = first_idx == 0 ? 0 : first_idx - 1;
+ max = count == 0 ? 0 : I_MIN(t->last_update_idx + 1, count-1);
+ for (; idx < max; ) {
+ if (updates[idx].uid2 + 1 == updates[idx+1].uid1 &&
+ updates[idx].add_flags == updates[idx+1].add_flags &&
+ updates[idx].remove_flags == updates[idx+1].remove_flags) {
+ /* merge */
+ updates[idx].uid2 = updates[idx+1].uid2;
+ array_delete(&t->updates, idx + 1, 1);
+ max--;
+ if (t->last_update_idx > idx)
+ t->last_update_idx--;
+ updates = array_get_modifiable(&t->updates, &count);
+ } else {
+ idx++;
+ }
+ }
+}
+
+/* Apply @modify_type (replace/add/remove) of @flags directly to a record's
+   flags field. Used for messages appended within this same transaction. */
+static void mail_index_record_modify_flags(struct mail_index_record *rec,
+ enum modify_type modify_type,
+ enum mail_flags flags)
+{
+ switch (modify_type) {
+ case MODIFY_REPLACE:
+ rec->flags = flags;
+ break;
+ case MODIFY_ADD:
+ rec->flags |= flags;
+ break;
+ case MODIFY_REMOVE:
+ rec->flags &= ENUM_NEGATE(flags);
+ break;
+ }
+}
+
+/* Record a flag change for sequences seq1..seq2. Appends made within this
+   transaction are modified in place; changes to existing messages are
+   stored in the sorted t->updates array, with a fast path for the common
+   case of sequentially increasing updates. */
+void mail_index_update_flags_range(struct mail_index_transaction *t,
+ uint32_t seq1, uint32_t seq2,
+ enum modify_type modify_type,
+ enum mail_flags flags)
+{
+ struct mail_index_record *rec;
+ struct mail_index_flag_update u, *last_update;
+ unsigned int idx, first_idx, count;
+
+ update_minmax_flagupdate_seq(t, seq1, seq2);
+ if (seq2 >= t->first_new_seq) {
+ /* updates for appended messages, modify them directly */
+ uint32_t seq;
+
+ for (seq = I_MAX(t->first_new_seq, seq1); seq <= seq2; seq++) {
+ rec = mail_index_transaction_lookup(t, seq);
+ mail_index_record_modify_flags(rec, modify_type, flags);
+ }
+ if (seq1 >= t->first_new_seq)
+ return;
+
+ /* range contains also existing messages. update them next. */
+ seq2 = t->first_new_seq - 1;
+ }
+
+ i_assert(seq1 <= seq2 && seq1 > 0);
+ i_assert(seq2 <= mail_index_view_get_messages_count(t->view));
+
+ if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_AVOID_FLAG_UPDATES) != 0)
+ t->drop_unnecessary_flag_updates = TRUE;
+
+ /* translate modify_type into separate add/remove flag masks */
+ i_zero(&u);
+ u.uid1 = seq1;
+ u.uid2 = seq2;
+
+ switch (modify_type) {
+ case MODIFY_REPLACE:
+ u.add_flags = flags;
+ u.remove_flags = ENUM_NEGATE(flags) & MAIL_INDEX_FLAGS_MASK;
+ break;
+ case MODIFY_ADD:
+ if (flags == 0)
+ return;
+ u.add_flags = flags;
+ break;
+ case MODIFY_REMOVE:
+ if (flags == 0)
+ return;
+ u.remove_flags = flags;
+ break;
+ }
+
+ if (!array_is_created(&t->updates)) {
+ i_array_init(&t->updates, 256);
+ array_push_back(&t->updates, &u);
+ return;
+ }
+
+ last_update = array_get_modifiable(&t->updates, &count);
+ if (t->last_update_idx < count) {
+ /* fast path - hopefully we're updating the next message,
+ or a message that is to be appended as last update */
+ last_update += t->last_update_idx;
+ if (seq1 - 1 == last_update->uid2) {
+ if (u.add_flags == last_update->add_flags &&
+ u.remove_flags == last_update->remove_flags &&
+ (t->last_update_idx + 1 == count ||
+ last_update[1].uid1 > seq2)) {
+ /* we can just update the UID range */
+ last_update->uid2 = seq2;
+ return;
+ }
+ } else if (seq1 > last_update->uid2) {
+ /* hopefully we can just append it */
+ t->last_update_idx++;
+ last_update++;
+ }
+ }
+
+ if (t->last_update_idx == count)
+ array_push_back(&t->updates, &u);
+ else {
+ i_assert(t->last_update_idx < count);
+
+ /* slow path */
+ if (seq1 > last_update->uid2) {
+ /* added after this */
+ first_idx = t->last_update_idx + 1;
+ } else {
+ /* added before this or on top of this */
+ first_idx = 0;
+ count = t->last_update_idx + 1;
+ }
+ idx = mail_index_transaction_get_flag_update_pos(t, first_idx,
+ count, u.uid1);
+ mail_index_insert_flag_update(t, u, idx);
+ }
+}
+
+/* Convenience wrapper: flag change for a single sequence. */
+void mail_index_update_flags(struct mail_index_transaction *t, uint32_t seq,
+ enum modify_type modify_type,
+ enum mail_flags flags)
+{
+ mail_index_update_flags_range(t, seq, seq, modify_type, flags);
+}
+
+/* Record a mailbox attribute change. The key entry ('+'/'-' prefix,
+   private/shared marker and NUL-terminated key) is appended to
+   t->attribute_updates, while the timestamp (and value length for sets)
+   goes into the parallel t->attribute_updates_suffix buffer. */
+static void
+mail_index_attribute_set_full(struct mail_index_transaction *t,
+ const char *key, bool pvt, char prefix,
+ time_t timestamp, uint32_t value_len)
+{
+ /* timestamp is stored as a 32-bit value */
+ uint32_t ts = timestamp;
+
+ if (t->attribute_updates == NULL) {
+ t->attribute_updates = buffer_create_dynamic(default_pool, 64);
+ t->attribute_updates_suffix = buffer_create_dynamic(default_pool, 64);
+ }
+ buffer_append_c(t->attribute_updates, prefix);
+ buffer_append_c(t->attribute_updates, pvt ? 'p' : 's');
+ buffer_append(t->attribute_updates, key, strlen(key)+1);
+
+ buffer_append(t->attribute_updates_suffix, &ts, sizeof(ts));
+ if (prefix == '+') {
+ /* only sets carry a value length */
+ buffer_append(t->attribute_updates_suffix,
+ &value_len, sizeof(value_len));
+ }
+ t->log_updates = TRUE;
+}
+
+/* Record setting a mailbox attribute value. */
+void mail_index_attribute_set(struct mail_index_transaction *t,
+ bool pvt, const char *key,
+ time_t timestamp, uint32_t value_len)
+{
+ mail_index_attribute_set_full(t, key, pvt, '+', timestamp, value_len);
+}
+
+/* Record deleting a mailbox attribute. */
+void mail_index_attribute_unset(struct mail_index_transaction *t,
+ bool pvt, const char *key,
+ time_t timestamp)
+{
+ mail_index_attribute_set_full(t, key, pvt, '-', timestamp, 0);
+}
+
+/* Record a change of @size bytes at @offset in the base index header.
+   @prepend selects whether the change is applied before (pre) or after
+   (post) syncing; the mask bytes mark which header bytes were changed. */
+void mail_index_update_header(struct mail_index_transaction *t,
+ size_t offset, const void *data, size_t size,
+ bool prepend)
+{
+ i_assert(offset < sizeof(t->pre_hdr_change));
+ i_assert(size <= sizeof(t->pre_hdr_change) - offset);
+
+ t->log_updates = TRUE;
+
+ if (prepend) {
+ t->pre_hdr_changed = TRUE;
+ memcpy(t->pre_hdr_change + offset, data, size);
+ for (; size > 0; size--)
+ t->pre_hdr_mask[offset++] = 1;
+ } else {
+ t->post_hdr_changed = TRUE;
+ memcpy(t->post_hdr_change + offset, data, size);
+ for (; size > 0; size--)
+ t->post_hdr_mask[offset++] = 1;
+ }
+}
+
+/* Grow the per-record size of an extension's pending record updates to
+   @new_record_size. A fresh seq_array is allocated and the old records
+   are copied into the beginning of each new record; the added tail bytes
+   stay zero-filled. */
+static void
+mail_index_ext_rec_updates_resize(struct mail_index_transaction *t,
+ uint32_t ext_id, uint16_t new_record_size)
+{
+ ARRAY_TYPE(seq_array) *array, old_array;
+ unsigned int i;
+
+ if (!array_is_created(&t->ext_rec_updates))
+ return;
+ array = array_idx_modifiable(&t->ext_rec_updates, ext_id);
+ if (!array_is_created(array))
+ return;
+
+ old_array = *array;
+ i_zero(array);
+ mail_index_seq_array_alloc(array, new_record_size);
+
+ /* copy the records' beginnings. leave the end zero-filled. */
+ for (i = 0; i < array_count(&old_array); i++) {
+ const void *old_record = array_idx(&old_array, i);
+
+ memcpy(array_append_space(array), old_record,
+ old_array.arr.element_size);
+ }
+ array_free(&old_array);
+}
+
+/* Record a resize of extension @ext_id's header/record sizes and record
+   alignment. (uint32_t)-1 / (uint16_t)-1 mean "keep the current value".
+   Record size can only grow; pending record updates are resized to match.
+   The resize is stored as an ext-intro with name_size=1 as the "resized"
+   marker. */
+void mail_index_ext_resize(struct mail_index_transaction *t, uint32_t ext_id,
+ uint32_t hdr_size, uint16_t record_size,
+ uint16_t record_align)
+{
+ const struct mail_index_registered_ext *rext;
+ const struct mail_transaction_ext_intro *resizes;
+ unsigned int resizes_count;
+ struct mail_transaction_ext_intro intro;
+ uint32_t old_record_size = 0, old_record_align, old_header_size;
+
+ i_zero(&intro);
+ rext = array_idx(&t->view->index->extensions, ext_id);
+
+ /* get ext_id from transaction's map if it's there */
+ if (!mail_index_map_get_ext_idx(t->view->map, ext_id, &intro.ext_id)) {
+ /* have to create it */
+ intro.ext_id = (uint32_t)-1;
+ old_record_align = rext->record_align;
+ old_header_size = rext->hdr_size;
+ } else {
+ const struct mail_index_ext *ext;
+
+ ext = array_idx(&t->view->map->extensions, intro.ext_id);
+ old_record_align = ext->record_align;
+ old_header_size = ext->hdr_size;
+ }
+
+ /* get the record size. if there are any existing record updates,
+ they're using the registered size, not the map's existing
+ record_size. */
+ if (array_is_created(&t->ext_resizes))
+ resizes = array_get(&t->ext_resizes, &resizes_count);
+ else {
+ resizes = NULL;
+ resizes_count = 0;
+ }
+ if (ext_id < resizes_count && resizes[ext_id].name_size != 0) {
+ /* already resized once. use the resized value. */
+ old_record_size = resizes[ext_id].record_size;
+ } else {
+ /* use the registered values. */
+ old_record_size = rext->record_size;
+ }
+
+ if (record_size != old_record_size && record_size != (uint16_t)-1) {
+ /* if record_size grows, we'll just resize the existing
+ ext_rec_updates array. it's not possible to shrink
+ record_size without data loss. */
+ i_assert(record_size > old_record_size);
+ mail_index_ext_rec_updates_resize(t, ext_id, record_size);
+ }
+
+ t->log_ext_updates = TRUE;
+
+ if (!array_is_created(&t->ext_resizes))
+ i_array_init(&t->ext_resizes, ext_id + 2);
+
+ intro.hdr_size = hdr_size != (uint32_t)-1 ? hdr_size : old_header_size;
+ if (record_size != (uint16_t)-1) {
+ /* record_size and record_align must be changed together */
+ i_assert(record_align != (uint16_t)-1);
+ intro.record_size = record_size;
+ intro.record_align = record_align;
+ } else {
+ i_assert(record_align == (uint16_t)-1);
+ intro.record_size = old_record_size;
+ intro.record_align = old_record_align;
+ }
+ /* nonzero name_size marks this intro entry as a valid resize */
+ intro.name_size = 1;
+ array_idx_set(&t->ext_resizes, ext_id, &intro);
+}
+
+/* Resize only the extension's header; record size/align are unchanged. */
+void mail_index_ext_resize_hdr(struct mail_index_transaction *t,
+ uint32_t ext_id, uint32_t hdr_size)
+{
+ mail_index_ext_resize(t, ext_id, hdr_size, (uint16_t)-1, (uint16_t)-1);
+}
+
+/* Record a reset of extension @ext_id to @reset_id, optionally clearing
+   its existing data. Also drops any changes already recorded for the
+   extension in this transaction (via mail_index_ext_set_reset_id()). */
+void mail_index_ext_reset(struct mail_index_transaction *t, uint32_t ext_id,
+ uint32_t reset_id, bool clear_data)
+{
+ struct mail_transaction_ext_reset reset;
+
+ i_assert(reset_id != 0);
+
+ i_zero(&reset);
+ reset.new_reset_id = reset_id;
+ reset.preserve_data = clear_data ? 0 : 1;
+
+ mail_index_ext_set_reset_id(t, ext_id, reset_id);
+
+ if (!array_is_created(&t->ext_resets))
+ i_array_init(&t->ext_resets, ext_id + 2);
+ array_idx_set(&t->ext_resets, ext_id, &reset);
+ t->log_ext_updates = TRUE;
+}
+
+/* Record an atomic reset-increment: the reset succeeds only if the
+   extension's reset_id still equals @prev_reset_id at commit time.
+   (uint32_t)-1 as the new reset_id marks the increment request. */
+void mail_index_ext_reset_inc(struct mail_index_transaction *t, uint32_t ext_id,
+ uint32_t prev_reset_id, bool clear_data)
+{
+ uint32_t expected_reset_id = prev_reset_id + 1;
+
+ mail_index_ext_reset(t, ext_id, (uint32_t)-1, clear_data);
+
+ if (!array_is_created(&t->ext_reset_atomic))
+ i_array_init(&t->ext_reset_atomic, ext_id + 2);
+ array_idx_set(&t->ext_reset_atomic, ext_id, &expected_reset_id);
+}
+
+/* Return TRUE if any per-extension seq_array in @arr has been created
+   (i.e. there are pending extension record updates of that kind). */
+static bool
+mail_index_transaction_has_ext_updates(const ARRAY_TYPE(seq_array_array) *arr)
+{
+ const ARRAY_TYPE(seq_array) *array;
+
+ if (array_is_created(arr)) {
+ array_foreach(arr, array) {
+ if (array_is_created(array))
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/* Return TRUE if the transaction still contains any extension changes:
+   record updates, atomic increments, header updates, resets or resizes.
+   Used to recompute t->log_ext_updates after changes are reverted. */
+static bool
+mail_index_transaction_has_ext_changes(struct mail_index_transaction *t)
+{
+ if (mail_index_transaction_has_ext_updates(&t->ext_rec_updates))
+ return TRUE;
+ if (mail_index_transaction_has_ext_updates(&t->ext_rec_atomics))
+ return TRUE;
+
+ if (array_is_created(&t->ext_hdr_updates)) {
+ const struct mail_index_transaction_ext_hdr_update *hdr;
+
+ array_foreach(&t->ext_hdr_updates, hdr) {
+ if (hdr->alloc_size > 0)
+ return TRUE;
+ }
+ }
+ if (array_is_created(&t->ext_resets)) {
+ const struct mail_transaction_ext_reset *reset;
+
+ array_foreach(&t->ext_resets, reset) {
+ if (reset->new_reset_id != 0)
+ return TRUE;
+ }
+ }
+ if (array_is_created(&t->ext_resizes)) {
+ const struct mail_transaction_ext_intro *resize;
+
+ array_foreach(&t->ext_resizes, resize) {
+ if (resize->name_size > 0)
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/* Clear any pending record updates for extension @ext_id in @arr
+   (either ext_rec_updates or ext_rec_atomics). */
+static void
+mail_index_ext_update_reset(ARRAY_TYPE(seq_array_array) *arr, uint32_t ext_id)
+{
+ if (array_is_created(arr) && ext_id < array_count(arr)) {
+ /* if extension records have been updated, clear them */
+ ARRAY_TYPE(seq_array) *array;
+
+ array = array_idx_modifiable(arr, ext_id);
+ if (array_is_created(array))
+ array_clear(array);
+ }
+}
+
+/* Drop every change recorded for extension @ext_id in this transaction:
+   record updates, atomics, header updates, resets and resizes. Recomputes
+   t->log_ext_updates afterwards. */
+static void
+mail_index_ext_reset_changes(struct mail_index_transaction *t, uint32_t ext_id)
+{
+ mail_index_ext_update_reset(&t->ext_rec_updates, ext_id);
+ mail_index_ext_update_reset(&t->ext_rec_atomics, ext_id);
+ if (array_is_created(&t->ext_hdr_updates) &&
+ ext_id < array_count(&t->ext_hdr_updates)) {
+ /* if extension headers have been updated, clear them */
+ struct mail_index_transaction_ext_hdr_update *hdr;
+
+ hdr = array_idx_modifiable(&t->ext_hdr_updates, ext_id);
+ if (hdr->alloc_size > 0) {
+ i_free_and_null(hdr->mask);
+ i_free_and_null(hdr->data);
+ }
+ hdr->alloc_size = 0;
+ }
+ if (array_is_created(&t->ext_resets) &&
+ ext_id < array_count(&t->ext_resets)) {
+ /* clear resets */
+ array_idx_clear(&t->ext_resets, ext_id);
+ }
+ if (array_is_created(&t->ext_resizes) &&
+ ext_id < array_count(&t->ext_resizes)) {
+ /* clear resizes */
+ array_idx_clear(&t->ext_resizes, ext_id);
+ }
+
+ t->log_ext_updates = mail_index_transaction_has_ext_changes(t);
+}
+
+/* Remember that extension @ext_id's changes in this transaction are based
+   on @reset_id. If a different nonzero reset_id was already recorded, the
+   previously recorded changes are stale and get dropped. */
+void mail_index_ext_using_reset_id(struct mail_index_transaction *t,
+ uint32_t ext_id, uint32_t reset_id)
+{
+ uint32_t *reset_id_p;
+ bool changed;
+
+ if (!array_is_created(&t->ext_reset_ids))
+ i_array_init(&t->ext_reset_ids, ext_id + 2);
+ reset_id_p = array_idx_get_space(&t->ext_reset_ids, ext_id);
+ /* 0 means "not set yet", so only a change between two nonzero
+ values counts */
+ changed = *reset_id_p != reset_id && *reset_id_p != 0;
+ *reset_id_p = reset_id;
+ if (changed) {
+ /* reset_id changed, clear existing changes */
+ mail_index_ext_reset_changes(t, ext_id);
+ }
+}
+
+/* Like mail_index_ext_using_reset_id(), but unconditionally drops the
+   extension's already-recorded changes. */
+void mail_index_ext_set_reset_id(struct mail_index_transaction *t,
+ uint32_t ext_id, uint32_t reset_id)
+{
+ mail_index_ext_using_reset_id(t, ext_id, reset_id);
+ /* make sure the changes get reset, even if reset_id doesn't change */
+ mail_index_ext_reset_changes(t, ext_id);
+}
+
+/* Record a change of @size bytes at @offset in extension @ext_id's header.
+   The data and a byte-granularity change mask are kept in lazily grown
+   buffers whose size is the nearest power of two covering offset+size. */
+void mail_index_update_header_ext(struct mail_index_transaction *t,
+ uint32_t ext_id, size_t offset,
+ const void *data, size_t size)
+{
+ struct mail_index_transaction_ext_hdr_update *hdr;
+ size_t new_size;
+
+ i_assert(offset <= (uint32_t)-1 && size <= (uint32_t)-1 &&
+ offset + size <= (uint32_t)-1);
+
+ if (!array_is_created(&t->ext_hdr_updates))
+ i_array_init(&t->ext_hdr_updates, ext_id + 2);
+
+ hdr = array_idx_get_space(&t->ext_hdr_updates, ext_id);
+ if (hdr->alloc_size < offset || hdr->alloc_size - offset < size) {
+ i_assert(size < SIZE_MAX - offset);
+ new_size = nearest_power(offset + size);
+ hdr->mask = i_realloc(hdr->mask, hdr->alloc_size, new_size);
+ hdr->data = i_realloc(hdr->data, hdr->alloc_size, new_size);
+ hdr->alloc_size = new_size;
+ }
+ /* mark the changed bytes and store the new data */
+ memset(hdr->mask + offset, 1, size);
+ memcpy(hdr->data + offset, data, size);
+
+ t->log_ext_updates = TRUE;
+}
+
+/* Record an update of extension @ext_id's record data for sequence @seq.
+   The record size comes from a pending resize in this transaction if one
+   exists, otherwise from the registered extension. If @old_data_r is
+   non-NULL it receives the previously recorded data for this seq, or
+   zeroes if there was none. */
+void mail_index_update_ext(struct mail_index_transaction *t, uint32_t seq,
+ uint32_t ext_id, const void *data, void *old_data_r)
+{
+ struct mail_index *index = t->view->index;
+ const struct mail_index_registered_ext *rext;
+ const struct mail_transaction_ext_intro *intro;
+ uint16_t record_size;
+ ARRAY_TYPE(seq_array) *array;
+ unsigned int count;
+
+ i_assert(seq > 0 &&
+ (seq <= mail_index_view_get_messages_count(t->view) ||
+ seq <= t->last_new_seq));
+ i_assert(ext_id < array_count(&index->extensions));
+
+ t->log_ext_updates = TRUE;
+
+ if (!array_is_created(&t->ext_resizes)) {
+ intro = NULL;
+ count = 0;
+ } else {
+ intro = array_get(&t->ext_resizes, &count);
+ }
+ if (ext_id < count && intro[ext_id].name_size != 0) {
+ /* resized record */
+ record_size = intro[ext_id].record_size;
+ } else {
+ rext = array_idx(&index->extensions, ext_id);
+ record_size = rext->record_size;
+ }
+ i_assert(record_size > 0);
+
+ if (!array_is_created(&t->ext_rec_updates))
+ i_array_init(&t->ext_rec_updates, ext_id + 2);
+ array = array_idx_get_space(&t->ext_rec_updates, ext_id);
+
+ /* @UNSAFE */
+ if (!mail_index_seq_array_add(array, seq, data, record_size,
+ old_data_r)) {
+ /* not found, clear old_data if it was given */
+ if (old_data_r != NULL)
+ memset(old_data_r, 0, record_size);
+ }
+}
+
+/* Record an atomic increment of @diff to extension @ext_id's record for
+   sequence @seq. Multiple increments for the same seq within this
+   transaction are summed. Returns the accumulated diff so far. */
+int mail_index_atomic_inc_ext(struct mail_index_transaction *t,
+ uint32_t seq, uint32_t ext_id, int diff)
+{
+ ARRAY_TYPE(seq_array) *array;
+ int32_t old_diff32, diff32 = diff;
+
+ i_assert(seq > 0 &&
+ (seq <= mail_index_view_get_messages_count(t->view) ||
+ seq <= t->last_new_seq));
+ i_assert(ext_id < array_count(&t->view->index->extensions));
+ /* currently non-external transactions can be applied multiple times,
+ causing multiple increments. FIXME: we need this now and it doesn't
+ actually seem to be a real problem at least right now - why? */
+ /*i_assert((t->flags & MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL) != 0);*/
+
+ t->log_ext_updates = TRUE;
+ if (!array_is_created(&t->ext_rec_atomics))
+ i_array_init(&t->ext_rec_atomics, ext_id + 2);
+ array = array_idx_get_space(&t->ext_rec_atomics, ext_id);
+ if (mail_index_seq_array_add(array, seq, &diff32, sizeof(diff32),
+ &old_diff32)) {
+ /* already incremented this sequence in this transaction */
+ diff32 += old_diff32;
+ (void)mail_index_seq_array_add(array, seq, &diff32,
+ sizeof(diff32), NULL);
+ }
+ return diff32;
+}
+
+/* Return TRUE if applying @modify_type with @keywords to message @seq
+   would actually change its keyword set, compared to the message's
+   latest known keywords. Used to honor AVOID_FLAG_UPDATES. */
+static bool
+keyword_update_has_changes(struct mail_index_transaction *t, uint32_t seq,
+ enum modify_type modify_type,
+ struct mail_keywords *keywords)
+{
+ ARRAY_TYPE(keyword_indexes) existing;
+ const unsigned int *existing_idx;
+ unsigned int i, j, existing_count;
+ bool found;
+
+ t_array_init(&existing, 32);
+ /* appends within this transaction have no existing keywords */
+ if (seq < t->first_new_seq)
+ mail_index_transaction_lookup_latest_keywords(t, seq, &existing);
+ existing_idx = array_get(&existing, &existing_count);
+
+ if (modify_type == MODIFY_REPLACE && existing_count != keywords->count)
+ return TRUE;
+
+ for (i = 0; i < keywords->count; i++) {
+ found = FALSE;
+ for (j = 0; j < existing_count; j++) {
+ if (existing_idx[j] == keywords->idx[i]) {
+ found = TRUE;
+ break;
+ }
+ }
+ switch (modify_type) {
+ case MODIFY_ADD:
+ case MODIFY_REPLACE:
+ if (!found)
+ return TRUE;
+ break;
+ case MODIFY_REMOVE:
+ if (found)
+ return TRUE;
+ break;
+ }
+ }
+ return FALSE;
+}
+
+/* Build a mail_keywords list of the keywords currently set on @seq, for a
+   MODIFY_REPLACE to remove. Returns NULL if the message has no keywords.
+   The returned list must be unreferenced by the caller. */
+static struct mail_keywords *
+keyword_update_remove_existing(struct mail_index_transaction *t, uint32_t seq)
+{
+ ARRAY_TYPE(keyword_indexes) keywords;
+ uint32_t i, keywords_count;
+
+ t_array_init(&keywords, 32);
+ if (t->view->v.lookup_full == NULL) {
+ /* syncing is saving a list of changes into this transaction.
+ the seq is actually a UID, so we can't look up the existing
+ keywords. we shouldn't get here unless we're reading
+ pre-v2.2 keyword-reset records from .log files. so we don't
+ really care about performance that much here. */
+ keywords_count = array_count(&t->view->index->keywords);
+ for (i = 0; i < keywords_count; i++)
+ array_push_back(&keywords, &i);
+ } else {
+ mail_index_transaction_lookup_latest_keywords(t, seq, &keywords);
+ }
+ if (array_count(&keywords) == 0)
+ return NULL;
+ return mail_index_keywords_create_from_indexes(t->view->index,
+ &keywords);
+}
+
+/* Record a keyword change for message @seq. MODIFY_REPLACE is implemented
+   as "remove all existing keywords" followed by "add the given ones".
+   Changes are stored per keyword index as add_seq/remove_seq ranges in
+   t->keyword_updates. */
+void mail_index_update_keywords(struct mail_index_transaction *t, uint32_t seq,
+ enum modify_type modify_type,
+ struct mail_keywords *keywords)
+{
+ struct mail_index_transaction_keyword_update *u;
+ struct mail_keywords *add_keywords = NULL, *remove_keywords = NULL;
+ struct mail_keywords *unref_keywords = NULL;
+ unsigned int i;
+ bool changed;
+
+ i_assert(seq > 0 &&
+ (seq <= mail_index_view_get_messages_count(t->view) ||
+ seq <= t->last_new_seq));
+ i_assert(keywords->index == t->view->index);
+
+ /* an empty add/remove is a no-op; an empty replace still clears */
+ if (keywords->count == 0 && modify_type != MODIFY_REPLACE)
+ return;
+
+ update_minmax_flagupdate_seq(t, seq, seq);
+
+ if (!array_is_created(&t->keyword_updates)) {
+ uint32_t max_idx = keywords->count == 0 ? 3 :
+ keywords->idx[keywords->count-1];
+
+ i_array_init(&t->keyword_updates, max_idx + 1);
+ }
+
+ if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_AVOID_FLAG_UPDATES) != 0) {
+ /* skip the update entirely if it wouldn't change anything */
+ T_BEGIN {
+ changed = keyword_update_has_changes(t, seq,
+ modify_type,
+ keywords);
+ } T_END;
+ if (!changed)
+ return;
+ }
+
+ switch (modify_type) {
+ case MODIFY_REPLACE:
+ /* split this into add+remove. remove all existing keywords not
+ included in the keywords list */
+ if (seq < t->first_new_seq) {
+ /* remove the ones currently in index */
+ remove_keywords = keyword_update_remove_existing(t, seq);
+ unref_keywords = remove_keywords;
+ }
+ /* remove from all changes we've done in this transaction */
+ array_foreach_modifiable(&t->keyword_updates, u)
+ seq_range_array_remove(&u->add_seq, seq);
+ add_keywords = keywords;
+ break;
+ case MODIFY_ADD:
+ add_keywords = keywords;
+ break;
+ case MODIFY_REMOVE:
+ remove_keywords = keywords;
+ break;
+ }
+
+ /* Update add_seq and remove_seq arrays which describe the keyword
+ changes. First do the removes, since replace removes everything
+ first. */
+ if (remove_keywords != NULL) {
+ for (i = 0; i < remove_keywords->count; i++) {
+ u = array_idx_get_space(&t->keyword_updates,
+ remove_keywords->idx[i]);
+ seq_range_array_remove(&u->add_seq, seq);
+ /* Don't bother updating remove_seq for new messages,
+ since their initial state is "no keyword" anyway */
+ if (seq < t->first_new_seq) {
+ seq_range_array_add_with_init(&u->remove_seq,
+ 16, seq);
+ }
+ }
+ }
+ if (add_keywords != NULL) {
+ for (i = 0; i < add_keywords->count; i++) {
+ u = array_idx_get_space(&t->keyword_updates,
+ add_keywords->idx[i]);
+ seq_range_array_add_with_init(&u->add_seq, 16, seq);
+ seq_range_array_remove(&u->remove_seq, seq);
+ }
+ }
+ if (unref_keywords != NULL)
+ mail_index_keywords_unref(&unref_keywords);
+
+ t->log_updates = TRUE;
+}
+
+/* Remove sequence @seq from the transaction's pending flag updates,
+   shrinking or splitting the range that contains it. Returns TRUE if a
+   flag update existed for @seq, FALSE otherwise. */
+bool mail_index_cancel_flag_updates(struct mail_index_transaction *t,
+ uint32_t seq)
+{
+ struct mail_index_flag_update *updates, tmp_update;
+ unsigned int i, count;
+
+ if (!array_is_created(&t->updates))
+ return FALSE;
+
+ updates = array_get_modifiable(&t->updates, &count);
+ i = mail_index_transaction_get_flag_update_pos(t, 0, count, seq);
+ if (i == count)
+ return FALSE;
+ else {
+ i_assert(seq <= updates[i].uid2);
+ if (seq < updates[i].uid1)
+ return FALSE;
+ }
+
+ /* exists */
+ if (updates[i].uid1 == seq) {
+ if (updates[i].uid2 != seq)
+ updates[i].uid1++;
+ else if (count > 1)
+ array_delete(&t->updates, i, 1);
+ else
+ array_free(&t->updates);
+ } else if (updates[i].uid2 == seq) {
+ updates[i].uid2--;
+ } else {
+ /* need to split it in two */
+ tmp_update = updates[i];
+ tmp_update.uid1 = seq+1;
+ updates[i].uid2 = seq-1;
+ array_insert(&t->updates, i + 1, &tmp_update, 1);
+ }
+ return TRUE;
+}
+
+/* Remove @seq from a seq_range array, freeing the array if it becomes
+   empty. Returns TRUE if @seq was present. */
+static bool mail_index_cancel_array(ARRAY_TYPE(seq_range) *array, uint32_t seq)
+{
+ if (array_is_created(array)) {
+ if (seq_range_array_remove(array, seq)) {
+ if (array_count(array) == 0)
+ array_free(array);
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/* Remove sequence @seq from every pending keyword add/remove range. If no
+   keyword changes remain at all afterwards, the whole keyword_updates
+   array is freed. Returns TRUE if anything was removed. */
+bool mail_index_cancel_keyword_updates(struct mail_index_transaction *t,
+ uint32_t seq)
+{
+ struct mail_index_transaction_keyword_update *kw;
+ bool ret = FALSE, have_kw_changes = FALSE;
+
+ if (!array_is_created(&t->keyword_updates))
+ return FALSE;
+
+ array_foreach_modifiable(&t->keyword_updates, kw) {
+ if (mail_index_cancel_array(&kw->add_seq, seq))
+ ret = TRUE;
+ if (mail_index_cancel_array(&kw->remove_seq, seq))
+ ret = TRUE;
+ if (array_is_created(&kw->add_seq) ||
+ array_is_created(&kw->remove_seq))
+ have_kw_changes = TRUE;
+ }
+ if (!have_kw_changes)
+ array_free(&t->keyword_updates);
+ return ret;
+}
+
+/* Discard all changes recorded in the transaction (delegates to the
+   transaction's vfunc implementation). */
+void mail_index_transaction_reset(struct mail_index_transaction *t)
+{
+ t->v.reset(t);
+}
+
+/* Discard the transaction's changes and additionally mark the whole index
+   to be reset when the transaction is committed. */
+void mail_index_reset(struct mail_index_transaction *t)
+{
+ mail_index_transaction_reset(t);
+
+ t->reset = TRUE;
+}
+
+/* Queue a header update that clears the index's "has been fsck'd" flag,
+   if it is currently set. Requires the transaction log sync lock. */
+void mail_index_unset_fscked(struct mail_index_transaction *t)
+{
+ struct mail_index_header new_hdr =
+ *mail_index_get_header(t->view);
+
+ i_assert(t->view->index->log_sync_locked);
+
+ /* remove fsck'd-flag if it exists. */
+ if ((new_hdr.flags & MAIL_INDEX_HDR_FLAG_FSCKD) != 0) {
+ new_hdr.flags &= ENUM_NEGATE(MAIL_INDEX_HDR_FLAG_FSCKD);
+ mail_index_update_header(t,
+ offsetof(struct mail_index_header, flags),
+ &new_hdr.flags, sizeof(new_hdr.flags), FALSE);
+ }
+}
+
+/* Mark the index as deleted when this transaction is committed.
+   Mutually exclusive with mail_index_set_undeleted(). */
+void mail_index_set_deleted(struct mail_index_transaction *t)
+{
+ i_assert(!t->index_undeleted);
+
+ t->index_deleted = TRUE;
+}
+
+/* Mark the index as undeleted when this transaction is committed.
+   Mutually exclusive with mail_index_set_deleted(). */
+void mail_index_set_undeleted(struct mail_index_transaction *t)
+{
+ i_assert(!t->index_deleted);
+
+ t->index_undeleted = TRUE;
+}
+
+/* Set the highest modseq this transaction may modify. @seqs receives the
+   sequences whose changes conflict with the limit; the caller keeps
+   ownership of the (already created) array. */
+void mail_index_transaction_set_max_modseq(struct mail_index_transaction *t,
+ uint64_t max_modseq,
+ ARRAY_TYPE(seq_range) *seqs)
+{
+ i_assert(array_is_created(seqs));
+
+ t->max_modseq = max_modseq;
+ t->conflict_seqs = seqs;
+}
diff --git a/src/lib-index/mail-index-transaction-view.c b/src/lib-index/mail-index-transaction-view.c
new file mode 100644
index 0000000..240c7fe
--- /dev/null
+++ b/src/lib-index/mail-index-transaction-view.c
@@ -0,0 +1,534 @@
+/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "buffer.h"
+#include "seq-range-array.h"
+#include "mail-index-private.h"
+#include "mail-index-view-private.h"
+#include "mail-index-transaction-private.h"
+
/* A view that overlays a transaction's uncommitted changes on top of its
   parent view. */
struct mail_index_view_transaction {
	struct mail_index_view view;
	/* vfuncs of the parent view; used for everything the transaction
	   doesn't override */
	struct mail_index_view_vfuncs *super;
	struct mail_index_transaction *t;

	/* lazily created clone of the index map, used for extension
	   lookups so missing extensions can be registered on demand */
	struct mail_index_map *lookup_map;
	/* copy of the parent header with next_uid fixed up */
	struct mail_index_header hdr;

	/* temporary buffer for returning 64bit-aligned extension data */
	buffer_t *lookup_return_data;
	uint32_t lookup_prev_seq;

	/* copies of existing records with this transaction's flag updates
	   applied (see tview_apply_flag_updates()); all_recs keeps every
	   allocated array so they can be freed at close */
	unsigned int record_size;
	unsigned int recs_count;
	void *recs;
	ARRAY(void *) all_recs;
};
+
+static void tview_close(struct mail_index_view *view)
+{
+ struct mail_index_view_transaction *tview =
+ (struct mail_index_view_transaction *)view;
+ struct mail_index_transaction *t = tview->t;
+ void **recs;
+ unsigned int i, count;
+
+ if (tview->lookup_map != NULL)
+ mail_index_unmap(&tview->lookup_map);
+ buffer_free(&tview->lookup_return_data);
+
+ if (array_is_created(&tview->all_recs)) {
+ recs = array_get_modifiable(&tview->all_recs, &count);
+ for (i = 0; i < count; i++)
+ i_free(recs[i]);
+ array_free(&tview->all_recs);
+ }
+
+ tview->super->close(view);
+ mail_index_transaction_unref(&t);
+}
+
+static uint32_t tview_get_message_count(struct mail_index_view *view)
+{
+ struct mail_index_view_transaction *tview =
+ (struct mail_index_view_transaction *)view;
+
+ return view->map->hdr.messages_count +
+ (tview->t->last_new_seq == 0 ? 0 :
+ tview->t->last_new_seq - tview->t->first_new_seq + 1);
+}
+
+static const struct mail_index_header *
+tview_get_header(struct mail_index_view *view)
+{
+ struct mail_index_view_transaction *tview =
+ (struct mail_index_view_transaction *)view;
+ const struct mail_index_header *hdr;
+ uint32_t next_uid;
+
+ /* FIXME: header counters may not be correct */
+ hdr = tview->super->get_header(view);
+
+ next_uid = mail_index_transaction_get_next_uid(tview->t);
+ if (next_uid != hdr->next_uid) {
+ tview->hdr = *hdr;
+ tview->hdr.next_uid = next_uid;
+ hdr = &tview->hdr;
+ }
+ return hdr;
+}
+
/* Return rec with the transaction's pending flag updates for seq applied,
   or rec itself if seq has no flag updates. */
static const struct mail_index_record *
tview_apply_flag_updates(struct mail_index_view_transaction *tview,
			 struct mail_index_map *map,
			 const struct mail_index_record *rec, uint32_t seq)
{
	struct mail_index_transaction *t = tview->t;
	const struct mail_index_flag_update *updates;
	struct mail_index_record *trec;
	unsigned int idx, count;

	/* see if there are any flag updates */
	if (seq < t->min_flagupdate_seq || seq > t->max_flagupdate_seq ||
	    !array_is_created(&t->updates))
		return rec;

	/* updates are sequence ranges stored in the uid1..uid2 fields */
	updates = array_get(&t->updates, &count);
	idx = mail_index_transaction_get_flag_update_pos(t, 0, count, seq);
	if (seq < updates[idx].uid1 || seq > updates[idx].uid2)
		return rec;

	/* yes, we have flag updates. since we can't modify rec directly and
	   we want to be able to handle multiple mail_index_lookup() calls
	   without the second one overriding the first one's data, we'll
	   create a records array and return data from there.

	   it's also possible that the record size increases, so we potentially
	   have to create multiple arrays. they all get eventually freed when
	   the view gets freed. */
	if (map->hdr.record_size > tview->record_size) {
		if (!array_is_created(&tview->all_recs))
			i_array_init(&tview->all_recs, 4);
		tview->recs_count = t->first_new_seq;
		tview->record_size = I_MAX(map->hdr.record_size,
					   tview->view.map->hdr.record_size);
		tview->recs = i_malloc(MALLOC_MULTIPLY(tview->record_size,
						       tview->recs_count));
		array_push_back(&tview->all_recs, &tview->recs);
	}
	i_assert(tview->recs_count == t->first_new_seq);
	i_assert(seq > 0 && seq <= tview->recs_count);

	/* copy the record and apply the flag changes; only the low 8 bits
	   of add_flags fit into the record's flags byte */
	trec = PTR_OFFSET(tview->recs, (seq-1) * tview->record_size);
	memcpy(trec, rec, map->hdr.record_size);
	trec->flags |= updates[idx].add_flags & 0xff;
	trec->flags &= ENUM_NEGATE(updates[idx].remove_flags);
	return trec;
}
+
/* Look up a record: appended records come from the transaction itself,
   existing records from the parent view with pending flag updates
   applied; expunged_r also reflects pending expunges. */
static const struct mail_index_record *
tview_lookup_full(struct mail_index_view *view, uint32_t seq,
		  struct mail_index_map **map_r, bool *expunged_r)
{
	struct mail_index_view_transaction *tview =
		(struct mail_index_view_transaction *)view;
	const struct mail_index_record *rec;

	if (seq >= tview->t->first_new_seq) {
		/* FIXME: is this right to return index map..?
		   it's not there yet. */
		*map_r = view->index->map;
		if (expunged_r != NULL)
			*expunged_r = FALSE;
		return mail_index_transaction_lookup(tview->t, seq);
	}

	rec = tview->super->lookup_full(view, seq, map_r, expunged_r);
	rec = tview_apply_flag_updates(tview, *map_r, rec, seq);

	if (expunged_r != NULL &&
	    mail_index_transaction_is_expunged(tview->t, seq))
		*expunged_r = TRUE;
	return rec;
}
+
+static void
+tview_lookup_uid(struct mail_index_view *view, uint32_t seq, uint32_t *uid_r)
+{
+ struct mail_index_view_transaction *tview =
+ (struct mail_index_view_transaction *)view;
+
+ if (seq >= tview->t->first_new_seq)
+ *uid_r = mail_index_transaction_lookup(tview->t, seq)->uid;
+ else
+ tview->super->lookup_uid(view, seq, uid_r);
+}
+
/* Convert a UID range into a sequence range, including messages appended
   within this transaction (only possible once the appends have UIDs
   assigned). */
static void tview_lookup_seq_range(struct mail_index_view *view,
				   uint32_t first_uid, uint32_t last_uid,
				   uint32_t *first_seq_r, uint32_t *last_seq_r)
{
	struct mail_index_view_transaction *tview =
		(struct mail_index_view_transaction *)view;
	const struct mail_index_record *rec;
	uint32_t seq;

	if (!tview->t->reset) {
		tview->super->lookup_seq_range(view, first_uid, last_uid,
					       first_seq_r, last_seq_r);
	} else {
		/* index is being reset. we never want to return old
		   sequences. */
		*first_seq_r = *last_seq_r = 0;
	}
	if (tview->t->last_new_seq == 0) {
		/* no new messages, the results are final. */
		return;
	}

	rec = mail_index_transaction_lookup(tview->t, tview->t->first_new_seq);
	if (rec->uid == 0) {
		/* new messages don't have UIDs */
		return;
	}
	if (last_uid < rec->uid) {
		/* all wanted messages were existing */
		return;
	}

	/* at least some of the wanted messages are newly created */
	if (*first_seq_r == 0) {
		/* no match among existing messages - find the first
		   appended message within the UID range */
		seq = tview->t->first_new_seq;
		for (; seq <= tview->t->last_new_seq; seq++) {
			rec = mail_index_transaction_lookup(tview->t, seq);
			if (first_uid <= rec->uid)
				break;
		}
		if (seq > tview->t->last_new_seq || rec->uid > last_uid) {
			/* no messages in range */
			return;
		}
		*first_seq_r = seq;

		if (rec->uid == last_uid) {
			/* one seq in range */
			*last_seq_r = seq;
			return;
		}
	}

	/* extend the range's end: scan appends backwards for the last one
	   still within last_uid */
	seq = tview->t->last_new_seq;
	for (; seq >= tview->t->first_new_seq; seq--) {
		rec = mail_index_transaction_lookup(tview->t, seq);
		if (rec->uid <= last_uid) {
			*last_seq_r = seq;
			break;
		}
	}
	i_assert(seq >= tview->t->first_new_seq);
}
+
/* Find the first message whose flags masked with flags_mask equal flags,
   checking the parent view first and then this transaction's appends. */
static void tview_lookup_first(struct mail_index_view *view,
			       enum mail_flags flags, uint8_t flags_mask,
			       uint32_t *seq_r)
{
	struct mail_index_view_transaction *tview =
		(struct mail_index_view_transaction *)view;
	const struct mail_index_record *rec;
	unsigned int append_count;
	uint32_t seq, message_count;

	if (!tview->t->reset) {
		tview->super->lookup_first(view, flags, flags_mask, seq_r);
		if (*seq_r != 0)
			return;
	} else {
		*seq_r = 0;
	}

	/* appends are stored as contiguous records, so rec++ walks them
	   (verified by the assert below) */
	rec = array_get(&tview->t->appends, &append_count);
	seq = tview->t->first_new_seq;
	message_count = tview->t->last_new_seq;
	i_assert(append_count == message_count - seq + 1);

	for (; seq <= message_count; seq++, rec++) {
		if ((rec->flags & flags_mask) == (uint8_t)flags) {
			*seq_r = seq;
			break;
		}
	}
}
+
+static void keyword_index_add(ARRAY_TYPE(keyword_indexes) *keywords,
+ unsigned int idx)
+{
+ const unsigned int *indexes;
+ unsigned int i, count;
+
+ indexes = array_get(keywords, &count);
+ for (i = 0; i < count; i++) {
+ if (indexes[i] == idx)
+ return;
+ }
+ array_push_back(keywords, &idx);
+}
+
+static void keyword_index_remove(ARRAY_TYPE(keyword_indexes) *keywords,
+ unsigned int idx)
+{
+ const unsigned int *indexes;
+ unsigned int i, count;
+
+ indexes = array_get(keywords, &count);
+ for (i = 0; i < count; i++) {
+ if (indexes[i] == idx) {
+ array_delete(keywords, i, 1);
+ break;
+ }
+ }
+}
+
+static void tview_lookup_keywords(struct mail_index_view *view, uint32_t seq,
+ ARRAY_TYPE(keyword_indexes) *keyword_idx)
+{
+ struct mail_index_view_transaction *tview =
+ (struct mail_index_view_transaction *)view;
+ struct mail_index_transaction *t = tview->t;
+ const struct mail_index_transaction_keyword_update *updates;
+ unsigned int i, count;
+
+ tview->super->lookup_keywords(view, seq, keyword_idx);
+
+ if (seq < t->min_flagupdate_seq || seq > t->max_flagupdate_seq) {
+ /* no keyword updates for this sequence */
+ return;
+ }
+
+ if (array_is_created(&t->keyword_updates))
+ updates = array_get(&t->keyword_updates, &count);
+ else {
+ updates = NULL;
+ count = 0;
+ }
+ for (i = 0; i < count; i++) {
+ if (array_is_created(&updates[i].add_seq) &&
+ seq_range_exists(&updates[i].add_seq, seq))
+ keyword_index_add(keyword_idx, i);
+ else if (array_is_created(&updates[i].remove_seq) &&
+ seq_range_exists(&updates[i].remove_seq, seq))
+ keyword_index_remove(keyword_idx, i);
+ }
+}
+
+static const void *
+tview_return_updated_ext(struct mail_index_view_transaction *tview,
+ uint32_t seq, const void *data, uint32_t ext_id)
+{
+ const struct mail_index_ext *ext;
+ const struct mail_index_registered_ext *rext;
+ const struct mail_transaction_ext_intro *intro;
+ unsigned int record_align, record_size;
+ uint32_t ext_idx;
+ size_t pos;
+
+ /* data begins with a 32bit sequence, followed by the actual
+ extension data */
+ data = CONST_PTR_OFFSET(data, sizeof(uint32_t));
+
+ if (!mail_index_map_get_ext_idx(tview->lookup_map, ext_id, &ext_idx)) {
+ /* we're adding the extension now. */
+ rext = array_idx(&tview->view.index->extensions, ext_id);
+ record_align = rext->record_align;
+ record_size = rext->record_size;
+ } else {
+ ext = array_idx(&tview->lookup_map->extensions, ext_idx);
+ record_align = ext->record_align;
+ record_size = ext->record_size;
+ }
+
+ /* see if the extension has been resized within this transaction */
+ if (array_is_created(&tview->t->ext_resizes) &&
+ ext_id < array_count(&tview->t->ext_resizes)) {
+ intro = array_idx(&tview->t->ext_resizes, ext_id);
+ if (intro[ext_id].name_size != 0) {
+ record_align = intro->record_align;
+ record_size = intro->record_size;
+ }
+ }
+
+ if (record_align <= sizeof(uint32_t)) {
+ /* data is 32bit aligned already */
+ return data;
+ } else {
+ /* assume we want 64bit alignment - copy the data to
+ temporary buffer and return it */
+ if (tview->lookup_return_data == NULL) {
+ tview->lookup_return_data =
+ buffer_create_dynamic(default_pool,
+ record_size + 64);
+ } else if (seq != tview->lookup_prev_seq) {
+ /* clear the buffer between lookups for different
+ messages */
+ buffer_set_used_size(tview->lookup_return_data, 0);
+ }
+ tview->lookup_prev_seq = seq;
+ pos = tview->lookup_return_data->used;
+ buffer_append(tview->lookup_return_data, data, record_size);
+ return CONST_PTR_OFFSET(tview->lookup_return_data->data, pos);
+ }
+}
+
+static bool
+tview_is_ext_reset(struct mail_index_view_transaction *tview, uint32_t ext_id)
+{
+ const struct mail_transaction_ext_reset *resets;
+ unsigned int count;
+
+ if (!array_is_created(&tview->t->ext_resets))
+ return FALSE;
+
+ resets = array_get(&tview->t->ext_resets, &count);
+ return ext_id < count && resets[ext_id].new_reset_id != 0;
+}
+
/* If this transaction has an updated extension record for seq, return it
   (TRUE). The data is returned from lookup_map, a private clone of the
   index map where not-yet-existing extensions get registered on demand so
   callers can resolve them. */
static bool
tview_lookup_ext_update(struct mail_index_view_transaction *tview, uint32_t seq,
			uint32_t ext_id, struct mail_index_map **map_r,
			const void **data_r)
{
	const ARRAY_TYPE(seq_array) *ext_buf;
	const void *data;
	unsigned int idx;
	uint32_t map_ext_idx;

	ext_buf = array_idx(&tview->t->ext_rec_updates, ext_id);
	if (!array_is_created(ext_buf) ||
	    !mail_index_seq_array_lookup(ext_buf, seq, &idx))
		return FALSE;

	if (tview->lookup_map == NULL) {
		/* lazily clone the map on the first updated-ext lookup */
		tview->lookup_map =
			mail_index_map_clone(tview->view.index->map);
	}
	if (!mail_index_map_get_ext_idx(tview->lookup_map, ext_id, &map_ext_idx)) {
		/* extension doesn't yet exist in the map. add it there with
		   the preliminary information (mainly its size) so if caller
		   looks it up, it's going to be found. */
		const struct mail_index_registered_ext *rext =
			array_idx(&tview->view.index->extensions, ext_id);
		struct mail_index_ext_header ext_hdr;

		i_zero(&ext_hdr);
		ext_hdr.hdr_size = rext->hdr_size;
		/* element = 32bit sequence + record data */
		ext_hdr.record_size = ext_buf->arr.element_size - sizeof(uint32_t);
		ext_hdr.record_align = rext->record_align;

		mail_index_map_register_ext(tview->lookup_map, rext->name,
					    (uint32_t)-1, &ext_hdr);
	}

	data = array_idx(ext_buf, idx);
	*map_r = tview->lookup_map;
	*data_r = tview_return_updated_ext(tview, seq, data, ext_id);
	return TRUE;
}
+
/* Look up extension data for seq, preferring this transaction's pending
   extension record updates over the parent view's data. Returns NULL data
   for appended messages and for extensions reset in this transaction. */
static void
tview_lookup_ext_full(struct mail_index_view *view, uint32_t seq,
		      uint32_t ext_id, struct mail_index_map **map_r,
		      const void **data_r, bool *expunged_r)
{
	struct mail_index_view_transaction *tview =
		(struct mail_index_view_transaction *)view;

	i_assert(ext_id < array_count(&view->index->extensions));

	if (expunged_r != NULL)
		*expunged_r = FALSE;

	if (array_is_created(&tview->t->ext_rec_updates) &&
	    ext_id < array_count(&tview->t->ext_rec_updates)) {
		/* there are some ext updates in transaction.
		   see if there's any for this sequence. */
		if (tview_lookup_ext_update(tview, seq, ext_id, map_r, data_r))
			return;
	}

	/* not updated, return the existing value, unless ext was
	   already reset */
	if (seq < tview->t->first_new_seq &&
	    !tview_is_ext_reset(tview, ext_id)) {
		tview->super->lookup_ext_full(view, seq, ext_id,
					      map_r, data_r, expunged_r);
	} else {
		*map_r = view->index->map;
		*data_r = NULL;
	}
}
+
/* Return extension header data. Currently just delegates to the parent
   view - this transaction's pending header updates are NOT applied. */
static void tview_get_header_ext(struct mail_index_view *view,
				 struct mail_index_map *map, uint32_t ext_id,
				 const void **data_r, size_t *data_size_r)
{
	struct mail_index_view_transaction *tview =
		(struct mail_index_view_transaction *)view;

	/* FIXME: check updates */
	tview->super->get_header_ext(view, map, ext_id, data_r, data_size_r);
}
+
+static bool tview_ext_get_reset_id(struct mail_index_view *view,
+ struct mail_index_map *map,
+ uint32_t ext_id, uint32_t *reset_id_r)
+{
+ struct mail_index_view_transaction *tview =
+ (struct mail_index_view_transaction *)view;
+ const uint32_t *reset_id_p;
+
+ if (array_is_created(&tview->t->ext_reset_ids) &&
+ ext_id < array_count(&tview->t->ext_reset_ids) &&
+ map == tview->lookup_map) {
+ reset_id_p = array_idx(&tview->t->ext_reset_ids, ext_id);
+ *reset_id_r = *reset_id_p;
+ return TRUE;
+ }
+
+ return tview->super->ext_get_reset_id(view, map, ext_id, reset_id_r);
+}
+
+static struct mail_index_view_vfuncs trans_view_vfuncs = {
+ tview_close,
+ tview_get_message_count,
+ tview_get_header,
+ tview_lookup_full,
+ tview_lookup_uid,
+ tview_lookup_seq_range,
+ tview_lookup_first,
+ tview_lookup_keywords,
+ tview_lookup_ext_full,
+ tview_get_header_ext,
+ tview_ext_get_reset_id
+};
+
/* Open a view that shows the transaction's uncommitted changes on top of
   its parent view. References the transaction until the view is closed.
   While the parent view is being synced, the parent itself is returned
   (referenced) instead of a clone. */
struct mail_index_view *
mail_index_transaction_open_updated_view(struct mail_index_transaction *t)
{
	struct mail_index_view_transaction *tview;

	if (t->view->syncing) {
		/* transaction view is being synced. while it's done, it's not
		   possible to add new messages, but the view itself might
		   change. so we can't make a copy of the view. */
		mail_index_view_ref(t->view);
		return t->view;
	}

	tview = i_new(struct mail_index_view_transaction, 1);
	mail_index_view_clone(&tview->view, t->view);
	tview->view.v = trans_view_vfuncs;
	tview->super = &t->view->v;
	tview->t = t;

	mail_index_transaction_ref(t);
	return &tview->view;
}
diff --git a/src/lib-index/mail-index-transaction.c b/src/lib-index/mail-index-transaction.c
new file mode 100644
index 0000000..0c61170
--- /dev/null
+++ b/src/lib-index/mail-index-transaction.c
@@ -0,0 +1,360 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "array.h"
+#include "hook-build.h"
+#include "bsearch-insert-pos.h"
+#include "llist.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+#include "mail-index-transaction-private.h"
+
+static ARRAY(hook_mail_index_transaction_created_t *)
+ hook_mail_index_transaction_created;
+
/* Register a hook that is called for every newly created transaction
   (see mail_index_transaction_begin()). The hook array is created
   lazily on first registration. */
void mail_index_transaction_hook_register(hook_mail_index_transaction_created_t *hook)
{
	if (!array_is_created(&hook_mail_index_transaction_created))
		i_array_init(&hook_mail_index_transaction_created, 8);
	array_push_back(&hook_mail_index_transaction_created, &hook);
}
+
+void mail_index_transaction_hook_unregister(hook_mail_index_transaction_created_t *hook)
+{
+ unsigned int idx;
+ bool found = FALSE;
+
+ i_assert(array_is_created(&hook_mail_index_transaction_created));
+ for(idx = 0; idx < array_count(&hook_mail_index_transaction_created); idx++) {
+ hook_mail_index_transaction_created_t *arr_hook =
+ array_idx_elem(&hook_mail_index_transaction_created, idx);
+ if (arr_hook == hook) {
+ array_delete(&hook_mail_index_transaction_created, idx, 1);
+ found = TRUE;
+ break;
+ }
+ }
+ i_assert(found == TRUE);
+ if (array_count(&hook_mail_index_transaction_created) == 0)
+ array_free(&hook_mail_index_transaction_created);
+}
+
+
/* Return the view this transaction was opened against. */
struct mail_index_view *
mail_index_transaction_get_view(struct mail_index_transaction *t)
{
	return t->view;
}
+
/* Returns TRUE if the transaction has a pending expunge for seq. */
bool mail_index_transaction_is_expunged(struct mail_index_transaction *t,
					uint32_t seq)
{
	struct mail_transaction_expunge_guid key;

	if (!array_is_created(&t->expunges))
		return FALSE;

	/* the bsearch below needs a sorted array */
	if (t->expunges_nonsorted)
		mail_index_transaction_sort_expunges(t);

	/* NOTE(review): the uid field is searched with a sequence here -
	   presumably the expunges array stores seqs in uid until commit;
	   confirm against mail-index-transaction-update.c */
	key.uid = seq;
	return array_bsearch(&t->expunges, &key,
			     mail_transaction_expunge_guid_cmp) != NULL;
}
+
/* Add a reference to the transaction (paired with
   mail_index_transaction_unref()). */
void mail_index_transaction_ref(struct mail_index_transaction *t)
{
	t->refcount++;
}
+
/* Drop a reference; on the last one, reset all pending changes, unlink
   the transaction from its view and free it. Always NULLs *_t. */
void mail_index_transaction_unref(struct mail_index_transaction **_t)
{
	struct mail_index_transaction *t = *_t;

	*_t = NULL;
	if (--t->refcount > 0)
		return;

	mail_index_transaction_reset_v(t);

	DLLIST_REMOVE(&t->view->transactions_list, t);
	array_free(&t->module_contexts);
	if (t->latest_view != NULL)
		mail_index_view_close(&t->latest_view);
	mail_index_view_close(&t->view);
	i_free(t);
}
+
/* Return the next_uid the index will have after this transaction commits:
   the highest of the view's next_uid (1 if resetting or if uid_validity
   differs from the disk index), the highest appended UID + 1, and any
   pending pre/post header updates to next_uid. */
uint32_t mail_index_transaction_get_next_uid(struct mail_index_transaction *t)
{
	const struct mail_index_header *head_hdr, *hdr;
	unsigned int offset;
	uint32_t next_uid;

	head_hdr = &t->view->index->map->hdr;
	hdr = &t->view->map->hdr;
	next_uid = t->reset || head_hdr->uid_validity != hdr->uid_validity ?
		1 : hdr->next_uid;
	if (array_is_created(&t->appends) && t->highest_append_uid != 0) {
		/* get next_uid from appends if they have UIDs. it's possible
		   that some appends have too low UIDs, they'll be caught
		   later. */
		if (next_uid <= t->highest_append_uid)
			next_uid = t->highest_append_uid + 1;
	}

	/* see if it's been updated in pre/post header changes */
	offset = offsetof(struct mail_index_header, next_uid);
	if (t->post_hdr_mask[offset] != 0) {
		hdr = (const void *)t->post_hdr_change;
		if (hdr->next_uid > next_uid)
			next_uid = hdr->next_uid;
	}
	if (t->pre_hdr_mask[offset] != 0) {
		hdr = (const void *)t->pre_hdr_change;
		if (hdr->next_uid > next_uid)
			next_uid = hdr->next_uid;
	}
	return next_uid;
}
+
/* Look up the most up-to-date keywords for seq by refreshing the index and
   using a freshly opened (cached in t->latest_view) view; falls back to the
   transaction's own view if the message is already expunged there. */
void mail_index_transaction_lookup_latest_keywords(struct mail_index_transaction *t,
						   uint32_t seq,
						   ARRAY_TYPE(keyword_indexes) *keywords)
{
	uint32_t uid, latest_seq;

	/* seq points to the transaction's primary view */
	mail_index_lookup_uid(t->view, seq, &uid);

	/* get the latest keywords from the updated index, or fallback to the
	   primary view if the message is already expunged */
	if (t->latest_view == NULL) {
		mail_index_refresh(t->view->index);
		t->latest_view = mail_index_view_open(t->view->index);
	}
	if (mail_index_lookup_seq(t->latest_view, uid, &latest_seq))
		mail_index_lookup_keywords(t->latest_view, latest_seq, keywords);
	else
		mail_index_lookup_keywords(t->view, seq, keywords);
}
+
/* Prepare the transaction log head for appending this transaction.
   Returns 1 on success, 0 if the transaction only wanted to reset the
   index (nothing more to write), -1 on error. */
static int
mail_transaction_log_file_refresh(struct mail_index_transaction *t,
				  struct mail_transaction_log_append_ctx *ctx)
{
	struct mail_transaction_log_file *file;

	if (t->reset) {
		/* Reset the whole index, preserving only indexid. Begin by
		   rotating the log. We don't care if we skip some non-synced
		   transactions. */
		if (mail_transaction_log_rotate(t->view->index->log, TRUE) < 0)
			return -1;

		if (!MAIL_INDEX_TRANSACTION_HAS_CHANGES(t)) {
			/* we only wanted to reset */
			return 0;
		}
	}
	file = t->view->index->log->head;

	/* make sure we have everything mapped */
	if (mail_index_map(t->view->index, MAIL_INDEX_SYNC_HANDLER_HEAD) <= 0)
		return -1;

	i_assert(file->sync_offset >= file->buffer_offset);
	ctx->new_highest_modseq = file->sync_highest_modseq;
	return 1;
}
+
/* Export the transaction's changes and append them to the transaction log.
   On success fills commit_size_r with the number of log bytes written and
   changes_r with the kinds of changes exported. Returns 0 on success,
   -1 on error. */
static int
mail_index_transaction_commit_real(struct mail_index_transaction *t,
				   uoff_t *commit_size_r,
				   enum mail_index_transaction_change *changes_r)
{
	struct mail_transaction_log *log = t->view->index->log;
	struct mail_transaction_log_append_ctx *ctx;
	enum mail_transaction_type trans_flags = 0;
	uint32_t log_seq1, log_seq2;
	uoff_t log_offset1, log_offset2;
	int ret;

	*changes_r = 0;

	if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL) != 0)
		trans_flags |= MAIL_TRANSACTION_EXTERNAL;
	if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_SYNC) != 0)
		trans_flags |= MAIL_TRANSACTION_SYNC;

	if (mail_transaction_log_append_begin(log->index, trans_flags, &ctx) < 0)
		return -1;
	ret = mail_transaction_log_file_refresh(t, ctx);
	if (ret > 0) T_BEGIN {
		mail_index_transaction_finish(t);
		mail_index_transaction_export(t, ctx, changes_r);
	} T_END;

	/* remember the head position before and after the append so we know
	   the area this transaction covers */
	mail_transaction_log_get_head(log, &log_seq1, &log_offset1);
	if (mail_transaction_log_append_commit(&ctx) < 0 || ret < 0)
		return -1;
	mail_transaction_log_get_head(log, &log_seq2, &log_offset2);
	i_assert(log_seq1 == log_seq2);

	if (t->reset) {
		/* get rid of the old index. it might just confuse readers,
		   especially if it's broken. */
		i_unlink_if_exists(log->index->filepath);
	}

	*commit_size_r = log_offset2 - log_offset1;

	if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_HIDE) != 0 &&
	    log_offset1 != log_offset2) {
		/* mark the area covered by this transaction hidden */
		mail_index_view_add_hidden_transaction(t->view, log_seq1,
			log_offset1, log_offset2 - log_offset1);
	}
	return 0;
}
+
/* Default commit vfunc: write the changes (if any) to the log, fill
   result_r with the resulting log position, refresh the in-memory mapping
   when appropriate, and drop the transaction's reference. */
static int mail_index_transaction_commit_v(struct mail_index_transaction *t,
					   struct mail_index_transaction_commit_result *result_r)
{
	struct mail_index *index = t->view->index;
	bool changed;
	int ret;

	i_assert(t->first_new_seq >
		 mail_index_view_get_messages_count(t->view));

	changed = MAIL_INDEX_TRANSACTION_HAS_CHANGES(t) || t->reset;
	ret = !changed ? 0 :
		mail_index_transaction_commit_real(t, &result_r->commit_size,
						   &result_r->changes_mask);
	mail_transaction_log_get_head(index->log, &result_r->log_file_seq,
				      &result_r->log_file_offset);

	if (ret == 0 && !index->syncing && changed) {
		/* if we're committing a normal transaction, we want to
		   have those changes in the index mapping immediately. this
		   is especially important when committing cache offset
		   updates.

		   however if we're syncing the index now, the mapping must
		   be done later as MAIL_INDEX_SYNC_HANDLER_FILE so that
		   expunge handlers get run for the newly expunged messages
		   (and sync handlers that require HANDLER_FILE as well). */
		index->sync_commit_result = result_r;
		mail_index_refresh(index);
		index->sync_commit_result = NULL;
	}

	mail_index_transaction_unref(&t);
	return ret;
}
+
/* Default rollback vfunc: just drop the transaction's reference; the
   pending changes are discarded by the final unref. */
static void mail_index_transaction_rollback_v(struct mail_index_transaction *t)
{
	mail_index_transaction_unref(&t);
}
+
/* Commit the transaction, discarding the detailed commit result. */
int mail_index_transaction_commit(struct mail_index_transaction **t)
{
	struct mail_index_transaction_commit_result result;

	return mail_index_transaction_commit_full(t, &result);
}
+
/* Commit the transaction and free it (always NULLs *_t). Returns 0 and
   fills result_r on success; on an inconsistent view or a deleted index
   the transaction is rolled back and -1 returned. */
int mail_index_transaction_commit_full(struct mail_index_transaction **_t,
				       struct mail_index_transaction_commit_result *result_r)
{
	struct mail_index_transaction *t = *_t;
	struct mail_index *index = t->view->index;
	/* remember before commit frees t */
	bool index_undeleted = t->index_undeleted;

	if (mail_index_view_is_inconsistent(t->view)) {
		mail_index_set_error_nolog(index, "View is inconsistent");
		mail_index_transaction_rollback(_t);
		return -1;
	}
	if (!index_undeleted && !t->commit_deleted_index) {
		if (t->view->index->index_deleted ||
		    (t->view->index->index_delete_requested &&
		     !t->view->index->syncing)) {
			/* no further changes allowed */
			mail_index_set_error_nolog(index, "Index is marked deleted");
			mail_index_transaction_rollback(_t);
			return -1;
		}
	}

	*_t = NULL;
	i_zero(result_r);
	if (t->v.commit(t, result_r) < 0)
		return -1;

	if (index_undeleted) {
		index->index_deleted = FALSE;
		index->index_delete_requested = FALSE;
	}
	return 0;
}
+
/* Discard all of the transaction's changes and free it (NULLs *_t). */
void mail_index_transaction_rollback(struct mail_index_transaction **_t)
{
	struct mail_index_transaction *t = *_t;

	*_t = NULL;
	t->v.rollback(t);
}
+
+static struct mail_index_transaction_vfuncs trans_vfuncs = {
+ mail_index_transaction_reset_v,
+ mail_index_transaction_commit_v,
+ mail_index_transaction_rollback_v
+};
+
/* Begin a new transaction against the view. The view is referenced for
   the transaction's lifetime so it can't be synced away, and any
   registered mail_index_transaction_created hooks get a chance to wrap
   the transaction's vfuncs. */
struct mail_index_transaction *
mail_index_transaction_begin(struct mail_index_view *view,
			     enum mail_index_transaction_flags flags)
{
	struct mail_index_transaction *t;

	/* don't allow syncing view while there's ongoing transactions */
	mail_index_view_ref(view);

	t = i_new(struct mail_index_transaction, 1);
	t->refcount = 1;
	t->v = trans_vfuncs;
	t->view = view;
	t->flags = flags;

	if (view->syncing) {
		/* transaction view cannot work if new records are being added
		   in two places. make sure it doesn't happen. */
		t->no_appends = TRUE;
		t->first_new_seq = (uint32_t)-1;
	} else {
		t->first_new_seq =
			mail_index_view_get_messages_count(t->view) + 1;
	}

	i_array_init(&t->module_contexts,
		     I_MIN(5, mail_index_module_register.id));
	DLLIST_PREPEND(&view->transactions_list, t);

	if (array_is_created(&hook_mail_index_transaction_created)) {
		/* let each hook wrap t->v; hook_build tracks the vfuncs
		   actually changed via t->vlast */
		struct hook_build_context *ctx =
			hook_build_init((void *)&t->v, sizeof(t->v));
		hook_mail_index_transaction_created_t *callback;
		array_foreach_elem(&hook_mail_index_transaction_created, callback) {
			callback(t);
			hook_build_update(ctx, t->vlast);
		}
		t->vlast = NULL;
		hook_build_deinit(&ctx);
	}
	return t;
}
diff --git a/src/lib-index/mail-index-util.c b/src/lib-index/mail-index-util.c
new file mode 100644
index 0000000..2b27ae1
--- /dev/null
+++ b/src/lib-index/mail-index-util.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "bsearch-insert-pos.h"
+#include "mail-index-private.h"
+
/* Encode a 32bit-aligned file offset (< 1 GB) into its on-disk form:
   the offset is divided by 4 and spread 7 bits per byte with the high bit
   of every byte set, then stored big-endian. The always-set high bits let
   mail_index_offset_to_uint32() detect partially written values. */
uint32_t mail_index_uint32_to_offset(uint32_t offset)
{
	i_assert(offset < 0x40000000);
	i_assert((offset & 3) == 0);

	offset >>= 2;
	offset = 0x00000080 | ((offset & 0x0000007f)) |
		0x00008000 | ((offset & 0x00003f80) >> 7 << 8) |
		0x00800000 | ((offset & 0x001fc000) >> 14 << 16) |
		0x80000000 | ((offset & 0x0fe00000) >> 21 << 24);

	return cpu32_to_be(offset);
}
+
/* Decode an offset written by mail_index_uint32_to_offset(). Returns 0 if
   any byte's high marker bit is missing (i.e. the value wasn't fully
   written). */
uint32_t mail_index_offset_to_uint32(uint32_t offset)
{
	offset = be32_to_cpu(offset);

	if ((offset & 0x80808080) != 0x80808080)
		return 0;

	/* reassemble the four 7bit groups and multiply back by 4 */
	return (((offset & 0x0000007f)) |
		((offset & 0x00007f00) >> 8 << 7) |
		((offset & 0x007f0000) >> 16 << 14) |
		((offset & 0x7f000000) >> 24 << 21)) << 2;
}
+
/* Append num to *p in a variable length encoding (7 bits per byte, least
   significant group first; a set high bit means another byte follows) and
   advance *p past the written bytes. */
void mail_index_pack_num(uint8_t **p, uint32_t num)
{
	uint8_t *out = *p;

	while (num >= 0x80) {
		*out++ = (num & 0x7f) | 0x80;
		num >>= 7;
	}
	*out++ = num;

	*p = out;
}
+
/* Decode a number written by mail_index_pack_num(), advancing *p past it.
   Returns 0 on success, -1 on truncated (*p unchanged) or corrupted
   (*p set to end) input; *num_r is 0 on failure. */
int mail_index_unpack_num(const uint8_t **p, const uint8_t *end,
			  uint32_t *num_r)
{
	const uint8_t *c = *p;
	uint32_t value = 0;
	unsigned int bits = 0;

	for (;;) {
		if (unlikely(c == end)) {
			/* we should never see EOF */
			*num_r = 0;
			return -1;
		}
		if (unlikely(bits >= 32)) {
			/* broken input: more than 5 bytes. BUGFIX: this was
			   previously checked only after the shift below had
			   already executed with bits >= 32, which is
			   undefined behavior. */
			*p = end;
			*num_r = 0;
			return -1;
		}

		/* BUGFIX: cast to uint32_t before shifting - (*c & 0x7f) is
		   a signed int, and shifting it into the sign bit (bits ==
		   28 with high bits set) is undefined behavior */
		value |= (uint32_t)(*c & 0x7f) << bits;
		if (*c < 0x80)
			break;

		bits += 7;
		c++;
	}

	*p = c + 1;
	*num_r = value;
	return 0;
}
+
/* bsearch comparator for seq_array elements; each element begins with a
   32bit sequence. BUGFIX: the previous (int)a - (int)b subtraction gave
   the wrong ordering (and signed overflow) once a sequence exceeded
   INT_MAX; compare explicitly instead. */
static int mail_index_seq_record_cmp(const uint32_t *key_seq,
				     const uint32_t *data_seq)
{
	if (*key_seq < *data_seq)
		return -1;
	if (*key_seq > *data_seq)
		return 1;
	return 0;
}
+
/* Binary search seq from the array. Returns TRUE with its index in idx_r
   if found; otherwise FALSE with the position where it would be inserted
   (see array_bsearch_insert_pos()). */
bool mail_index_seq_array_lookup(const ARRAY_TYPE(seq_array) *array,
				 uint32_t seq, unsigned int *idx_r)
{
	return array_bsearch_insert_pos(array, &seq,
					mail_index_seq_record_cmp, idx_r);
}
+
+void mail_index_seq_array_alloc(ARRAY_TYPE(seq_array) *array,
+ size_t record_size)
+{
+ size_t aligned_record_size = (record_size + 3) & ~3U;
+
+ i_assert(!array_is_created(array));
+
+ array_create(array, default_pool,
+ sizeof(uint32_t) + aligned_record_size,
+ 1024 / (sizeof(uint32_t) + aligned_record_size));
+}
+
/* Insert or update the record for seq, keeping the array sorted by
   sequence. Returns TRUE if seq already existed (and, if old_record is
   non-NULL, copies the previous record data into it first), FALSE if a
   new element was inserted. */
bool mail_index_seq_array_add(ARRAY_TYPE(seq_array) *array, uint32_t seq,
			      const void *record, size_t record_size,
			      void *old_record)
{
	void *p;
	unsigned int idx, aligned_record_size;

	/* records need to be 32bit aligned */
	aligned_record_size = (record_size + 3) & ~3U;

	if (!array_is_created(array))
		mail_index_seq_array_alloc(array, record_size);
	i_assert(array->arr.element_size == sizeof(seq) + aligned_record_size);

	if (mail_index_seq_array_lookup(array, seq, &idx)) {
		/* already there, update */
		p = array_idx_modifiable(array, idx);
		if (old_record != NULL) {
			/* save the old record before overwriting it */
			memcpy(old_record, PTR_OFFSET(p, sizeof(seq)),
			       record_size);
		}
		memcpy(PTR_OFFSET(p, sizeof(seq)), record, record_size);
		return TRUE;
	} else {
		/* insert; the element begins with the sequence, followed by
		   the record data */
		p = array_insert_space(array, idx);
		memcpy(p, &seq, sizeof(seq));
		memcpy(PTR_OFFSET(p, sizeof(seq)), record, record_size);
		return FALSE;
	}
}
diff --git a/src/lib-index/mail-index-util.h b/src/lib-index/mail-index-util.h
new file mode 100644
index 0000000..b61e16a
--- /dev/null
+++ b/src/lib-index/mail-index-util.h
@@ -0,0 +1,22 @@
#ifndef MAIL_INDEX_UTIL_H
#define MAIL_INDEX_UTIL_H

/* Sorted array of 32bit sequence numbers, each element optionally
   followed by extra record data (see mail_index_seq_array_alloc()). */
ARRAY_DEFINE_TYPE(seq_array, uint32_t);

/* Convert a 32bit-aligned file offset (< 1 GB) to/from its high-bit
   marked on-disk encoding. mail_index_offset_to_uint32() returns 0 if the
   value isn't a fully written encoded offset. */
uint32_t mail_index_uint32_to_offset(uint32_t offset);
uint32_t mail_index_offset_to_uint32(uint32_t offset);

/* Maximum number of bytes mail_index_pack_num() can write for a uint32 */
#define MAIL_INDEX_PACK_MAX_SIZE ((sizeof(uint32_t) * 8 + 7) / 7)
/* Write num at *p in a 7-bits-per-byte variable length encoding and
   advance *p past it. */
void mail_index_pack_num(uint8_t **p, uint32_t num);
/* Reverse of mail_index_pack_num(). Returns 0 and advances *p on success,
   -1 on truncated or corrupted input. */
int mail_index_unpack_num(const uint8_t **p, const uint8_t *end,
			  uint32_t *num_r);

/* Binary search seq from the array. Returns TRUE and its index if found,
   otherwise FALSE with the insertion position in idx_r. */
bool mail_index_seq_array_lookup(const ARRAY_TYPE(seq_array) *array,
				 uint32_t seq, unsigned int *idx_r);
/* Create a seq_array with room for record_size bytes of 32bit-aligned
   data after each sequence. */
void mail_index_seq_array_alloc(ARRAY_TYPE(seq_array) *array,
				size_t record_size);
/* Insert or update the record for seq. Returns TRUE if it already existed
   (old_record, if non-NULL, receives the previous data). */
bool mail_index_seq_array_add(ARRAY_TYPE(seq_array) *array, uint32_t seq,
			      const void *record, size_t record_size,
			      void *old_record) ATTR_NULL(5);

#endif
diff --git a/src/lib-index/mail-index-view-private.h b/src/lib-index/mail-index-view-private.h
new file mode 100644
index 0000000..6e8b091
--- /dev/null
+++ b/src/lib-index/mail-index-view-private.h
@@ -0,0 +1,120 @@
+#ifndef MAIL_INDEX_VIEW_PRIVATE_H
+#define MAIL_INDEX_VIEW_PRIVATE_H
+
+#include "mail-index-private.h"
+
+/* A contiguous area of a transaction log file, identified by file
+   sequence, starting offset and length. Used to remember which log
+   ranges were written with the "hidden" flag. */
+struct mail_index_view_log_sync_area {
+	uint32_t log_file_seq;
+	unsigned int length;
+	uoff_t log_file_offset;
+};
+ARRAY_DEFINE_TYPE(view_log_sync_area, struct mail_index_view_log_sync_area);
+
+/* Virtual function table for views. Allows transaction views to override
+   the lookup behavior of regular index views. */
+struct mail_index_view_vfuncs {
+	void (*close)(struct mail_index_view *view);
+	uint32_t (*get_messages_count)(struct mail_index_view *view);
+	const struct mail_index_header *
+		(*get_header)(struct mail_index_view *view);
+	const struct mail_index_record *
+		(*lookup_full)(struct mail_index_view *view, uint32_t seq,
+			       struct mail_index_map **map_r, bool *expunged_r);
+	void (*lookup_uid)(struct mail_index_view *view, uint32_t seq,
+			   uint32_t *uid_r);
+	void (*lookup_seq_range)(struct mail_index_view *view,
+				 uint32_t first_uid, uint32_t last_uid,
+				 uint32_t *first_seq_r, uint32_t *last_seq_r);
+	void (*lookup_first)(struct mail_index_view *view,
+			     enum mail_flags flags, uint8_t flags_mask,
+			     uint32_t *seq_r);
+	void (*lookup_keywords)(struct mail_index_view *view, uint32_t seq,
+				ARRAY_TYPE(keyword_indexes) *keyword_idx);
+	void (*lookup_ext_full)(struct mail_index_view *view, uint32_t seq,
+				uint32_t ext_id, struct mail_index_map **map_r,
+				const void **data_r, bool *expunged_r);
+	void (*get_header_ext)(struct mail_index_view *view,
+			       struct mail_index_map *map, uint32_t ext_id,
+			       const void **data_r, size_t *data_size_r);
+	bool (*ext_get_reset_id)(struct mail_index_view *view,
+				 struct mail_index_map *map,
+				 uint32_t ext_id, uint32_t *reset_id_r);
+};
+
+union mail_index_view_module_context {
+	struct mail_index_module_register *reg;
+};
+
+/* A stable snapshot of the index. Lookups return consistent results
+   until the view is explicitly synced forward. */
+struct mail_index_view {
+	/* Linked into mail_index.views doubly-linked list. */
+	struct mail_index_view *prev, *next;
+	int refcount;
+
+	struct mail_index_view_vfuncs v;
+	struct mail_index *index;
+	struct mail_transaction_log_view *log_view;
+
+	/* Source location where the mail_index_view_open() call was done.
+	   This helps debugging especially if a view is leaked. */
+	const char *source_filename;
+	unsigned int source_linenum;
+
+	/* Set the view inconsistent if this doesn't match mail_index.indexid */
+	uint32_t indexid;
+	/* Set the view inconsistent if this doesn't match
+	   mail_index.inconsistency_id. */
+	unsigned int inconsistency_id;
+	uint64_t highest_modseq;
+
+	struct mail_index_map *map;
+	/* All mappings where we have returned records. They need to be kept
+	   valid until view is synchronized. */
+	ARRAY(struct mail_index_map *) map_refs;
+
+	/* expunge <= head. The expunge seq/offset points to the log file
+	   how far expunges have been synced. The head seq/offset points to
+	   how far non-expunges have been synced. They're usually the same,
+	   unless MAIL_INDEX_VIEW_SYNC_FLAG_NOEXPUNGES has been used. */
+	uint32_t log_file_expunge_seq, log_file_head_seq;
+	uoff_t log_file_expunge_offset, log_file_head_offset;
+
+	/* Transaction log areas which are returned as
+	   mail_index_view_sync_rec.hidden=TRUE. Used to implement
+	   MAIL_INDEX_TRANSACTION_FLAG_HIDE. */
+	ARRAY_TYPE(view_log_sync_area) syncs_hidden;
+
+	/* Module-specific contexts. */
+	ARRAY(union mail_index_view_module_context *) module_contexts;
+
+	/* Linked list of all transactions opened for the view. */
+	struct mail_index_transaction *transactions_list;
+
+	/* View is currently inconsistent. It can't be synced. */
+	bool inconsistent:1;
+	/* this view is being synced */
+	bool syncing:1;
+};
+
+/* Open a view backed by the given map instead of the index's head map. */
+struct mail_index_view *
+mail_index_view_open_with_map(struct mail_index *index,
+			      struct mail_index_map *map);
+/* Initialize dest as a copy of src (shares src's map via refcount). */
+void mail_index_view_clone(struct mail_index_view *dest,
+			   const struct mail_index_view *src,
+			   const char *source_filename,
+			   unsigned int source_linenum);
+/* Macro wrapper records the caller's source location for leak debugging. */
+#define mail_index_view_clone(dest, src) \
+	mail_index_view_clone(dest, src, __FILE__, __LINE__)
+
+/* Duplicate src into a new view with a private clone of its map. */
+struct mail_index_view *
+mail_index_view_dup_private(const struct mail_index_view *src,
+			    const char *source_filename,
+			    unsigned int source_linenum);
+#define mail_index_view_dup_private(src) \
+	mail_index_view_dup_private(src, __FILE__, __LINE__)
+void mail_index_view_ref(struct mail_index_view *view);
+/* Drop the extra map references accumulated by record lookups. */
+void mail_index_view_unref_maps(struct mail_index_view *view);
+/* Remember a log area whose changes must be reported as hidden. */
+void mail_index_view_add_hidden_transaction(struct mail_index_view *view,
+					    uint32_t log_file_seq,
+					    uoff_t log_file_offset,
+					    unsigned int length);
+
+struct mail_index_view *mail_index_dummy_view_open(struct mail_index *index);
+
+#endif
diff --git a/src/lib-index/mail-index-view-sync.c b/src/lib-index/mail-index-view-sync.c
new file mode 100644
index 0000000..d8a5793
--- /dev/null
+++ b/src/lib-index/mail-index-view-sync.c
@@ -0,0 +1,1045 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "buffer.h"
+#include "mail-index-view-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-index-modseq.h"
+#include "mail-transaction-log.h"
+
+
+/* State for one view sync, created by mail_index_view_sync_begin() and
+   freed by mail_index_view_sync_commit(). */
+struct mail_index_view_sync_ctx {
+	struct mail_index_view *view;
+	enum mail_index_view_sync_flags flags;
+	struct mail_index_sync_map_ctx sync_map_ctx;
+
+	/* After syncing view, map is replaced with sync_new_map. */
+	struct mail_index_map *sync_new_map;
+
+	/* Expunges seen during this sync, as UIDs first, converted to
+	   sequences by view_sync_expunges2seqs(). */
+	ARRAY_TYPE(seq_range) expunges;
+	/* Sanity check: map must have at least this many messages left
+	   when the sync is committed. */
+	unsigned int finish_min_msg_count;
+
+	/* Currently processed transaction record (NULL when finished). */
+	const struct mail_transaction_header *hdr;
+	const void *data;
+
+	/* temporary variables while handling lost transaction logs: */
+	ARRAY_TYPE(keyword_indexes) lost_old_kw, lost_new_kw;
+	buffer_t *lost_kw_buf;
+	uint32_t lost_new_ext_idx;
+	/* result of lost transaction logs: */
+	ARRAY_TYPE(seq_range) lost_flags;
+	unsigned int lost_flag_idx;
+
+	/* Read position within the current transaction's data. */
+	size_t data_offset;
+	bool failed:1;
+	bool sync_map_update:1;
+	bool skipped_expunges:1;
+	bool last_read:1;
+	bool log_was_lost:1;
+	/* Current transaction lies in a hidden log area. */
+	bool hidden:1;
+};
+
+/* Position the view's log view over the range of transactions that this
+   sync needs to process. Returns 1 on success (with *reset_r/*partial_sync_r
+   filled), 0 if the wanted log range was already lost, -1 on error. */
+static int
+view_sync_set_log_view_range(struct mail_index_view *view, bool sync_expunges,
+			     bool *reset_r, bool *partial_sync_r,
+			     const char **error_r)
+{
+	const struct mail_index_header *hdr = &view->index->map->hdr;
+	uint32_t start_seq, end_seq;
+	uoff_t start_offset, end_offset;
+	const char *reason;
+	int ret;
+
+	*partial_sync_r = FALSE;
+
+	if (sync_expunges) {
+		/* Sync everything after the last expunge syncing position.
+		   We'll just skip over the non-expunge transaction records
+		   that have already been synced previously. */
+		start_seq = view->log_file_expunge_seq;
+		start_offset = view->log_file_expunge_offset;
+	} else {
+		/* Sync only new changes since the last view sync. */
+		start_seq = view->log_file_head_seq;
+		start_offset = view->log_file_head_offset;
+	}
+	/* Sync the view up to the (already refreshed) index map. */
+	end_seq = hdr->log_file_seq;
+	end_offset = hdr->log_file_head_offset;
+
+	/* The index map must never point behind where the view has already
+	   synced; if it does, the log position went backwards. */
+	if (end_seq < view->log_file_head_seq ||
+	    (end_seq == view->log_file_head_seq &&
+	     end_offset < view->log_file_head_offset)) {
+		*error_r = t_strdup_printf(
+			"%s log position went backwards "
+			"(%u,%"PRIuUOFF_T" < %u,%"PRIuUOFF_T")",
+			view->index->filepath, end_seq, end_offset,
+			view->log_file_head_seq, view->log_file_head_offset);
+		return -1;
+	}
+
+	for (;;) {
+		/* the view begins from the first non-synced transaction */
+		ret = mail_transaction_log_view_set(view->log_view,
+						    start_seq, start_offset,
+						    end_seq, end_offset,
+						    reset_r, &reason);
+		if (ret <= 0) {
+			*error_r = t_strdup_printf(
+				"Failed to map view for %s: %s",
+				view->index->filepath, reason);
+			return ret;
+		}
+
+		if (!*reset_r || sync_expunges)
+			break;
+
+		/* log was reset, but we don't want to sync expunges.
+		   we can't do this, so sync only up to the reset. */
+		mail_transaction_log_view_get_prev_pos(view->log_view,
+						       &end_seq, &end_offset);
+		/* retry with the end position at the end of the previous
+		   log file (UOFF_T_MAX = its entire remaining length) */
+		end_seq--; end_offset = UOFF_T_MAX;
+		if (end_seq < start_seq) {
+			/* we have only this reset log */
+			mail_transaction_log_view_clear(view->log_view,
+				view->log_file_expunge_seq);
+			break;
+		}
+		*partial_sync_r = TRUE;
+	}
+	return 1;
+}
+
+/* Convert ctx->expunges from UID ranges to sequence ranges in place,
+   dropping ranges whose messages no longer exist in the view. Returns
+   the total number of expunged messages. */
+static unsigned int
+view_sync_expunges2seqs(struct mail_index_view_sync_ctx *ctx)
+{
+	struct mail_index_view *view = ctx->view;
+	struct seq_range *src, *src_end, *dest;
+	unsigned int count, expunge_count = 0;
+	uint32_t prev_seq = 0;
+
+	/* convert UIDs to sequences */
+	src = dest = array_get_modifiable(&ctx->expunges, &count);
+	src_end = src + count;
+	for (; src != src_end; src++) {
+		if (!mail_index_lookup_seq_range(view, src->seq1, src->seq2,
+						 &dest->seq1, &dest->seq2))
+			count--;
+		else {
+			/* input was sorted and non-overlapping, so the
+			   resulting sequences must also be ascending */
+			i_assert(dest->seq1 > prev_seq);
+			prev_seq = dest->seq2;
+
+			expunge_count += dest->seq2 - dest->seq1 + 1;
+			dest++;
+		}
+	}
+	/* shrink the array to drop the slots freed by skipped ranges */
+	array_delete(&ctx->expunges, count,
+		     array_count(&ctx->expunges) - count);
+	return expunge_count;
+}
+
+/* Merge an array of mail_transaction_expunge records (UID ranges) from
+   a log transaction into dest. src_size is the raw byte size. */
+static void
+view_sync_add_expunge_range(ARRAY_TYPE(seq_range) *dest,
+			    const struct seq_range *src, size_t src_size)
+{
+	unsigned int i, src_count;
+
+	i_assert(src_size % sizeof(*src) == 0);
+
+	src_count = src_size / sizeof(*src);
+	for (i = 0; i < src_count; i++)
+		seq_range_array_add_range(dest, src[i].seq1, src[i].seq2);
+}
+
+/* Merge the UIDs of mail_transaction_expunge_guid records from a log
+   transaction into dest. src_size is the raw byte size. */
+static void
+view_sync_add_expunge_guids(ARRAY_TYPE(seq_range) *dest,
+			    const struct mail_transaction_expunge_guid *src,
+			    size_t src_size)
+{
+	unsigned int i, src_count;
+
+	i_assert(src_size % sizeof(*src) == 0);
+
+	src_count = src_size / sizeof(*src);
+	for (i = 0; i < src_count; i++)
+		seq_range_array_add(dest, src[i].uid);
+}
+
+/* Scan the whole pending log range for committed (external) expunge
+   transactions and collect them into ctx->expunges as sequence ranges.
+   The log view position is restored afterwards. Returns 0 on success,
+   -1 if reading the log failed. */
+static int
+view_sync_get_expunges(struct mail_index_view_sync_ctx *ctx,
+		       unsigned int *expunge_count_r)
+{
+	struct mail_index_view *view = ctx->view;
+	const struct mail_transaction_header *hdr;
+	const void *data;
+	int ret;
+
+	/* get a list of expunge transactions. there may be some that we have
+	   already synced, but it doesn't matter because they'll get dropped
+	   out when converting to sequences. the uid ranges' validity has
+	   already been verified, so we can use them directly. */
+	mail_transaction_log_view_mark(view->log_view);
+	while ((ret = mail_transaction_log_view_next(view->log_view,
+						     &hdr, &data)) > 0) {
+		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
+			/* skip expunge requests */
+			continue;
+		}
+		if ((hdr->type & MAIL_TRANSACTION_EXPUNGE_GUID) != 0) {
+			view_sync_add_expunge_guids(&ctx->expunges,
+						    data, hdr->size);
+		} else if ((hdr->type & MAIL_TRANSACTION_EXPUNGE) != 0) {
+			view_sync_add_expunge_range(&ctx->expunges,
+						    data, hdr->size);
+		}
+	}
+	/* return to the marked position for the actual sync pass */
+	mail_transaction_log_view_rewind(view->log_view);
+
+	*expunge_count_r = view_sync_expunges2seqs(ctx);
+	return ret;
+}
+
+/* Returns TRUE if any UID in the given expunge ranges still exists in
+   the view, i.e. the expunge would actually remove something here. */
+static bool have_existing_expunges(struct mail_index_view *view,
+				   const struct seq_range *range, size_t size)
+{
+	const struct seq_range *range_end;
+	uint32_t seq1, seq2;
+
+	range_end = CONST_PTR_OFFSET(range, size);
+	for (; range != range_end; range++) {
+		if (mail_index_lookup_seq_range(view, range->seq1, range->seq2,
+						&seq1, &seq2))
+			return TRUE;
+	}
+	return FALSE;
+}
+
+/* Returns TRUE if any of the GUID-expunged UIDs still exists in the view. */
+static bool
+have_existing_guid_expunge(struct mail_index_view *view,
+			   const struct mail_transaction_expunge_guid *expunges,
+			   size_t size)
+{
+	const struct mail_transaction_expunge_guid *expunges_end;
+	uint32_t seq;
+
+	expunges_end = CONST_PTR_OFFSET(expunges, size);
+	for (; expunges != expunges_end; expunges++) {
+		if (mail_index_lookup_seq(view, expunges->uid, &seq))
+			return TRUE;
+	}
+	return FALSE;
+}
+
+/* Returns TRUE if the pending log range contains any committed expunge
+   that would still remove a message from this view. The log view
+   position is restored before returning. */
+static bool view_sync_have_expunges(struct mail_index_view *view)
+{
+	const struct mail_transaction_header *hdr;
+	const void *data;
+	bool have_expunges = FALSE;
+	int ret;
+
+	if (mail_transaction_log_view_is_last(view->log_view))
+		return FALSE;
+
+	mail_transaction_log_view_mark(view->log_view);
+
+	while ((ret = mail_transaction_log_view_next(view->log_view,
+						     &hdr, &data)) > 0) {
+		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
+			/* skip expunge requests */
+			continue;
+		}
+		if ((hdr->type & MAIL_TRANSACTION_EXPUNGE_GUID) != 0) {
+			/* we have an expunge. see if it still exists. */
+			if (have_existing_guid_expunge(view, data, hdr->size)) {
+				have_expunges = TRUE;
+				break;
+			}
+		} else if ((hdr->type & MAIL_TRANSACTION_EXPUNGE) != 0) {
+			/* we have an expunge. see if it still exists. */
+			if (have_existing_expunges(view, data, hdr->size)) {
+				have_expunges = TRUE;
+				break;
+			}
+		}
+	}
+
+	mail_transaction_log_view_rewind(view->log_view);
+
+	/* handle failures as having expunges (which is safer).
+	   we'll probably fail later. */
+	return ret < 0 || have_expunges;
+}
+
+/* qsort() comparator for unsigned ints, ascending. */
+static int uint_cmp(const void *p1, const void *p2)
+{
+	const unsigned int *u1 = p1, *u2 = p2;
+
+	if (*u1 < *u2)
+		return -1;
+	if (*u1 > *u2)
+		return 1;
+	return 0;
+}
+
+/* Compare the old and new keyword index sets as unordered sets.
+   NOTE: sorts both arrays in place as a side effect. */
+static bool view_sync_lost_keywords_equal(struct mail_index_view_sync_ctx *ctx)
+{
+	unsigned int *old_idx, *new_idx;
+	unsigned int old_count, new_count;
+
+	old_idx = array_get_modifiable(&ctx->lost_old_kw, &old_count);
+	new_idx = array_get_modifiable(&ctx->lost_new_kw, &new_count);
+	if (old_count != new_count)
+		return FALSE;
+
+	qsort(old_idx, old_count, sizeof(*old_idx), uint_cmp);
+	qsort(new_idx, new_count, sizeof(*new_idx), uint_cmp);
+	return memcmp(old_idx, new_idx, old_count * sizeof(*old_idx)) == 0;
+}
+
+/* Apply every keyword in ctx->lost_new_kw to the message with the given
+   UID by synthesizing MAIL_TRANSACTION_KEYWORD_UPDATE records and feeding
+   them through the sync map. Used when the transaction log was lost.
+   Returns 0 on success, -1 on failure. */
+static int view_sync_update_keywords(struct mail_index_view_sync_ctx *ctx,
+				     uint32_t uid)
+{
+	struct mail_transaction_header thdr;
+	struct mail_transaction_keyword_update kw_up;
+	const unsigned int *kw_idx;
+	const char *const *kw_names;
+	unsigned int i, count;
+
+	kw_idx = array_get(&ctx->lost_new_kw, &count);
+	if (count == 0)
+		return 0;
+	kw_names = array_front(&ctx->view->index->keywords);
+
+	i_zero(&thdr);
+	thdr.type = MAIL_TRANSACTION_KEYWORD_UPDATE | MAIL_TRANSACTION_EXTERNAL;
+	i_zero(&kw_up);
+	kw_up.modify_type = MODIFY_ADD;
+	/* add new flags one by one */
+	for (i = 0; i < count; i++) {
+		/* build the record: header, keyword name padded to a
+		   32bit boundary, then the uid1..uid2 range (uid..uid) */
+		kw_up.name_size = strlen(kw_names[kw_idx[i]]);
+		buffer_set_used_size(ctx->lost_kw_buf, 0);
+		buffer_append(ctx->lost_kw_buf, &kw_up, sizeof(kw_up));
+		buffer_append(ctx->lost_kw_buf, kw_names[kw_idx[i]],
+			      kw_up.name_size);
+		if (ctx->lost_kw_buf->used % 4 != 0) {
+			buffer_append_zero(ctx->lost_kw_buf,
+					   4 - ctx->lost_kw_buf->used % 4);
+		}
+		buffer_append(ctx->lost_kw_buf, &uid, sizeof(uid));
+		buffer_append(ctx->lost_kw_buf, &uid, sizeof(uid));
+
+		thdr.size = ctx->lost_kw_buf->used;
+		if (mail_index_sync_record(&ctx->sync_map_ctx, &thdr,
+					   ctx->lost_kw_buf->data) < 0)
+			return -1;
+	}
+	return 0;
+}
+
+/* Compare one message between the view's old map and the index's new map
+   (matched by UID) and apply any flag/keyword differences to the old map
+   via synthesized transactions. Adds the UID to ctx->lost_flags when the
+   client-visible flags, keywords or (if tracked) modseq changed. Used
+   when the transaction log was lost. Returns 0 on success, -1 on error. */
+static int view_sync_apply_lost_changes(struct mail_index_view_sync_ctx *ctx,
+					uint32_t old_seq, uint32_t new_seq)
+{
+	struct mail_index_map *old_map = ctx->view->map;
+	struct mail_index_map *new_map = ctx->view->index->map;
+	const struct mail_index_record *old_rec, *new_rec;
+	struct mail_transaction_header thdr;
+	const struct mail_index_ext *ext;
+	const uint64_t *modseqp;
+	uint64_t new_modseq;
+	bool changed = FALSE;
+
+	old_rec = MAIL_INDEX_REC_AT_SEQ(old_map, old_seq);
+	new_rec = MAIL_INDEX_REC_AT_SEQ(new_map, new_seq);
+
+	i_zero(&thdr);
+	if (old_rec->flags != new_rec->flags) {
+		struct mail_transaction_flag_update flag_update;
+
+		/* check this before syncing the record, since it updates
+		   old_rec. */
+		if ((old_rec->flags & MAIL_INDEX_FLAGS_MASK) !=
+		    (new_rec->flags & MAIL_INDEX_FLAGS_MASK))
+			changed = TRUE;
+
+		thdr.type = MAIL_TRANSACTION_FLAG_UPDATE |
+			MAIL_TRANSACTION_EXTERNAL;
+		thdr.size = sizeof(flag_update);
+
+		/* overwrite the flags completely: add the new ones and
+		   remove everything else */
+		i_zero(&flag_update);
+		flag_update.uid1 = flag_update.uid2 = new_rec->uid;
+		flag_update.add_flags = new_rec->flags;
+		flag_update.remove_flags = ENUM_NEGATE(new_rec->flags) & 0xff;
+		if (mail_index_sync_record(&ctx->sync_map_ctx, &thdr,
+					   &flag_update) < 0)
+			return -1;
+	}
+
+	mail_index_map_lookup_keywords(old_map, old_seq, &ctx->lost_old_kw);
+	mail_index_map_lookup_keywords(new_map, new_seq, &ctx->lost_new_kw);
+	if (!view_sync_lost_keywords_equal(ctx)) {
+		struct mail_transaction_keyword_reset kw_reset;
+
+		thdr.type = MAIL_TRANSACTION_KEYWORD_RESET |
+			MAIL_TRANSACTION_EXTERNAL;
+		thdr.size = sizeof(kw_reset);
+
+		/* remove all old flags by resetting them */
+		i_zero(&kw_reset);
+		kw_reset.uid1 = kw_reset.uid2 = new_rec->uid;
+		if (mail_index_sync_record(&ctx->sync_map_ctx, &thdr,
+					   &kw_reset) < 0)
+			return -1;
+
+		if (view_sync_update_keywords(ctx, new_rec->uid) < 0)
+			return -1;
+		changed = TRUE;
+	}
+
+	if (changed) {
+		/* flags or keywords changed */
+	} else if (ctx->view->highest_modseq != 0 &&
+		   ctx->lost_new_ext_idx != (uint32_t)-1) {
+		/* if modseq has changed include this message in changed flags
+		   list, even if we didn't see any changes above. */
+		ext = array_idx(&new_map->extensions, ctx->lost_new_ext_idx);
+		modseqp = CONST_PTR_OFFSET(new_rec, ext->record_offset);
+		new_modseq = *modseqp;
+
+		if (new_modseq > ctx->view->highest_modseq)
+			changed = TRUE;
+	}
+
+	/* without modseqs lost_flags isn't updated perfectly correctly, because
+	   by the time we're comparing old flags it may have changed from what
+	   we last sent to the client (because the map is shared). This could
+	   be avoided by always keeping a private copy of the map in the view,
+	   but that's a waste of memory for as rare of a problem as this. */
+	if (changed)
+		seq_range_array_add(&ctx->lost_flags, new_rec->uid);
+	return 0;
+}
+
+/* Recover from a lost transaction log: diff the view's old map against
+   the index's new map (both sorted by UID) and synthesize the expunges,
+   appends and flag/keyword changes needed to bring the view forward.
+   Fills ctx->expunges and ctx->lost_flags. Returns 0 on success, -1 if
+   the maps are inconsistent or applying a change failed. */
+static int
+view_sync_get_log_lost_changes(struct mail_index_view_sync_ctx *ctx,
+			       unsigned int *expunge_count_r)
+{
+	struct mail_index_view *view = ctx->view;
+	struct mail_index_map *old_map = view->map;
+	struct mail_index_map *new_map = view->index->map;
+	const unsigned int old_count = old_map->hdr.messages_count;
+	const unsigned int new_count = new_map->hdr.messages_count;
+	const struct mail_index_record *old_rec, *new_rec;
+	struct mail_transaction_header thdr;
+	uint32_t seqi, seqj;
+
+	/* we don't update the map in the same order as it's typically done.
+	   map->rec_map may already have some messages appended that we don't
+	   want. get an atomic map to make sure these get removed. */
+	(void)mail_index_sync_get_atomic_map(&ctx->sync_map_ctx);
+
+	if (!mail_index_map_get_ext_idx(new_map, view->index->modseq_ext_id,
+					&ctx->lost_new_ext_idx))
+		ctx->lost_new_ext_idx = (uint32_t)-1;
+
+	i_array_init(&ctx->lost_flags, 64);
+	t_array_init(&ctx->lost_old_kw, 32);
+	t_array_init(&ctx->lost_new_kw, 32);
+	ctx->lost_kw_buf = t_buffer_create(128);
+
+	/* handle expunges and sync flags */
+	seqi = seqj = 1;
+	while (seqi <= old_count && seqj <= new_count) {
+		/* merge-walk both maps; UIDs ascend in both */
+		old_rec = MAIL_INDEX_REC_AT_SEQ(old_map, seqi);
+		new_rec = MAIL_INDEX_REC_AT_SEQ(new_map, seqj);
+		if (old_rec->uid == new_rec->uid) {
+			/* message found - check if flags have changed */
+			if (view_sync_apply_lost_changes(ctx, seqi, seqj) < 0)
+				return -1;
+			seqi++; seqj++;
+		} else if (old_rec->uid < new_rec->uid) {
+			/* message expunged */
+			seq_range_array_add(&ctx->expunges, old_rec->uid);
+			seqi++;
+		} else {
+			/* new message appeared out of nowhere */
+			mail_index_set_error(view->index,
+				"%s view is inconsistent: "
+				"uid=%u inserted in the middle of mailbox",
+				view->index->filepath, new_rec->uid);
+			return -1;
+		}
+	}
+	/* if there are old messages left, they're all expunged */
+	for (; seqi <= old_count; seqi++) {
+		old_rec = MAIL_INDEX_REC_AT_SEQ(old_map, seqi);
+		seq_range_array_add(&ctx->expunges, old_rec->uid);
+	}
+	/* if there are new messages left, they're all new messages */
+	thdr.type = MAIL_TRANSACTION_APPEND | MAIL_TRANSACTION_EXTERNAL;
+	thdr.size = sizeof(*new_rec);
+	for (; seqj <= new_count; seqj++) {
+		new_rec = MAIL_INDEX_REC_AT_SEQ(new_map, seqj);
+		if (mail_index_sync_record(&ctx->sync_map_ctx,
+					   &thdr, new_rec) < 0)
+			return -1;
+		mail_index_map_lookup_keywords(new_map, seqj,
+					       &ctx->lost_new_kw);
+		if (view_sync_update_keywords(ctx, new_rec->uid) < 0)
+			return -1;
+	}
+	*expunge_count_r = view_sync_expunges2seqs(ctx);
+
+	/* we have no idea how far we've synced - make sure these aren't used */
+	old_map->hdr.log_file_seq = 0;
+	old_map->hdr.log_file_head_offset = 0;
+	old_map->hdr.log_file_tail_offset = 0;
+
+	if ((ctx->flags & MAIL_INDEX_VIEW_SYNC_FLAG_NOEXPUNGES) != 0) {
+		/* Expunges aren't wanted to be synced. Remember if we skipped
+		   over any expunges. If yes, we must not update
+		   log_file_expunge_seq/offset at the end of the view sync
+		   so that a later sync can finish the expunges. */
+		array_clear(&ctx->expunges);
+		ctx->skipped_expunges = *expunge_count_r > 0;
+	}
+	/* After the view sync is finished, update
+	   log_file_head_seq/offset, since we've synced everything
+	   (except maybe the expunges) up to this point. */
+	view->log_file_head_seq = new_map->hdr.log_file_seq;
+	view->log_file_head_offset = new_map->hdr.log_file_head_offset;
+	return 0;
+}
+
+/* Forcibly fix the view by jumping it straight to the index's current
+   head map and log position, discarding any per-message change tracking.
+   Used for MAIL_INDEX_VIEW_SYNC_FLAG_FIX_INCONSISTENT. Returns 0 on
+   success, <=0 error code from the log view on failure. */
+static int mail_index_view_sync_init_fix(struct mail_index_view_sync_ctx *ctx)
+{
+	struct mail_index_view *view = ctx->view;
+	uint32_t seq;
+	uoff_t offset;
+	const char *reason;
+	bool reset;
+	int ret;
+
+	/* replace the view's map */
+	view->index->map->refcount++;
+	mail_index_unmap(&view->map);
+	view->map = view->index->map;
+
+	/* update log positions */
+	view->log_file_head_seq = seq = view->map->hdr.log_file_seq;
+	view->log_file_head_offset = offset =
+		view->map->hdr.log_file_head_offset;
+
+	/* set an empty log range (start == end) at the new position */
+	ret = mail_transaction_log_view_set(view->log_view, seq, offset,
+					    seq, offset, &reset, &reason);
+	if (ret <= 0) {
+		mail_index_set_error(view->index, "Failed to fix view for %s: %s",
+				     view->index->filepath, reason);
+		return ret;
+	}
+	view->inconsistent = FALSE;
+	return 0;
+}
+
+/* Begin syncing the view towards the index's current head map. Decides
+   between four strategies: (a) fix-inconsistent: jump to head map,
+   (b) log lost: diff old and new maps directly, (c) no expunges pending:
+   replace the map, (d) expunges or partial sync: update a private map
+   transaction by transaction. Never returns NULL; failures are recorded
+   in the returned context and reported by mail_index_view_sync_commit(). */
+struct mail_index_view_sync_ctx *
+mail_index_view_sync_begin(struct mail_index_view *view,
+			   enum mail_index_view_sync_flags flags)
+{
+	struct mail_index_view_sync_ctx *ctx;
+	struct mail_index_map *tmp_map;
+	unsigned int expunge_count = 0;
+	bool reset, partial_sync, sync_expunges, have_expunges;
+	const char *error;
+	int ret;
+
+	i_assert(!view->syncing);
+	i_assert(view->transactions_list == NULL);
+
+	view->syncing = TRUE;
+
+	/* Syncing the view invalidates all previous looked up records.
+	   Unreference the mappings this view keeps because of them. */
+	mail_index_view_unref_maps(view);
+
+	ctx = i_new(struct mail_index_view_sync_ctx, 1);
+	ctx->view = view;
+	ctx->flags = flags;
+
+	sync_expunges = (flags & MAIL_INDEX_VIEW_SYNC_FLAG_NOEXPUNGES) == 0;
+	if (sync_expunges)
+		i_array_init(&ctx->expunges, 64);
+	if ((flags & MAIL_INDEX_VIEW_SYNC_FLAG_FIX_INCONSISTENT) != 0) {
+		/* just get this view synced - don't return anything */
+		i_assert(sync_expunges);
+		if (mail_index_view_sync_init_fix(ctx) < 0)
+			ctx->failed = TRUE;
+		return ctx;
+	}
+	if (mail_index_view_is_inconsistent(view)) {
+		mail_index_set_error(view->index, "%s view is inconsistent",
+				     view->index->filepath);
+		ctx->failed = TRUE;
+		return ctx;
+	}
+
+	ret = view_sync_set_log_view_range(view, sync_expunges, &reset,
+					   &partial_sync, &error);
+	if (ret < 0) {
+		mail_index_set_error(view->index, "%s", error);
+		ctx->failed = TRUE;
+		return ctx;
+	}
+
+	if (ret == 0) {
+		/* ret==0: the wanted log range was already rotated away.
+		   Recover by diffing the maps directly. */
+		/* Log the warning only when all expunges have been synced
+		   by previous syncs. This way when there's a _FLAG_NOEXPUNGES
+		   sync, there's no second warning logged when the expunges
+		   finally are synced. */
+		if (view->log_file_expunge_seq == view->log_file_head_seq &&
+		    view->log_file_expunge_offset == view->log_file_head_offset) {
+			e_warning(view->index->event,
+				  "%s - generating missing logs", error);
+		}
+		ctx->log_was_lost = TRUE;
+		if (!sync_expunges)
+			i_array_init(&ctx->expunges, 64);
+		mail_index_sync_map_init(&ctx->sync_map_ctx, view,
+					 MAIL_INDEX_SYNC_HANDLER_VIEW);
+		ret = view_sync_get_log_lost_changes(ctx, &expunge_count);
+		mail_index_modseq_sync_end(&ctx->sync_map_ctx.modseq_ctx);
+		mail_index_sync_map_deinit(&ctx->sync_map_ctx);
+		if (ret < 0) {
+			mail_index_set_error(view->index,
+				"%s view syncing failed to apply changes",
+				view->index->filepath);
+			view->inconsistent = TRUE;
+			ctx->failed = TRUE;
+			return ctx;
+		}
+		have_expunges = expunge_count > 0;
+	} else if (sync_expunges) {
+		/* get list of all expunges first */
+		if (view_sync_get_expunges(ctx, &expunge_count) < 0) {
+			ctx->failed = TRUE;
+			return ctx;
+		}
+		have_expunges = expunge_count > 0;
+	} else if (view->log_file_expunge_seq == view->log_file_head_seq &&
+		   view->log_file_expunge_offset == view->log_file_head_offset) {
+		/* Previous syncs haven't left any pending expunges. See if
+		   this sync will. */
+		have_expunges = view_sync_have_expunges(view);
+	} else {
+		/* Expunges weren't synced in the previous sync either.
+		   We already know there are missing expunges. */
+		ctx->skipped_expunges = TRUE;
+		have_expunges = TRUE;
+	}
+
+	ctx->finish_min_msg_count = reset ? 0 :
+		view->map->hdr.messages_count - expunge_count;
+	if (!reset)
+		;
+	else if ((flags & MAIL_INDEX_VIEW_SYNC_FLAG_2ND_INDEX) != 0 &&
+		 view->map->hdr.messages_count == 0) {
+		/* The secondary index is still empty, so it may have
+		   just been created for the first time. This is
+		   expected, so it shouldn't cause the view to become
+		   inconsistent. */
+		if (mail_index_view_sync_init_fix(ctx) < 0)
+			ctx->failed = TRUE;
+		return ctx;
+	} else {
+		view->inconsistent = TRUE;
+		mail_index_set_error(view->index,
+				     "%s reset, view is now inconsistent",
+				     view->index->filepath);
+		ctx->failed = TRUE;
+		return ctx;
+	}
+
+	if (!have_expunges && !partial_sync) {
+		/* no expunges, we can just replace the map */
+		if (view->index->map->hdr.messages_count <
+		    ctx->finish_min_msg_count) {
+			mail_index_set_error(view->index,
+				"Index %s lost messages without expunging "
+				"(%u -> %u)", view->index->filepath,
+				view->map->hdr.messages_count,
+				view->index->map->hdr.messages_count);
+			ctx->finish_min_msg_count = 0;
+			view->inconsistent = TRUE;
+		}
+
+		view->index->map->refcount++;
+		mail_index_unmap(&view->map);
+		view->map = view->index->map;
+	} else {
+		/* a) expunges seen. b) doing a partial sync because we saw
+		   a reset.
+
+		   Create a private map which we update. If we're syncing
+		   expunges the map will finally be replaced with the head map
+		   to remove the expunged messages. */
+		ctx->sync_map_update = TRUE;
+
+		if (view->map->refcount > 1) {
+			/* map is shared with other views - clone it so our
+			   updates don't affect them */
+			tmp_map = mail_index_map_clone(view->map);
+			mail_index_unmap(&view->map);
+			view->map = tmp_map;
+		}
+
+		if (sync_expunges) {
+			ctx->sync_new_map = view->index->map;
+			ctx->sync_new_map->refcount++;
+		}
+	}
+	mail_index_sync_map_init(&ctx->sync_map_ctx, view,
+				 MAIL_INDEX_SYNC_HANDLER_VIEW);
+
+#ifdef DEBUG
+	mail_index_map_check(view->map);
+#endif
+	return ctx;
+}
+
+/* Returns TRUE if the log position (seq, offset) falls inside one of
+   the areas this view has marked hidden (transactions it committed
+   itself with MAIL_INDEX_TRANSACTION_FLAG_HIDE). */
+static bool
+view_sync_is_hidden(struct mail_index_view *view, uint32_t seq, uoff_t offset)
+{
+	const struct mail_index_view_log_sync_area *sync;
+
+	if (!array_is_created(&view->syncs_hidden))
+		return FALSE;
+
+	array_foreach(&view->syncs_hidden, sync) {
+		if (sync->log_file_offset <= offset &&
+		    offset - sync->log_file_offset < sync->length &&
+		    sync->log_file_seq == seq)
+			return TRUE;
+	}
+
+	return FALSE;
+}
+
+/* Decide whether the current transaction should be processed by this
+   sync. Skips records already synced by a previous pass and, with
+   _FLAG_NOEXPUNGES, skips committed expunges (remembering the first
+   skipped position in log_file_expunge_seq/offset). Advances the view's
+   head position past every record it accepts. */
+static bool
+mail_index_view_sync_want(struct mail_index_view_sync_ctx *ctx,
+			  const struct mail_transaction_header *hdr)
+{
+	struct mail_index_view *view = ctx->view;
+	uint32_t seq;
+	uoff_t offset, next_offset;
+
+	mail_transaction_log_view_get_prev_pos(view->log_view, &seq, &offset);
+	next_offset = offset + sizeof(*hdr) + hdr->size;
+
+	if ((hdr->type & (MAIL_TRANSACTION_EXPUNGE |
+			  MAIL_TRANSACTION_EXPUNGE_GUID)) != 0 &&
+	    (hdr->type & MAIL_TRANSACTION_EXTERNAL) != 0) {
+		if ((ctx->flags & MAIL_INDEX_VIEW_SYNC_FLAG_NOEXPUNGES) != 0) {
+			i_assert(!LOG_IS_BEFORE(seq, offset,
+						view->log_file_expunge_seq,
+						view->log_file_expunge_offset));
+			if (!ctx->skipped_expunges) {
+				/* remember where the first skipped expunge
+				   is, so a later sync can resume there */
+				view->log_file_expunge_seq = seq;
+				view->log_file_expunge_offset = offset;
+				ctx->skipped_expunges = TRUE;
+			}
+			return FALSE;
+		}
+		if (LOG_IS_BEFORE(seq, offset, view->log_file_expunge_seq,
+				  view->log_file_expunge_offset)) {
+			/* already synced */
+			return FALSE;
+		}
+	}
+
+	if (LOG_IS_BEFORE(seq, offset, view->log_file_head_seq,
+			  view->log_file_head_offset)) {
+		/* already synced */
+		return FALSE;
+	}
+
+	view->log_file_head_seq = seq;
+	view->log_file_head_offset = next_offset;
+	return TRUE;
+}
+
+/* Read the next wanted transaction from the log into ctx->hdr/ctx->data,
+   applying it to the view's private map when needed. Returns 1 when a
+   transaction was read, 0 at end of log (sets ctx->last_read), -1 on
+   error. */
+static int
+mail_index_view_sync_get_next_transaction(struct mail_index_view_sync_ctx *ctx)
+{
+	struct mail_transaction_log_view *log_view = ctx->view->log_view;
+	struct mail_index_view *view = ctx->view;
+	const struct mail_transaction_header *hdr;
+	uint32_t seq;
+	uoff_t offset;
+	int ret;
+	bool synced_to_map;
+
+	do {
+		/* Get the next transaction from log. */
+		ret = mail_transaction_log_view_next(log_view, &ctx->hdr,
+						     &ctx->data);
+		if (ret <= 0) {
+			if (ret < 0)
+				return -1;
+
+			ctx->hdr = NULL;
+			ctx->last_read = TRUE;
+			return 0;
+		}
+
+		hdr = ctx->hdr;
+		/* skip records we've already synced */
+	} while (!mail_index_view_sync_want(ctx, hdr));
+
+	mail_transaction_log_view_get_prev_pos(log_view, &seq, &offset);
+
+	/* If we started from a map that we didn't create ourself,
+	   some of the transactions may already be synced. at the end
+	   of this view sync we'll update file_seq=0 so that this check
+	   always becomes FALSE for subsequent syncs. */
+	synced_to_map = view->map->hdr.log_file_seq != 0 &&
+		LOG_IS_BEFORE(seq, offset, view->map->hdr.log_file_seq,
+			      view->map->hdr.log_file_head_offset);
+
+	/* Apply transaction to view's mapping if needed (meaning we
+	   didn't just re-map the view to head mapping). */
+	if (ctx->sync_map_update && !synced_to_map) {
+		/* expunges are skipped here; they're handled by replacing
+		   the map with sync_new_map at commit time */
+		if ((hdr->type & (MAIL_TRANSACTION_EXPUNGE |
+				  MAIL_TRANSACTION_EXPUNGE_GUID)) == 0) {
+			ret = mail_index_sync_record(&ctx->sync_map_ctx,
+						     hdr, ctx->data);
+		}
+		if (ret < 0)
+			return -1;
+	}
+
+	ctx->hidden = view_sync_is_hidden(view, seq, offset);
+	return 1;
+}
+
+/* Translate the next piece of the current transaction (at ctx->data_offset)
+   into a mail_index_view_sync_rec for the caller. Returns FALSE when the
+   transaction yields nothing (more) to report: either only internal flag
+   updates remain, or the transaction type isn't reported to callers at
+   all (ctx->hdr is then cleared so the next transaction gets read). */
+static bool
+mail_index_view_sync_get_rec(struct mail_index_view_sync_ctx *ctx,
+			     struct mail_index_view_sync_rec *rec)
+{
+	const struct mail_transaction_header *hdr = ctx->hdr;
+	const void *data = ctx->data;
+
+	switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
+	case MAIL_TRANSACTION_FLAG_UPDATE: {
+		const struct mail_transaction_flag_update *update =
+			CONST_PTR_OFFSET(data, ctx->data_offset);
+
+		/* data contains mail_transaction_flag_update[] */
+		for (;;) {
+			ctx->data_offset += sizeof(*update);
+			if (!MAIL_TRANSACTION_FLAG_UPDATE_IS_INTERNAL(update))
+				break;
+
+			/* skip internal flag changes */
+			if (ctx->data_offset == ctx->hdr->size)
+				return FALSE;
+
+			update = CONST_PTR_OFFSET(data, ctx->data_offset);
+		}
+
+		/* an update with no flag bits is a modseq-only change */
+		if (update->add_flags != 0 || update->remove_flags != 0)
+			rec->type = MAIL_INDEX_VIEW_SYNC_TYPE_FLAGS;
+		else
+			rec->type = MAIL_INDEX_VIEW_SYNC_TYPE_MODSEQ;
+		rec->uid1 = update->uid1;
+		rec->uid2 = update->uid2;
+		break;
+	}
+	case MAIL_TRANSACTION_KEYWORD_UPDATE: {
+		const struct mail_transaction_keyword_update *update = data;
+		const uint32_t *uids;
+
+		/* data contains mail_transaction_keyword_update header,
+		   the keyword name and an array of { uint32_t uid1, uid2; } */
+
+		if (ctx->data_offset == 0) {
+			/* skip over the header and name */
+			ctx->data_offset = sizeof(*update) + update->name_size;
+			if ((ctx->data_offset % 4) != 0)
+				ctx->data_offset += 4 - (ctx->data_offset % 4);
+		}
+
+		uids = CONST_PTR_OFFSET(data, ctx->data_offset);
+		rec->type = MAIL_INDEX_VIEW_SYNC_TYPE_FLAGS;
+		rec->uid1 = uids[0];
+		rec->uid2 = uids[1];
+
+		ctx->data_offset += sizeof(uint32_t) * 2;
+		break;
+	}
+	case MAIL_TRANSACTION_KEYWORD_RESET: {
+		const struct mail_transaction_keyword_reset *reset =
+			CONST_PTR_OFFSET(data, ctx->data_offset);
+
+		/* data contains mail_transaction_keyword_reset[] */
+		rec->type = MAIL_INDEX_VIEW_SYNC_TYPE_FLAGS;
+		rec->uid1 = reset->uid1;
+		rec->uid2 = reset->uid2;
+		ctx->data_offset += sizeof(*reset);
+		break;
+	}
+	default:
+		/* not a type reported to callers - move to next transaction */
+		ctx->hdr = NULL;
+		return FALSE;
+	}
+
+	rec->hidden = ctx->hidden;
+	return TRUE;
+}
+
+/* Iterator used when the transaction log was lost: report the flag
+   changes collected into ctx->lost_flags one range at a time. Returns
+   FALSE when all ranges have been reported. */
+static bool
+mail_index_view_sync_next_lost(struct mail_index_view_sync_ctx *ctx,
+			       struct mail_index_view_sync_rec *sync_rec)
+{
+	const struct seq_range *range;
+	unsigned int count;
+
+	range = array_get(&ctx->lost_flags, &count);
+	if (ctx->lost_flag_idx == count) {
+		ctx->last_read = TRUE;
+		return FALSE;
+	}
+
+	/* lost_flags stores UIDs despite the seq_range field names */
+	sync_rec->type = MAIL_INDEX_VIEW_SYNC_TYPE_FLAGS;
+	sync_rec->uid1 = range[ctx->lost_flag_idx].seq1;
+	sync_rec->uid2 = range[ctx->lost_flag_idx].seq2;
+	sync_rec->hidden = FALSE;
+	ctx->lost_flag_idx++;
+	return TRUE;
+}
+
+/* Public iterator: fill *sync_rec with the next change seen by this
+   sync. Returns FALSE when there are no more changes (or on failure,
+   which is recorded in ctx->failed and reported at commit). */
+bool mail_index_view_sync_next(struct mail_index_view_sync_ctx *ctx,
+			       struct mail_index_view_sync_rec *sync_rec)
+{
+	int ret;
+
+	if (ctx->log_was_lost)
+		return mail_index_view_sync_next_lost(ctx, sync_rec);
+
+	do {
+		/* fetch a new transaction when none is open or the current
+		   one has been fully consumed */
+		if (ctx->hdr == NULL || ctx->data_offset == ctx->hdr->size) {
+			ret = mail_index_view_sync_get_next_transaction(ctx);
+			if (ret <= 0) {
+				if (ret < 0)
+					ctx->failed = TRUE;
+				return FALSE;
+			}
+
+			ctx->data_offset = 0;
+		}
+	} while (!mail_index_view_sync_get_rec(ctx, sync_rec));
+
+	return TRUE;
+}
+
+/* Return the expunges (as sequence ranges) seen by this sync. The array
+   remains owned by ctx and is valid until the sync is committed. */
+void mail_index_view_sync_get_expunges(struct mail_index_view_sync_ctx *ctx,
+				       const ARRAY_TYPE(seq_range) **expunges_r)
+{
+	*expunges_r = &ctx->expunges;
+}
+
+/* Drop hidden-transaction records that lie entirely before the view's
+   expunge-sync position - they can never be seen by this view again. */
+static void
+mail_index_view_sync_clean_log_syncs(struct mail_index_view *view)
+{
+	const struct mail_index_view_log_sync_area *syncs;
+	unsigned int i, count;
+
+	if (!array_is_created(&view->syncs_hidden))
+		return;
+
+	/* Clean up to view's tail */
+	syncs = array_get(&view->syncs_hidden, &count);
+	for (i = 0; i < count; i++) {
+		/* stop at the first area that ends at or after the
+		   expunge position */
+		if ((syncs[i].log_file_offset +
+		     syncs[i].length > view->log_file_expunge_offset &&
+		     syncs[i].log_file_seq == view->log_file_expunge_seq) ||
+		    syncs[i].log_file_seq > view->log_file_expunge_seq)
+			break;
+	}
+	if (i > 0)
+		array_delete(&view->syncs_hidden, 0, i);
+}
+
+/* Finish the view sync: install the new map, advance the view's synced
+   log positions, release all sync state and free the context. Sets
+   *delayed_expunges_r=TRUE if expunges were skipped and remain pending.
+   Returns 0 on success, -1 if the sync failed or was incomplete (the
+   view is then marked inconsistent unless FIX_INCONSISTENT was used). */
+int mail_index_view_sync_commit(struct mail_index_view_sync_ctx **_ctx,
+				bool *delayed_expunges_r)
+{
+	struct mail_index_view_sync_ctx *ctx = *_ctx;
+	struct mail_index_view *view = ctx->view;
+	int ret = ctx->failed ? -1 : 0;
+
+	i_assert(view->syncing);
+
+	*_ctx = NULL;
+	*delayed_expunges_r = ctx->skipped_expunges;
+
+	if ((!ctx->last_read || view->inconsistent) &&
+	    (ctx->flags & MAIL_INDEX_VIEW_SYNC_FLAG_FIX_INCONSISTENT) == 0) {
+		/* we didn't sync everything */
+		view->inconsistent = TRUE;
+		ret = -1;
+	}
+	if (ctx->sync_map_ctx.modseq_ctx != NULL)
+		mail_index_modseq_sync_end(&ctx->sync_map_ctx.modseq_ctx);
+
+	if (ctx->sync_new_map != NULL) {
+		/* replace the private map with the head map to actually
+		   drop the expunged messages */
+		mail_index_unmap(&view->map);
+		view->map = ctx->sync_new_map;
+	} else if (ctx->sync_map_update) {
+		/* log offsets have no meaning in views. make sure they're not
+		   tried to be used wrong by setting them to zero. */
+		view->map->hdr.log_file_seq = 0;
+		view->map->hdr.log_file_head_offset = 0;
+		view->map->hdr.log_file_tail_offset = 0;
+	}
+
+	i_assert(view->map->hdr.messages_count >= ctx->finish_min_msg_count);
+
+	if (!ctx->skipped_expunges) {
+		/* everything including expunges was synced - both
+		   positions now advance together */
+		view->log_file_expunge_seq = view->log_file_head_seq;
+		view->log_file_expunge_offset = view->log_file_head_offset;
+	}
+
+	if (ctx->sync_map_ctx.view != NULL)
+		mail_index_sync_map_deinit(&ctx->sync_map_ctx);
+	mail_index_view_sync_clean_log_syncs(ctx->view);
+
+#ifdef DEBUG
+	mail_index_map_check(view->map);
+#endif
+
+	/* set log view to empty range so unneeded memory gets freed */
+	mail_transaction_log_view_clear(view->log_view,
+					view->log_file_expunge_seq);
+
+	if (array_is_created(&ctx->expunges))
+		array_free(&ctx->expunges);
+	if (array_is_created(&ctx->lost_flags))
+		array_free(&ctx->lost_flags);
+
+	view->highest_modseq = mail_index_map_modseq_get_highest(view->map);
+	view->syncing = FALSE;
+	i_free(ctx);
+	return ret;
+}
+
+/* Record a log file area whose changes should be reported with
+   hidden=TRUE by later view syncs (see view_sync_is_hidden()). The
+   array is created lazily on first use. */
+void mail_index_view_add_hidden_transaction(struct mail_index_view *view,
+					    uint32_t log_file_seq,
+					    uoff_t log_file_offset,
+					    unsigned int length)
+{
+	struct mail_index_view_log_sync_area *area;
+
+	if (!array_is_created(&view->syncs_hidden))
+		i_array_init(&view->syncs_hidden, 32);
+
+	area = array_append_space(&view->syncs_hidden);
+	area->log_file_seq = log_file_seq;
+	area->log_file_offset = log_file_offset;
+	area->length = length;
+}
diff --git a/src/lib-index/mail-index-view.c b/src/lib-index/mail-index-view.c
new file mode 100644
index 0000000..0850de1
--- /dev/null
+++ b/src/lib-index/mail-index-view.c
@@ -0,0 +1,651 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "buffer.h"
+#include "llist.h"
+#include "mail-index-view-private.h"
+#include "mail-transaction-log.h"
+
+#undef mail_index_view_clone
+#undef mail_index_view_dup_private
+
/* Duplicate a view, giving the copy its own private clone of the map so
   that later changes to src's map can't affect it. Returns the new view
   (refcount=1); free with mail_index_view_close(). */
struct mail_index_view *
mail_index_view_dup_private(const struct mail_index_view *src,
			    const char *source_filename,
			    unsigned int source_linenum)
{
	struct mail_index_view *view;
	struct mail_index_map *map;

	view = i_new(struct mail_index_view, 1);
	mail_index_view_clone(view, src, source_filename, source_linenum);

	/* clone() referenced src's map; replace that shared reference with
	   a private copy */
	map = mail_index_map_clone(view->map);
	mail_index_unmap(&view->map);
	view->map = map;
	return view;
}
+
/* Initialize dest as a copy of src: same index, same (shared, referenced)
   map and log offsets, but a freshly opened log view and empty module
   contexts. source_filename/linenum identify the caller for leak
   debugging. */
void mail_index_view_clone(struct mail_index_view *dest,
			   const struct mail_index_view *src,
			   const char *source_filename,
			   unsigned int source_linenum)
{
	i_zero(dest);
	dest->refcount = 1;
	dest->v = src->v;
	dest->index = src->index;
	/* open a new log view rather than sharing src's, since a log view
	   has its own position state */
	if (src->log_view != NULL) {
		dest->log_view =
			mail_transaction_log_view_open(src->index->log);
	}

	dest->indexid = src->indexid;
	dest->inconsistency_id = src->inconsistency_id;
	/* share the map; mail_index_view_dup_private() replaces it with a
	   private clone afterwards if needed */
	dest->map = src->map;
	if (dest->map != NULL)
		dest->map->refcount++;

	dest->log_file_expunge_seq = src->log_file_expunge_seq;
	dest->log_file_expunge_offset = src->log_file_expunge_offset;
	dest->log_file_head_seq = src->log_file_head_seq;
	dest->log_file_head_offset = src->log_file_head_offset;

	i_array_init(&dest->module_contexts,
		     I_MIN(5, mail_index_module_register.id));

	dest->source_filename = source_filename;
	dest->source_linenum = source_linenum;

	/* track the view so the index knows about all of its open views */
	DLLIST_PREPEND(&dest->index->views, dest);
}
+
/* Add a reference to the view. Each reference is dropped with
   mail_index_view_close(). */
void mail_index_view_ref(struct mail_index_view *view)
{
	view->refcount++;
}
+
/* Actually free the view once its last reference is gone: unlink it from
   the index's view list and release the log view, maps and module
   contexts. */
static void view_close(struct mail_index_view *view)
{
	i_assert(view->refcount == 0);
	i_assert(view->index->views != NULL);

	DLLIST_REMOVE(&view->index->views, view);

	mail_transaction_log_view_close(&view->log_view);

	if (array_is_created(&view->syncs_hidden))
		array_free(&view->syncs_hidden);
	mail_index_unmap(&view->map);
	/* drop any extra map references taken by mail_index_view_ref_map() */
	if (array_is_created(&view->map_refs)) {
		mail_index_view_unref_maps(view);
		array_free(&view->map_refs);
	}
	array_free(&view->module_contexts);
	i_free(view);
}
+
+bool mail_index_view_is_inconsistent(struct mail_index_view *view)
+{
+ if (view->index->indexid != view->indexid ||
+ view->index->inconsistency_id != view->inconsistency_id)
+ view->inconsistent = TRUE;
+ return view->inconsistent;
+}
+
/* Returns the index this view was opened on. */
struct mail_index *mail_index_view_get_index(struct mail_index_view *view)
{
	return view->index;
}
+
/* Returns TRUE if the view has any open transactions. */
bool mail_index_view_have_transactions(struct mail_index_view *view)
{
	return view->transactions_list != NULL;
}
+
+static void mail_index_view_ref_map(struct mail_index_view *view,
+ struct mail_index_map *map)
+{
+ struct mail_index_map *const *maps;
+ unsigned int i, count;
+
+ if (array_is_created(&view->map_refs)) {
+ maps = array_get(&view->map_refs, &count);
+
+ /* if map is already referenced, do nothing */
+ for (i = 0; i < count; i++) {
+ if (maps[i] == map)
+ return;
+ }
+ } else {
+ i_array_init(&view->map_refs, 4);
+ }
+
+ /* reference the given mapping. the reference is dropped when the view
+ is synchronized or closed. */
+ map->refcount++;
+ array_push_back(&view->map_refs, &map);
+}
+
+void mail_index_view_unref_maps(struct mail_index_view *view)
+{
+ struct mail_index_map **maps;
+ unsigned int i, count;
+
+ if (!array_is_created(&view->map_refs))
+ return;
+
+ maps = array_get_modifiable(&view->map_refs, &count);
+ for (i = 0; i < count; i++)
+ mail_index_unmap(&maps[i]);
+
+ array_clear(&view->map_refs);
+}
+
/* vfunc: number of messages visible in this view's map. */
static uint32_t view_get_messages_count(struct mail_index_view *view)
{
	return view->map->hdr.messages_count;
}
+
/* vfunc: the view's (possibly older-than-head) index header. */
static const struct mail_index_header *
view_get_header(struct mail_index_view *view)
{
	return &view->map->hdr;
}
+
/* vfunc: look up record `seq`, returning also the map it came from in
   *map_r. If the message still exists in the head (latest) mapping, the
   head record is returned so callers see the newest flags; otherwise the
   view's own (stale) record is returned with *expunged_r=TRUE. */
static const struct mail_index_record *
view_lookup_full(struct mail_index_view *view, uint32_t seq,
		 struct mail_index_map **map_r, bool *expunged_r)
{
	/* all-zero record handed out on corruption so callers never get a
	   NULL pointer */
	static struct mail_index_record broken_rec;
	struct mail_index_map *map;
	const struct mail_index_record *rec, *head_rec;

	i_assert(seq > 0 && seq <= mail_index_view_get_messages_count(view));

	/* look up the record */
	rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
	if (unlikely(rec->uid == 0)) {
		/* UID 0 is never valid - the index is corrupted. log and
		   fsck only once per view. */
		if (!view->inconsistent) {
			mail_index_set_error(view->index,
				"Corrupted Index file %s: Record [%u].uid=0",
				view->index->filepath, seq);
			(void)mail_index_fsck(view->index);
			view->inconsistent = TRUE;
		}

		/* we'll need to return something so the caller doesn't crash */
		*map_r = view->map;
		if (expunged_r != NULL)
			*expunged_r = TRUE;
		return &broken_rec;
	}
	if (view->map == view->index->map) {
		/* view's mapping is latest. we can use it directly. */
		*map_r = view->map;
		if (expunged_r != NULL)
			*expunged_r = FALSE;
		return rec;
	}

	/* look up the record from head mapping. it may contain some changes.

	   start looking up from the same sequence as in the old view.
	   if there are no expunges, it's there. otherwise it's somewhere
	   before (since records can't be inserted).

	   usually there are only a few expunges, so just going downwards from
	   our initial sequence position is probably faster than binary
	   search. */
	if (seq > view->index->map->hdr.messages_count)
		seq = view->index->map->hdr.messages_count;
	if (seq == 0) {
		/* everything is expunged from head. use the old record. */
		*map_r = view->map;
		if (expunged_r != NULL)
			*expunged_r = TRUE;
		return rec;
	}

	/* scan downwards until we find a head record whose UID is <= ours */
	map = view->index->map;
	do {
		head_rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
		if (head_rec->uid <= rec->uid)
			break;
	} while (--seq > 0);

	if (head_rec->uid == rec->uid) {
		/* found it. use it. reference the index mapping so that the
		   returned record doesn't get invalidated after next sync. */
		mail_index_view_ref_map(view, view->index->map);
		*map_r = view->index->map;
		if (expunged_r != NULL)
			*expunged_r = FALSE;
		return head_rec;
	} else {
		/* expunged from head. use the old record. */
		*map_r = view->map;
		if (expunged_r != NULL)
			*expunged_r = TRUE;
		return rec;
	}
}
+
/* vfunc: return the UID of the record at `seq` in the view's map. */
static void view_lookup_uid(struct mail_index_view *view, uint32_t seq,
			    uint32_t *uid_r)
{
	i_assert(seq > 0 && seq <= mail_index_view_get_messages_count(view));

	*uid_r = MAIL_INDEX_REC_AT_SEQ(view->map, seq)->uid;
}
+
/* vfunc: map a UID range to the sequence range it covers in this view. */
static void view_lookup_seq_range(struct mail_index_view *view,
				  uint32_t first_uid, uint32_t last_uid,
				  uint32_t *first_seq_r, uint32_t *last_seq_r)
{
	mail_index_map_lookup_seq_range(view->map, first_uid, last_uid,
					first_seq_r, last_seq_r);
}
+
/* vfunc: find the first sequence whose flags match (flags_mask, flags),
   or set *seq_r=0 if none. Uses the header's unseen/deleted UID lowwater
   marks to skip the prefix that can't match. */
static void view_lookup_first(struct mail_index_view *view,
			      enum mail_flags flags, uint8_t flags_mask,
			      uint32_t *seq_r)
{
/* raise low_uid to x if x is larger */
#define LOW_UPDATE(x) \
	STMT_START { if ((x) > low_uid) low_uid = x; } STMT_END
	const struct mail_index_header *hdr = &view->map->hdr;
	const struct mail_index_record *rec;
	uint32_t seq, seq2, low_uid = 1;

	*seq_r = 0;

	/* only usable when the query asks for "unseen" or "deleted" - the
	   headers track lowwater marks for exactly these two cases */
	if ((flags_mask & MAIL_SEEN) != 0 && (flags & MAIL_SEEN) == 0)
		LOW_UPDATE(hdr->first_unseen_uid_lowwater);
	if ((flags_mask & MAIL_DELETED) != 0 && (flags & MAIL_DELETED) != 0)
		LOW_UPDATE(hdr->first_deleted_uid_lowwater);

	if (low_uid == 1)
		seq = 1;
	else {
		/* translate the lowwater UID to a starting sequence; if no
		   messages remain at or above it, nothing can match */
		if (!mail_index_lookup_seq_range(view, low_uid, hdr->next_uid,
						 &seq, &seq2))
			return;
	}

	i_assert(hdr->messages_count <= view->map->rec_map->records_count);
	for (; seq <= hdr->messages_count; seq++) {
		rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
		if ((rec->flags & flags_mask) == (uint8_t)flags) {
			*seq_r = seq;
			break;
		}
	}
}
+
/* Decode a record's keyword bitmask `data` (record_size bytes, one bit
   per file-order keyword) into index-order keyword indexes appended to
   *keyword_idx. data==NULL means the record has no keyword data. */
static void
mail_index_data_lookup_keywords(struct mail_index_map *map,
				const unsigned char *data,
				ARRAY_TYPE(keyword_indexes) *keyword_idx)
{
	const unsigned int *keyword_idx_map;
	unsigned int i, j, keyword_count, index_idx;
	uint32_t idx, hdr_size;
	uint16_t record_size, record_align;

	array_clear(keyword_idx);
	if (data == NULL) {
		/* no keywords at all in index */
		return;
	}
	/* record_size tells how many bitmask bytes each record carries */
	(void)mail_index_ext_get_size(map, map->index->keywords_ext_id,
				      &hdr_size, &record_size,
				      &record_align);

	/* keyword_idx_map[] contains file => index keyword mapping */
	if (!array_is_created(&map->keyword_idx_map))
		return;

	keyword_idx_map = array_get(&map->keyword_idx_map, &keyword_count);
	for (i = 0; i < record_size; i++) {
		/* first do the quick check to see if there's keywords at all */
		if (data[i] == 0)
			continue;

		idx = i * CHAR_BIT;
		for (j = 0; j < CHAR_BIT; j++, idx++) {
			if ((data[i] & (1 << j)) == 0)
				continue;

			if (idx >= keyword_count) {
				/* extra bits set in keyword bytes.
				   shouldn't happen, but just ignore. */
				break;
			}

			index_idx = keyword_idx_map[idx];
			array_push_back(keyword_idx, &index_idx);
		}
	}
}
+
/* vfunc: fetch the keyword bitmask of record `seq` and decode it into
   index-order keyword indexes. */
static void view_lookup_keywords(struct mail_index_view *view, uint32_t seq,
				 ARRAY_TYPE(keyword_indexes) *keyword_idx)
{
	struct mail_index_map *map;
	const void *data;

	mail_index_lookup_ext_full(view, seq, view->index->keywords_ext_id,
				   &map, &data, NULL);
	mail_index_data_lookup_keywords(map, data, keyword_idx);
}
+
+static const void *
+view_map_lookup_ext_full(struct mail_index_map *map,
+ const struct mail_index_record *rec, uint32_t ext_id)
+{
+ const struct mail_index_ext *ext;
+ uint32_t idx;
+
+ if (!mail_index_map_get_ext_idx(map, ext_id, &idx))
+ return NULL;
+
+ ext = array_idx(&map->extensions, idx);
+ return ext->record_offset == 0 ? NULL :
+ CONST_PTR_OFFSET(rec, ext->record_offset);
+}
+
/* vfunc: look up record `seq` and its extension data for ext_id.
   *data_r is NULL if the extension has no per-record data. */
static void
view_lookup_ext_full(struct mail_index_view *view, uint32_t seq,
		     uint32_t ext_id, struct mail_index_map **map_r,
		     const void **data_r, bool *expunged_r)
{
	const struct mail_index_record *rec;

	rec = view->v.lookup_full(view, seq, map_r, expunged_r);
	*data_r = view_map_lookup_ext_full(*map_r, rec, ext_id);
}
+
+static void view_get_header_ext(struct mail_index_view *view,
+ struct mail_index_map *map, uint32_t ext_id,
+ const void **data_r, size_t *data_size_r)
+{
+ const struct mail_index_ext *ext;
+ uint32_t idx;
+
+ if (map == NULL) {
+ /* no mapping given, use head mapping */
+ map = view->index->map;
+ }
+
+ if (!mail_index_map_get_ext_idx(map, ext_id, &idx)) {
+ /* extension doesn't exist in this index file */
+ *data_r = NULL;
+ *data_size_r = 0;
+ return;
+ }
+
+ ext = array_idx(&map->extensions, idx);
+ *data_r = MAIL_INDEX_MAP_HDR_OFFSET(map, ext->hdr_offset);
+ *data_size_r = ext->hdr_size;
+}
+
+static bool view_ext_get_reset_id(struct mail_index_view *view ATTR_UNUSED,
+ struct mail_index_map *map,
+ uint32_t ext_id, uint32_t *reset_id_r)
+{
+ const struct mail_index_ext *ext;
+ uint32_t idx;
+
+ if (!mail_index_map_get_ext_idx(map, ext_id, &idx))
+ return FALSE;
+
+ ext = array_idx(&map->extensions, idx);
+ *reset_id_r = ext->reset_id;
+ return TRUE;
+}
+
/* Release one reference to the view; frees it when the last reference is
   dropped. All transactions must be closed first. */
void mail_index_view_close(struct mail_index_view **_view)
{
	struct mail_index_view *view = *_view;

	/* clear the caller's pointer before freeing anything */
	*_view = NULL;
	if (--view->refcount > 0)
		return;

	i_assert(view->transactions_list == NULL);

	view->v.close(view);
}
+
/* Public wrapper: number of messages visible in this view. */
uint32_t mail_index_view_get_messages_count(struct mail_index_view *view)
{
	return view->v.get_messages_count(view);
}
+
/* Public wrapper: the view's index header. */
const struct mail_index_header *
mail_index_get_header(struct mail_index_view *view)
{
	return view->v.get_header(view);
}
+
/* Look up record `seq`, ignoring which map it came from and whether it
   was expunged. */
const struct mail_index_record *
mail_index_lookup(struct mail_index_view *view, uint32_t seq)
{
	struct mail_index_map *map;

	return mail_index_lookup_full(view, seq, &map, NULL);
}
+
/* Public wrapper for the full record lookup; see view_lookup_full(). */
const struct mail_index_record *
mail_index_lookup_full(struct mail_index_view *view, uint32_t seq,
		       struct mail_index_map **map_r, bool *expunged_r)
{
	return view->v.lookup_full(view, seq, map_r, expunged_r);
}
+
+bool mail_index_is_expunged(struct mail_index_view *view, uint32_t seq)
+{
+ struct mail_index_map *map;
+ bool expunged;
+
+ (void)view->v.lookup_full(view, seq, &map, &expunged);
+ return expunged;
+}
+
+void mail_index_map_lookup_keywords(struct mail_index_map *map, uint32_t seq,
+ ARRAY_TYPE(keyword_indexes) *keyword_idx)
+{
+ const struct mail_index_ext *ext;
+ const struct mail_index_record *rec;
+ const void *data;
+ uint32_t idx;
+
+ if (!mail_index_map_get_ext_idx(map, map->index->keywords_ext_id, &idx))
+ data = NULL;
+ else {
+ rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
+ ext = array_idx(&map->extensions, idx);
+ data = ext->record_offset == 0 ? NULL :
+ CONST_PTR_OFFSET(rec, ext->record_offset);
+ }
+ mail_index_data_lookup_keywords(map, data, keyword_idx);
+}
+
/* Public wrapper: keywords of record `seq` as index-order indexes. */
void mail_index_lookup_keywords(struct mail_index_view *view, uint32_t seq,
				ARRAY_TYPE(keyword_indexes) *keyword_idx)
{
	view->v.lookup_keywords(view, seq, keyword_idx);
}
+
/* Return flags and keywords of record `seq` as seen by THIS view's map,
   without consulting the head mapping (unlike lookup_full()). */
void mail_index_lookup_view_flags(struct mail_index_view *view, uint32_t seq,
				  enum mail_flags *flags_r,
				  ARRAY_TYPE(keyword_indexes) *keyword_idx)
{
	const struct mail_index_record *rec;
	const unsigned char *keyword_data;

	i_assert(seq > 0 && seq <= mail_index_view_get_messages_count(view));

	rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
	*flags_r = rec->flags;

	keyword_data = view_map_lookup_ext_full(view->map, rec,
						view->index->keywords_ext_id);
	mail_index_data_lookup_keywords(view->map, keyword_data, keyword_idx);
}
+
/* Public wrapper: UID of the record at `seq`. */
void mail_index_lookup_uid(struct mail_index_view *view, uint32_t seq,
			   uint32_t *uid_r)
{
	view->v.lookup_uid(view, seq, uid_r);
}
+
/* Map a UID range to sequences. Returns FALSE if no messages fall in the
   range (*first_seq_r is then 0). */
bool mail_index_lookup_seq_range(struct mail_index_view *view,
				 uint32_t first_uid, uint32_t last_uid,
				 uint32_t *first_seq_r, uint32_t *last_seq_r)
{
	view->v.lookup_seq_range(view, first_uid, last_uid,
				 first_seq_r, last_seq_r);
	return *first_seq_r != 0;
}
+
/* Look up the sequence of a single UID. Returns FALSE (and *seq_r=0) if
   the UID doesn't exist in the view. */
bool mail_index_lookup_seq(struct mail_index_view *view,
			   uint32_t uid, uint32_t *seq_r)
{
	view->v.lookup_seq_range(view, uid, uid, seq_r, seq_r);
	return *seq_r != 0;
}
+
/* Public wrapper: first sequence matching (flags_mask, flags), 0 if none. */
void mail_index_lookup_first(struct mail_index_view *view,
			     enum mail_flags flags, uint8_t flags_mask,
			     uint32_t *seq_r)
{
	view->v.lookup_first(view, flags, flags_mask, seq_r);
}
+
/* Look up per-record extension data for `seq`, discarding the map it
   came from. */
void mail_index_lookup_ext(struct mail_index_view *view, uint32_t seq,
			   uint32_t ext_id, const void **data_r,
			   bool *expunged_r)
{
	struct mail_index_map *map;

	mail_index_lookup_ext_full(view, seq, ext_id, &map, data_r, expunged_r);
}
+
/* Public wrapper: per-record extension data plus its source map. */
void mail_index_lookup_ext_full(struct mail_index_view *view, uint32_t seq,
				uint32_t ext_id, struct mail_index_map **map_r,
				const void **data_r, bool *expunged_r)
{
	view->v.lookup_ext_full(view, seq, ext_id, map_r, data_r, expunged_r);
}
+
/* Extension header data from the head mapping (map=NULL). */
void mail_index_get_header_ext(struct mail_index_view *view, uint32_t ext_id,
			       const void **data_r, size_t *data_size_r)
{
	view->v.get_header_ext(view, NULL, ext_id, data_r, data_size_r);
}
+
/* Extension header data from an explicitly given mapping. */
void mail_index_map_get_header_ext(struct mail_index_view *view,
				   struct mail_index_map *map, uint32_t ext_id,
				   const void **data_r, size_t *data_size_r)
{
	view->v.get_header_ext(view, map, ext_id, data_r, data_size_r);
}
+
/* Public wrapper: extension reset_id lookup; FALSE when the extension
   doesn't exist in the map. */
bool mail_index_ext_get_reset_id(struct mail_index_view *view,
				 struct mail_index_map *map,
				 uint32_t ext_id, uint32_t *reset_id_r)
{
	return view->v.ext_get_reset_id(view, map, ext_id, reset_id_r);
}
+
+void mail_index_ext_get_size(struct mail_index_map *map, uint32_t ext_id,
+ uint32_t *hdr_size_r, uint16_t *record_size_r,
+ uint16_t *record_align_r)
+{
+ const struct mail_index_ext *ext;
+ uint32_t idx;
+
+ i_assert(map != NULL);
+
+ if (!mail_index_map_get_ext_idx(map, ext_id, &idx)) {
+ /* extension doesn't exist in this index file */
+ *hdr_size_r = 0;
+ *record_size_r = 0;
+ *record_align_r = 0;
+ return;
+ }
+
+ ext = array_idx(&map->extensions, idx);
+ *hdr_size_r = ext->hdr_size;
+ *record_size_r = ext->record_size;
+ *record_align_r = ext->record_align;
+}
+
+static struct mail_index_view_vfuncs view_vfuncs = {
+ view_close,
+ view_get_messages_count,
+ view_get_header,
+ view_lookup_full,
+ view_lookup_uid,
+ view_lookup_seq_range,
+ view_lookup_first,
+ view_lookup_keywords,
+ view_lookup_ext_full,
+ view_get_header_ext,
+ view_ext_get_reset_id
+};
+
/* Open a new view on `index` using the given map (which gets an extra
   reference). The view's log positions are initialized from the map's
   header. */
struct mail_index_view *
mail_index_view_open_with_map(struct mail_index *index,
			      struct mail_index_map *map)
{
	struct mail_index_view *view;

	view = i_new(struct mail_index_view, 1);
	view->refcount = 1;
	view->v = view_vfuncs;
	view->index = index;
	view->log_view = mail_transaction_log_view_open(index->log);

	view->indexid = index->indexid;
	view->inconsistency_id = index->inconsistency_id;
	view->map = map;
	view->map->refcount++;

	/* start with head == expunge position, both taken from the map */
	view->log_file_expunge_seq = view->log_file_head_seq =
		view->map->hdr.log_file_seq;
	view->log_file_expunge_offset = view->log_file_head_offset =
		view->map->hdr.log_file_head_offset;

	i_array_init(&view->module_contexts,
		     I_MIN(5, mail_index_module_register.id));
	DLLIST_PREPEND(&index->views, view);
	return view;
}
+
#undef mail_index_view_open
/* Open a view on the index's current (head) map. The caller's file and
   line number are recorded for leak debugging. */
struct mail_index_view *
mail_index_view_open(struct mail_index *index,
		     const char *source_filename, unsigned int source_linenum)
{
	struct mail_index_view *view;

	view = mail_index_view_open_with_map(index, index->map);
	/* these can be used to debug mail_index_view_close() leaks */
	view->source_filename = source_filename;
	view->source_linenum = source_linenum;
	return view;
}
+
+const struct mail_index_ext *
+mail_index_view_get_ext(struct mail_index_view *view, uint32_t ext_id)
+{
+ uint32_t idx;
+
+ if (!mail_index_map_get_ext_idx(view->map, ext_id, &idx))
+ return NULL;
+
+ return array_idx(&view->map->extensions, idx);
+}
diff --git a/src/lib-index/mail-index-write.c b/src/lib-index/mail-index-write.c
new file mode 100644
index 0000000..689cb9c
--- /dev/null
+++ b/src/lib-index/mail-index-write.c
@@ -0,0 +1,215 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "nfs-workarounds.h"
+#include "read-full.h"
+#include "write-full.h"
+#include "ostream.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+
+#include <stdio.h>
+
+#define MAIL_INDEX_MIN_UPDATE_SIZE 1024
+/* if we're updating >= count-n messages, recreate the index */
+#define MAIL_INDEX_MAX_OVERWRITE_NEG_SEQ_COUNT 10
+
/* Hard-link dovecot.index to dovecot.index.backup (via a .tmp link +
   rename so the swap is atomic). Returns 0 on success or if the index
   file doesn't exist, -1 on error. */
static int mail_index_create_backup(struct mail_index *index)
{
	const char *backup_path, *tmp_backup_path;
	int ret;

	if (index->fd != -1) {
		/* we very much want to avoid creating a backup file that
		   hasn't been written to disk yet */
		if (fdatasync(index->fd) < 0) {
			mail_index_set_error(index, "fdatasync(%s) failed: %m",
					     index->filepath);
			return -1;
		}
	}

	backup_path = t_strconcat(index->filepath, ".backup", NULL);
	tmp_backup_path = t_strconcat(backup_path, ".tmp", NULL);
	ret = link(index->filepath, tmp_backup_path);
	if (ret < 0 && errno == EEXIST) {
		/* leftover .tmp from an earlier attempt - remove and retry */
		if (unlink(tmp_backup_path) < 0 && errno != ENOENT) {
			mail_index_set_error(index, "unlink(%s) failed: %m",
					     tmp_backup_path);
			return -1;
		}
		ret = link(index->filepath, tmp_backup_path);
	}
	if (ret < 0) {
		if (errno == ENOENT) {
			/* no dovecot.index file, ignore */
			return 0;
		}
		mail_index_set_error(index, "link(%s, %s) failed: %m",
				     index->filepath, tmp_backup_path);
		return -1;
	}

	if (rename(tmp_backup_path, backup_path) < 0) {
		mail_index_set_error(index, "rename(%s, %s) failed: %m",
				     tmp_backup_path, backup_path);
		return -1;
	}
	return 0;
}
+
/* Rewrite dovecot.index from the current in-memory map: write header +
   records to a temp file, fsync (unless disabled), optionally back up
   the old file, then rename over it. Returns 0 on success, -1 on error
   (the temp file is removed). */
static int mail_index_recreate(struct mail_index *index)
{
	struct mail_index_map *map = index->map;
	struct ostream *output;
	unsigned int base_size;
	const char *path;
	int ret = 0, fd;

	i_assert(!MAIL_INDEX_IS_IN_MEMORY(index));
	i_assert(map->hdr.indexid == index->indexid);
	i_assert((map->hdr.flags & MAIL_INDEX_HDR_FLAG_CORRUPTED) == 0);
	i_assert(index->indexid != 0);

	fd = mail_index_create_tmp_file(index, index->filepath, &path);
	if (fd == -1)
		return -1;

	output = o_stream_create_fd_file(fd, 0, FALSE);
	o_stream_cork(output);

	struct mail_index_header hdr = map->hdr;
	/* Write tail_offset the same as head_offset. This function must not
	   be called unless it's safe to do this. See the explanations in
	   mail_index_sync_commit(). */
	hdr.log_file_tail_offset = hdr.log_file_head_offset;

	/* base header, then the extension headers, then all records */
	base_size = I_MIN(hdr.base_header_size, sizeof(hdr));
	o_stream_nsend(output, &hdr, base_size);
	o_stream_nsend(output, MAIL_INDEX_MAP_HDR_OFFSET(map, base_size),
		       hdr.header_size - base_size);
	o_stream_nsend(output, map->rec_map->records,
		       map->rec_map->records_count * hdr.record_size);
	if (o_stream_finish(output) < 0) {
		mail_index_file_set_syscall_error(index, path, "write()");
		ret = -1;
	}
	o_stream_destroy(&output);

	if (ret == 0 && index->set.fsync_mode != FSYNC_MODE_NEVER) {
		if (fdatasync(fd) < 0) {
			mail_index_file_set_syscall_error(index, path,
							  "fdatasync()");
			ret = -1;
		}
	}

	if (close(fd) < 0) {
		mail_index_file_set_syscall_error(index, path, "close()");
		ret = -1;
	}

	/* best-effort: a backup failure doesn't abort the recreate */
	if ((index->flags & MAIL_INDEX_OPEN_FLAG_KEEP_BACKUPS) != 0)
		(void)mail_index_create_backup(index);

	if (ret == 0 && rename(path, index->filepath) < 0) {
		mail_index_set_error(index, "rename(%s, %s) failed: %m",
				     path, index->filepath);
		ret = -1;
	}

	if (ret < 0)
		i_unlink(path);
	return ret;
}
+
/* Decide whether it's safe/useful to recreate the main index file.
   Returns FALSE when another process already recreated it (inode/device
   mismatch between path and our open fd) or when stat fails. */
static bool mail_index_should_recreate(struct mail_index *index)
{
	struct stat st1, st2;

	if (nfs_safe_stat(index->filepath, &st1) < 0) {
		if (errno != ENOENT) {
			mail_index_set_syscall_error(index, "stat()");
			return FALSE;
		} else if (index->fd == -1) {
			/* main index hasn't been created yet */
			return TRUE;
		} else {
			/* mailbox was just deleted? don't log an error */
			return FALSE;
		}
	}
	if (index->fd == -1) {
		/* main index was just created by another process */
		return FALSE;
	}
	if (fstat(index->fd, &st2) < 0) {
		if (!ESTALE_FSTAT(errno))
			mail_index_set_syscall_error(index, "fstat()");
		return FALSE;
	}
	if (st1.st_ino != st2.st_ino ||
	    !CMP_DEV_T(st1.st_dev, st2.st_dev)) {
		/* Index has already been recreated since we last read it.
		   We can't trust our decisions about whether to recreate it. */
		return FALSE;
	}
	return TRUE;
}
+
/* Write the in-memory index to disk, optionally rotating the .log first
   so the new index points at the latest log file. Must be called with
   the log sync lock held. `reason` is logged when the index file is
   actually recreated. */
void mail_index_write(struct mail_index *index, bool want_rotate,
		      const char *reason)
{
	struct mail_index_header *hdr = &index->map->hdr;
	bool rotated = FALSE;

	i_assert(index->log_sync_locked);

	if (index->readonly)
		return;

	/* rotate the .log before writing index, so the index will point to
	   the latest log. Note that it's the caller's responsibility to make
	   sure that the .log can be safely rotated (i.e. everything has been
	   synced). */
	if (want_rotate) {
		if (mail_transaction_log_rotate(index->log, FALSE) == 0) {
			struct mail_transaction_log_file *file =
				index->log->head;
			/* Log rotation refreshes the index, which may cause the
			   map to change. Because we're locked, it's not
			   supposed to happen and will likely lead to an
			   assert-crash below, but we still need to make sure
			   we're using the latest map to do the checks. */
			hdr = &index->map->hdr;
			i_assert(file->hdr.prev_file_seq == hdr->log_file_seq);
			i_assert(file->hdr.prev_file_offset == hdr->log_file_head_offset);
			/* point the header at the start of the new log file */
			hdr->log_file_seq = file->hdr.file_seq;
			hdr->log_file_head_offset =
				hdr->log_file_tail_offset = file->hdr.hdr_size;
			/* Assume .log.2 was created successfully. If it
			   wasn't, it just causes an extra stat() and gets
			   fixed later on. */
			hdr->log2_rotate_time = ioloop_time;
			rotated = TRUE;
		}
	}

	if (MAIL_INDEX_IS_IN_MEMORY(index))
		;
	else if (!rotated && !mail_index_should_recreate(index)) {
		/* make sure we don't keep getting back in here */
		index->reopen_main_index = TRUE;
	} else {
		if (mail_index_recreate(index) < 0) {
			/* can't write to disk - fall back to a memory-only
			   index */
			(void)mail_index_move_to_memory(index);
			return;
		}
		event_set_name(index->event, "mail_index_recreated");
		e_debug(index->event, "Recreated %s (file_seq=%u) because: %s",
			index->filepath, hdr->log_file_seq, reason);
	}

	/* remember what the on-disk main index header now claims */
	index->main_index_hdr_log_file_seq = hdr->log_file_seq;
	index->main_index_hdr_log_file_tail_offset = hdr->log_file_tail_offset;
}
diff --git a/src/lib-index/mail-index.c b/src/lib-index/mail-index.c
new file mode 100644
index 0000000..8f89309
--- /dev/null
+++ b/src/lib-index/mail-index.c
@@ -0,0 +1,1110 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "array.h"
+#include "buffer.h"
+#include "eacces-error.h"
+#include "hash.h"
+#include "str-sanitize.h"
+#include "mmap-util.h"
+#include "nfs-workarounds.h"
+#include "read-full.h"
+#include "write-full.h"
+#include "mail-index-alloc-cache.h"
+#include "mail-index-private.h"
+#include "mail-index-view-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-index-modseq.h"
+#include "mail-transaction-log-private.h"
+#include "mail-transaction-log-view-private.h"
+#include "mail-cache.h"
+
+#include <stdio.h>
+#include <stddef.h>
+#include <time.h>
+#include <sys/stat.h>
+#include <ctype.h>
+
+struct mail_index_module_register mail_index_module_register = { 0 };
+
+struct event_category event_category_mail_index = {
+ .name = "mail-index",
+};
+
+static void mail_index_close_nonopened(struct mail_index *index);
+
/* Built-in optimization defaults; individual fields can be overridden
   via mail_index_set_optimization_settings() (a value of 0 there keeps
   the default). Units follow the field names: *_bytes / *_size are
   bytes, *_secs are seconds, *_percentage are percent. */
static const struct mail_index_optimization_settings default_optimization_set = {
	.index = {
		.rewrite_min_log_bytes = 8 * 1024,
		.rewrite_max_log_bytes = 128 * 1024,
	},
	.log = {
		.min_size = 32 * 1024,
		.max_size = 1024 * 1024,
		.min_age_secs = 5 * 60,
		.log2_max_age_secs = 3600 * 24 * 2,
	},
	.cache = {
		.unaccessed_field_drop_secs = 3600 * 24 * 30,
		.record_max_size = 64 * 1024,
		.max_size = 1024 * 1024 * 1024,
		.purge_min_size = 32 * 1024,
		.purge_delete_percentage = 20,
		.purge_continued_percentage = 200,
		.purge_header_continue_count = 4,
	},
};
+
/* Allocate a new index handle for dir/prefix (not yet opened). Sets up
   default settings, the keywords extension, the keyword registry and the
   transaction log. Free with mail_index_free(). */
struct mail_index *mail_index_alloc(struct event *parent_event,
				    const char *dir, const char *prefix)
{
	struct mail_index *index;

	index = i_new(struct mail_index, 1);
	index->dir = i_strdup(dir);
	index->prefix = i_strdup(prefix);
	index->fd = -1;
	index->event = event_create(parent_event);
	event_add_category(index->event, &event_category_mail_index);

	index->extension_pool =
		pool_alloconly_create(MEMPOOL_GROWING"index extension", 1024);
	p_array_init(&index->extensions, index->extension_pool, 5);
	i_array_init(&index->module_contexts,
		     I_MIN(5, mail_index_module_register.id));

	/* default settings; overridable via the mail_index_set_*() calls */
	index->set.mode = 0600;
	index->set.gid = (gid_t)-1;
	index->set.lock_method = FILE_LOCK_METHOD_FCNTL;
	index->set.max_lock_timeout_secs = UINT_MAX;
	index->optimization_set = default_optimization_set;

	/* the keywords extension is always registered */
	index->keywords_ext_id =
		mail_index_ext_register(index, MAIL_INDEX_EXT_KEYWORDS,
					128, 2, 1);
	index->keywords_pool = pool_alloconly_create("keywords", 512);
	i_array_init(&index->keywords, 16);
	hash_table_create(&index->keywords_hash, index->keywords_pool, 0,
			  strcase_hash, strcasecmp);
	index->log = mail_transaction_log_alloc(index);
	mail_index_modseq_init(index);
	return index;
}
+
/* Free an index handle allocated with mail_index_alloc(). The index must
   already be fully closed (open_count == 0). Clears the caller's
   pointer. */
void mail_index_free(struct mail_index **_index)
{
	struct mail_index *index = *_index;

	*_index = NULL;

	i_assert(index->open_count == 0);

	mail_transaction_log_free(&index->log);
	hash_table_destroy(&index->keywords_hash);
	pool_unref(&index->extension_pool);
	pool_unref(&index->keywords_pool);

	array_free(&index->keywords);
	array_free(&index->module_contexts);

	event_unref(&index->event);
	i_free(index->set.cache_dir);
	i_free(index->set.ext_hdr_init_data);
	i_free(index->set.gid_origin);
	i_free(index->last_error.text);
	i_free(index->dir);
	i_free(index->prefix);
	i_free(index->need_recreate);
	i_free(index);
}
+
+void mail_index_set_cache_dir(struct mail_index *index, const char *dir)
+{
+ i_free(index->set.cache_dir);
+ index->set.cache_dir = i_strdup(dir);
+}
+
/* Configure when fsyncing is done (mode) and which operations it applies
   to (mask). */
void mail_index_set_fsync_mode(struct mail_index *index,
			       enum fsync_mode mode,
			       enum mail_index_fsync_mask mask)
{
	index->set.fsync_mode = mode;
	index->set.fsync_mask = mask;
}
+
/* Derive mode/gid for new index files from the existing index directory's
   permissions. Returns FALSE for in-memory indexes or when the directory
   can't be stat()ed. */
bool mail_index_use_existing_permissions(struct mail_index *index)
{
	struct stat st;

	if (MAIL_INDEX_IS_IN_MEMORY(index))
		return FALSE;

	if (stat(index->dir, &st) < 0) {
		if (errno != ENOENT)
			e_error(index->event, "stat(%s) failed: %m", index->dir);
		return FALSE;
	}

	index->set.mode = st.st_mode & 0666;
	if (S_ISDIR(st.st_mode) && (st.st_mode & S_ISGID) != 0) {
		/* directory's GID is used automatically for new files */
		index->set.gid = (gid_t)-1;
	} else if ((st.st_mode & 0070) >> 3 == (st.st_mode & 0007)) {
		/* group has same permissions as world, so don't bother
		   changing it */
		index->set.gid = (gid_t)-1;
	} else if (getegid() == st.st_gid) {
		/* using our own gid, no need to change it */
		index->set.gid = (gid_t)-1;
	} else {
		index->set.gid = st.st_gid;
	}

	i_free(index->set.gid_origin);
	if (index->set.gid != (gid_t)-1)
		index->set.gid_origin = i_strdup("preserved existing GID");
	return TRUE;
}
+
/* Explicitly set mode/gid for new index files. gid_origin is a
   human-readable note (for error messages) of where the GID came from.
   NOTE(review): passing index->set.gid_origin itself as gid_origin would
   be a use-after-free here - callers must pass an independent string. */
void mail_index_set_permissions(struct mail_index *index,
				mode_t mode, gid_t gid, const char *gid_origin)
{
	index->set.mode = mode & 0666;
	index->set.gid = gid;

	i_free(index->set.gid_origin);
	index->set.gid_origin = i_strdup(gid_origin);
}
+
/* Configure the file locking method and how long to wait for locks. */
void mail_index_set_lock_method(struct mail_index *index,
				enum file_lock_method lock_method,
				unsigned int max_timeout_secs)
{
	index->set.lock_method = lock_method;
	index->set.max_lock_timeout_secs = max_timeout_secs;
}
+
/* Override optimization settings. Only nonzero fields in `set` are
   applied; zero fields keep their current (default) values. */
void mail_index_set_optimization_settings(struct mail_index *index,
	const struct mail_index_optimization_settings *set)
{
	struct mail_index_optimization_settings *dest =
		&index->optimization_set;

	/* index */
	if (set->index.rewrite_min_log_bytes != 0)
		dest->index.rewrite_min_log_bytes = set->index.rewrite_min_log_bytes;
	if (set->index.rewrite_max_log_bytes != 0)
		dest->index.rewrite_max_log_bytes = set->index.rewrite_max_log_bytes;

	/* log */
	if (set->log.min_size != 0)
		dest->log.min_size = set->log.min_size;
	if (set->log.max_size != 0)
		dest->log.max_size = set->log.max_size;
	if (set->log.min_age_secs != 0)
		dest->log.min_age_secs = set->log.min_age_secs;
	if (set->log.log2_max_age_secs != 0)
		dest->log.log2_max_age_secs = set->log.log2_max_age_secs;

	/* cache */
	if (set->cache.unaccessed_field_drop_secs != 0)
		dest->cache.unaccessed_field_drop_secs =
			set->cache.unaccessed_field_drop_secs;
	if (set->cache.max_size != 0)
		dest->cache.max_size = set->cache.max_size;
	if (set->cache.purge_min_size != 0)
		dest->cache.purge_min_size = set->cache.purge_min_size;
	if (set->cache.purge_delete_percentage != 0)
		dest->cache.purge_delete_percentage =
			set->cache.purge_delete_percentage;
	if (set->cache.purge_continued_percentage != 0)
		dest->cache.purge_continued_percentage =
			set->cache.purge_continued_percentage;
	if (set->cache.purge_header_continue_count != 0)
		dest->cache.purge_header_continue_count =
			set->cache.purge_header_continue_count;
	if (set->cache.record_max_size != 0)
		dest->cache.record_max_size = set->cache.record_max_size;
}
+
/* Store initial header data to be written for extension ext_id when the
   index is created. Only one extension may have init data at a time, and
   size must match the registered header size. */
void mail_index_set_ext_init_data(struct mail_index *index, uint32_t ext_id,
				  const void *data, size_t size)
{
	const struct mail_index_registered_ext *rext;

	i_assert(index->set.ext_hdr_init_data == NULL ||
		 index->set.ext_hdr_init_id == ext_id);

	rext = array_idx(&index->extensions, ext_id);
	i_assert(rext->hdr_size == size);

	index->set.ext_hdr_init_id = ext_id;
	/* keep a private copy of the data */
	i_free(index->set.ext_hdr_init_data);
	index->set.ext_hdr_init_data = i_malloc(size);
	memcpy(index->set.ext_hdr_init_data, data, size);
}
+
+bool mail_index_ext_name_is_valid(const char *name)
+{
+ size_t i;
+
+ for (i = 0; name[i] != '\0'; i++) {
+ if (!i_isalnum(name[i]) && name[i] != '-' && name[i] != '_' &&
+ name[i] != ' ')
+ return FALSE;
+
+ }
+ return i == 0 || i < MAIL_INDEX_EXT_NAME_MAX_LENGTH;
+}
+
/* Register an extension by name and return its ext_id. Registering an
   already-known name just returns the existing id; invalid names or a
   nonzero record size with zero alignment panic. */
uint32_t mail_index_ext_register(struct mail_index *index, const char *name,
				 uint32_t default_hdr_size,
				 uint16_t default_record_size,
				 uint16_t default_record_align)
{
	struct mail_index_registered_ext rext;
	uint32_t ext_id;

	if (!mail_index_ext_name_is_valid(name))
		i_panic("mail_index_ext_register(%s): Invalid name", name);

	if (default_record_size != 0 && default_record_align == 0) {
		i_panic("mail_index_ext_register(%s): "
			"Invalid record alignment", name);
	}

	/* idempotent: return the id of an already registered extension */
	if (mail_index_ext_lookup(index, name, &ext_id))
		return ext_id;

	i_zero(&rext);
	rext.name = p_strdup(index->extension_pool, name);
	rext.index_idx = array_count(&index->extensions);
	rext.hdr_size = default_hdr_size;
	rext.record_size = default_record_size;
	rext.record_align = default_record_align;

	array_push_back(&index->extensions, &rext);
	return rext.index_idx;
}
+
+/* Overwrite the default header/record sizes of an already registered
+   extension. Affects only the in-memory registration defaults. */
+void mail_index_ext_register_resize_defaults(struct mail_index *index,
+					     uint32_t ext_id,
+					     uint32_t default_hdr_size,
+					     uint16_t default_record_size,
+					     uint16_t default_record_align)
+{
+	struct mail_index_registered_ext *ext =
+		array_idx_modifiable(&index->extensions, ext_id);
+
+	ext->hdr_size = default_hdr_size;
+	ext->record_size = default_record_size;
+	ext->record_align = default_record_align;
+}
+
+/* Look up a registered extension by name. On success returns TRUE and
+   stores the extension ID into *ext_id_r; otherwise returns FALSE and
+   stores (uint32_t)-1. */
+bool mail_index_ext_lookup(struct mail_index *index, const char *name,
+			   uint32_t *ext_id_r)
+{
+	const struct mail_index_registered_ext *exts;
+	unsigned int idx, total;
+
+	exts = array_get(&index->extensions, &total);
+	for (idx = 0; idx < total; idx++) {
+		if (strcmp(exts[idx].name, name) != 0)
+			continue;
+		*ext_id_r = idx;
+		return TRUE;
+	}
+
+	*ext_id_r = (uint32_t)-1;
+	return FALSE;
+}
+
+/* Attach an expunge handler callback to the given extension. Re-registering
+   the same callback is a no-op; registering a different one while a handler
+   is already set is a programming error (assert). */
+void mail_index_register_expunge_handler(struct mail_index *index,
+					 uint32_t ext_id,
+					 mail_index_expunge_handler_t *cb)
+{
+	struct mail_index_registered_ext *rext;
+
+	rext = array_idx_modifiable(&index->extensions, ext_id);
+	i_assert(rext->expunge_handler == NULL || rext->expunge_handler == cb);
+
+	rext->expunge_handler = cb;
+}
+
+/* Remove the extension's expunge handler. A handler must currently be
+   registered (assert). */
+void mail_index_unregister_expunge_handler(struct mail_index *index,
+					   uint32_t ext_id)
+{
+	struct mail_index_registered_ext *rext;
+
+	rext = array_idx_modifiable(&index->extensions, ext_id);
+	i_assert(rext->expunge_handler != NULL);
+
+	rext->expunge_handler = NULL;
+}
+
+/* Look up a keyword's index number. Returns TRUE and sets *idx_r on
+   success; otherwise returns FALSE and sets *idx_r to UINT_MAX. */
+bool mail_index_keyword_lookup(struct mail_index *index,
+			       const char *keyword, unsigned int *idx_r)
+{
+	char *key;
+	void *value;
+
+	/* keywords_hash keeps a name => index mapping of keywords.
+	   Keywords are never removed from it, so the index values are valid
+	   for the lifetime of the mail_index. */
+	if (hash_table_lookup_full(index->keywords_hash, keyword,
+				   &key, &value)) {
+		*idx_r = POINTER_CAST_TO(value, unsigned int);
+		return TRUE;
+	}
+
+	*idx_r = UINT_MAX;
+	return FALSE;
+}
+
+/* Look up a keyword's index number, creating a new mapping if the keyword
+   isn't yet known. The keyword must be non-empty (assert). */
+void mail_index_keyword_lookup_or_create(struct mail_index *index,
+					 const char *keyword,
+					 unsigned int *idx_r)
+{
+	char *keyword_dup;
+
+	i_assert(*keyword != '\0');
+
+	if (mail_index_keyword_lookup(index, keyword, idx_r))
+		return;
+
+	/* duplicate into keywords_pool so both the hash key and the array
+	   entry stay valid for the index's lifetime */
+	keyword = keyword_dup = p_strdup(index->keywords_pool, keyword);
+	*idx_r = array_count(&index->keywords);
+
+	hash_table_insert(index->keywords_hash, keyword_dup,
+			  POINTER_CAST(*idx_r));
+	array_push_back(&index->keywords, &keyword);
+
+	/* keep the array NULL-terminated, but the NULL itself invisible */
+	array_append_zero(&index->keywords);
+	array_pop_back(&index->keywords);
+}
+
+/* Returns the (NULL-terminated, see lookup_or_create) array of all keywords
+   known to this index. */
+const ARRAY_TYPE(keywords) *mail_index_get_keywords(struct mail_index *index)
+{
+	return &index->keywords;
+}
+
+/* Build a refcounted mail_keywords list from a NULL-terminated string
+   array. Unknown keywords are created; duplicates in the input are
+   silently dropped. Caller releases with mail_index_keywords_unref(). */
+struct mail_keywords *
+mail_index_keywords_create(struct mail_index *index,
+			   const char *const keywords[])
+{
+	struct mail_keywords *k;
+	unsigned int src, dest, i, count;
+
+	count = str_array_length(keywords);
+	if (count == 0) {
+		/* empty list: no flexible-array tail needed */
+		k = i_new(struct mail_keywords, 1);
+		k->index = index;
+		k->refcount = 1;
+		return k;
+	}
+
+	/* @UNSAFE */
+	k = i_malloc(MALLOC_ADD(sizeof(struct mail_keywords),
+				MALLOC_MULTIPLY(sizeof(k->idx[0]), count)));
+	k->index = index;
+	k->refcount = 1;
+
+	/* look up the keywords from index. they're never removed from there
+	   so we can permanently store indexes to them. */
+	for (src = dest = 0; src < count; src++) {
+		mail_index_keyword_lookup_or_create(index, keywords[src],
+						    &k->idx[dest]);
+		/* ignore if this is a duplicate */
+		for (i = 0; i < src; i++) {
+			if (k->idx[i] == k->idx[dest])
+				break;
+		}
+		if (i == src)
+			dest++;
+	}
+	k->count = dest;
+	return k;
+}
+
+/* Like mail_index_keywords_create(), but from already-resolved keyword
+   index numbers. Duplicates in the input are dropped; no validation is
+   done that the indexes actually exist in the index. */
+struct mail_keywords *
+mail_index_keywords_create_from_indexes(struct mail_index *index,
+					const ARRAY_TYPE(keyword_indexes)
+						*keyword_indexes)
+{
+	struct mail_keywords *k;
+	const unsigned int *indexes;
+	unsigned int src, dest, i, count;
+
+	indexes = array_get(keyword_indexes, &count);
+	if (count == 0) {
+		/* empty list: no flexible-array tail needed */
+		k = i_new(struct mail_keywords, 1);
+		k->index = index;
+		k->refcount = 1;
+		return k;
+	}
+
+	/* @UNSAFE */
+	k = i_malloc(MALLOC_ADD(sizeof(struct mail_keywords),
+				MALLOC_MULTIPLY(sizeof(k->idx[0]), count)));
+	k->index = index;
+	k->refcount = 1;
+
+	/* copy but skip duplicates */
+	for (src = dest = 0; src < count; src++) {
+		for (i = 0; i < src; i++) {
+			if (k->idx[i] == indexes[src])
+				break;
+		}
+		if (i == src)
+			k->idx[dest++] = indexes[src];
+	}
+	k->count = dest;
+	return k;
+}
+
+/* Take an extra reference to a keywords list. */
+void mail_index_keywords_ref(struct mail_keywords *keywords)
+{
+	keywords->refcount++;
+}
+
+/* Release a reference; frees the list when the last reference is dropped.
+   *_keywords is NULLed to guard against reuse. */
+void mail_index_keywords_unref(struct mail_keywords **_keywords)
+{
+	struct mail_keywords *keywords = *_keywords;
+
+	i_assert(keywords->refcount > 0);
+
+	*_keywords = NULL;
+	if (--keywords->refcount == 0)
+		i_free(keywords);
+}
+
+/* Open only the main index file descriptor, without mapping it.
+   Returns 1 if opened, 0 if the file doesn't exist (caller should create
+   it), -1 on error. Caller must close index->fd itself. */
+int mail_index_try_open_only(struct mail_index *index)
+{
+	i_assert(index->fd == -1);
+	i_assert(!MAIL_INDEX_IS_IN_MEMORY(index));
+
+	/* Note that our caller must close index->fd by itself. */
+	if (index->readonly)
+		/* skip the O_RDWR attempt by faking its failure */
+		errno = EACCES;
+	else {
+		index->fd = nfs_safe_open(index->filepath, O_RDWR);
+		index->readonly = FALSE;
+	}
+
+	if (index->fd == -1 && errno == EACCES) {
+		/* no write permission (or forced readonly) - fall back to
+		   a read-only open */
+		index->fd = open(index->filepath, O_RDONLY);
+		index->readonly = TRUE;
+	}
+
+	if (index->fd == -1) {
+		if (errno != ENOENT) {
+			mail_index_set_syscall_error(index, "open()");
+			return -1;
+		}
+
+		/* have to create it */
+		return 0;
+	}
+	return 1;
+}
+
+/* Open and map the index. Returns 1 on success, 0 if the index doesn't
+   exist or is corrupted (fd is closed so it can be recreated), -1 on
+   error. In-memory indexes return 0 immediately. */
+static int
+mail_index_try_open(struct mail_index *index)
+{
+	int ret;
+
+	i_assert(index->fd == -1);
+
+	if (MAIL_INDEX_IS_IN_MEMORY(index))
+		return 0;
+
+	ret = mail_index_map(index, MAIL_INDEX_SYNC_HANDLER_HEAD);
+	if (ret == 0 && !index->readonly) {
+		/* it's corrupted - recreate it */
+		if (index->fd != -1) {
+			if (close(index->fd) < 0)
+				mail_index_set_syscall_error(index, "close()");
+			index->fd = -1;
+		}
+	}
+	return ret;
+}
+
+/* Create "<path_prefix>.tmp" with the index's configured mode and group.
+   Returns the open fd and stores the path in *path_r, or -1 on error.
+   umask(0) is used so the configured mode is applied exactly. */
+int mail_index_create_tmp_file(struct mail_index *index,
+			       const char *path_prefix, const char **path_r)
+{
+	mode_t old_mask;
+	const char *path;
+	int fd;
+
+	i_assert(!MAIL_INDEX_IS_IN_MEMORY(index));
+
+	path = *path_r = t_strconcat(path_prefix, ".tmp", NULL);
+	old_mask = umask(0);
+	fd = open(path, O_RDWR|O_CREAT|O_EXCL, index->set.mode);
+	umask(old_mask);
+	if (fd == -1 && errno == EEXIST) {
+		/* stale temp file. unlink and recreate rather than overwriting,
+		   just to make sure locking problems won't cause corruption */
+		if (i_unlink(path) < 0)
+			return -1;
+		old_mask = umask(0);
+		fd = open(path, O_RDWR|O_CREAT|O_EXCL, index->set.mode);
+		umask(old_mask);
+	}
+	if (fd == -1) {
+		mail_index_file_set_syscall_error(index, path, "creat()");
+		return -1;
+	}
+
+	/* apply the configured group ownership (best effort) */
+	mail_index_fchown(index, fd, path);
+	return fd;
+}
+
+/* Returns the cache file path, preferring the configured cache_dir over
+   the index directory, or NULL for in-memory indexes (no directory). */
+static const char *mail_index_get_cache_path(struct mail_index *index)
+{
+	const char *dir;
+
+	if (index->set.cache_dir != NULL)
+		dir = index->set.cache_dir;
+	else if (index->dir != NULL)
+		dir = index->dir;
+	else
+		return NULL;
+	return t_strconcat(dir, "/", index->prefix,
+			   MAIL_CACHE_FILE_SUFFIX, NULL);
+}
+
+/* Open (or create, with the CREATE flag) the transaction log, main index
+   and cache file, recovering from corruption by recreating files as
+   needed. Returns 1 on success, 0 if nothing exists and CREATE wasn't
+   given, -1 on error (unless the index could be moved to memory). */
+static int mail_index_open_files(struct mail_index *index,
+				 enum mail_index_open_flags flags)
+{
+	int ret;
+
+	ret = mail_transaction_log_open(index->log);
+	if (ret == 0) {
+		/* log doesn't exist yet */
+		if ((flags & MAIL_INDEX_OPEN_FLAG_CREATE) == 0)
+			return 0;
+
+		/* if dovecot.index exists, read it first so that we can get
+		   the correct indexid and log sequence */
+		(void)mail_index_try_open(index);
+
+		if (index->indexid == 0) {
+			/* Create a new indexid for us. If we're opening index
+			   into memory, index->map doesn't exist yet. */
+			index->indexid = ioloop_time;
+			index->initial_create = TRUE;
+			if (index->map != NULL)
+				index->map->hdr.indexid = index->indexid;
+		}
+
+		ret = mail_transaction_log_create(index->log, FALSE);
+		if (index->map != NULL) {
+			/* log creation could have changed it if someone else
+			   just created it. */
+			index->map->hdr.indexid = index->indexid;
+		}
+		index->initial_create = FALSE;
+	}
+	if (ret >= 0) {
+		/* map the index unless mail_index_try_open() above already
+		   did it */
+		ret = index->map != NULL ? 1 : mail_index_try_open(index);
+		if (ret == 0 && !index->readonly) {
+			/* corrupted */
+			mail_transaction_log_close(index->log);
+			ret = mail_transaction_log_create(index->log, TRUE);
+			if (ret == 0) {
+				if (index->map != NULL)
+					mail_index_unmap(&index->map);
+				index->map = mail_index_map_alloc(index);
+			}
+		}
+	}
+	if (ret < 0) {
+		/* open/create failed, fallback to in-memory indexes */
+		if ((flags & MAIL_INDEX_OPEN_FLAG_CREATE) == 0)
+			return -1;
+
+		if (mail_index_move_to_memory(index) < 0)
+			return -1;
+	}
+
+	if (index->cache == NULL) {
+		const char *path = mail_index_get_cache_path(index);
+		index->cache = mail_cache_open_or_create_path(index, path);
+	}
+	return 1;
+}
+
+/* Handle mail_index_open() for an index that is already open (open_count
+   > 0): bump the refcount, but first recreate the files if the index was
+   marked corrupted. Returns 1 on success, 0/-1 from reopening. */
+static int
+mail_index_open_opened(struct mail_index *index,
+		       enum mail_index_open_flags flags)
+{
+	int ret;
+
+	i_assert(index->map != NULL);
+
+	if ((index->map->hdr.flags & MAIL_INDEX_HDR_FLAG_CORRUPTED) != 0) {
+		/* index was marked corrupted. we'll probably need to
+		   recreate the files. */
+		mail_index_unmap(&index->map);
+		mail_index_close_file(index);
+		mail_transaction_log_close(index->log);
+		if ((ret = mail_index_open_files(index, flags)) <= 0)
+			return ret;
+	}
+
+	index->open_count++;
+	return 1;
+}
+
+/* Open the index (and its log and cache). Returns 1 on success, 0 if the
+   files don't exist and CREATE wasn't requested, -1 on error. Each
+   successful open must be paired with mail_index_close(). */
+int mail_index_open(struct mail_index *index, enum mail_index_open_flags flags)
+{
+	int ret;
+
+	if (index->open_count > 0) {
+		/* NOTE(review): this empty if-body exists only to carry the
+		   comment below; ret is returned unchanged either way */
+		if ((ret = mail_index_open_opened(index, flags)) <= 0) {
+			/* doesn't exist and create flag not used */
+		}
+		return ret;
+	}
+
+	index->filepath = MAIL_INDEX_IS_IN_MEMORY(index) ?
+		i_strdup("(in-memory index)") :
+		i_strconcat(index->dir, "/", index->prefix, NULL);
+
+	mail_index_reset_error(index);
+	/* NOTE(review): readonly is assigned twice; the second assignment
+	   below is the effective one */
+	index->readonly = FALSE;
+	index->log_sync_locked = FALSE;
+	index->flags = flags;
+	index->readonly = (flags & MAIL_INDEX_OPEN_FLAG_READONLY) != 0;
+	if ((flags & MAIL_INDEX_OPEN_FLAG_DEBUG) != 0)
+		event_set_forced_debug(index->event, TRUE);
+	else
+		event_unset_forced_debug(index->event);
+
+	/* sanity-check mutually dependent option combinations */
+	if ((flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0 &&
+	    index->set.fsync_mode != FSYNC_MODE_ALWAYS)
+		i_fatal("nfs flush requires mail_fsync=always");
+	if ((flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0 &&
+	    (flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) == 0)
+		i_fatal("nfs flush requires mmap_disable=yes");
+
+	/* NOTE: increase open_count only after mail_index_open_files().
+	   it's used elsewhere to check if we're doing an initial opening
+	   of the index files */
+	if ((ret = mail_index_open_files(index, flags)) <= 0) {
+		/* doesn't exist and create flag not used */
+		mail_index_close_nonopened(index);
+		return ret;
+	}
+
+	index->open_count++;
+
+	if (index->log->head == NULL) {
+		mail_index_close(index);
+		mail_index_set_error(index, "Index is corrupted "
+				     "(log->view->head == NULL)");
+		return -1;
+	}
+
+	i_assert(index->map != NULL);
+	mail_index_alloc_cache_index_opened(index);
+	return 1;
+}
+
+/* Like mail_index_open() with the CREATE flag forced. Returns 0 on
+   success, -1 on error (the "doesn't exist" case can't happen). */
+int mail_index_open_or_create(struct mail_index *index,
+			      enum mail_index_open_flags flags)
+{
+	int ret;
+
+	flags |= MAIL_INDEX_OPEN_FLAG_CREATE;
+	ret = mail_index_open(index, flags);
+	i_assert(ret != 0);
+	return ret < 0 ? -1 : 0;
+}
+
+/* Close the main index fd if it's open. Safe to call when already
+   closed. */
+void mail_index_close_file(struct mail_index *index)
+{
+	if (index->fd != -1) {
+		if (close(index->fd) < 0)
+			mail_index_set_syscall_error(index, "close()");
+		index->fd = -1;
+	}
+}
+
+/* Release everything held by the index: map, fd, log, cache and filepath.
+   Panics if any view is still open (a leaked view would dangle). */
+static void mail_index_close_nonopened(struct mail_index *index)
+{
+	i_assert(!index->syncing);
+
+	if (index->views != NULL) {
+		i_panic("Leaked view for index %s: Opened in %s:%u",
+			index->filepath, index->views->source_filename,
+			index->views->source_linenum);
+	}
+	/* NOTE(review): always true here - the panic above covers the
+	   non-NULL case */
+	i_assert(index->views == NULL);
+
+	if (index->map != NULL)
+		mail_index_unmap(&index->map);
+
+	mail_index_close_file(index);
+	mail_transaction_log_close(index->log);
+	if (index->cache != NULL)
+		mail_cache_free(&index->cache);
+
+	i_free_and_null(index->filepath);
+
+	index->indexid = 0;
+}
+
+/* Drop one open reference; the last close releases all resources. */
+void mail_index_close(struct mail_index *index)
+{
+	i_assert(index->open_count > 0);
+
+	mail_index_alloc_cache_index_closing(index);
+	if (--index->open_count == 0)
+		mail_index_close_nonopened(index);
+}
+
+/* Best-effort unlink of the index, its transaction logs and cache file.
+   Attempts all files even if some fail; ENOENT is ignored. Returns 0 on
+   success, or -1 with errno set to the last real failure. No-op for
+   in-memory or read-only indexes. */
+int mail_index_unlink(struct mail_index *index)
+{
+	const char *path;
+	int last_errno = 0;
+
+	if (MAIL_INDEX_IS_IN_MEMORY(index) || index->readonly)
+		return 0;
+
+	/* main index */
+	if (unlink(index->filepath) < 0 && errno != ENOENT)
+		last_errno = errno;
+
+	/* logs */
+	path = t_strconcat(index->filepath, MAIL_TRANSACTION_LOG_SUFFIX, NULL);
+	if (unlink(path) < 0 && errno != ENOENT)
+		last_errno = errno;
+
+	path = t_strconcat(index->filepath,
+			   MAIL_TRANSACTION_LOG_SUFFIX".2", NULL);
+	if (unlink(path) < 0 && errno != ENOENT)
+		last_errno = errno;
+
+	/* cache */
+	path = t_strconcat(index->filepath, MAIL_CACHE_FILE_SUFFIX, NULL);
+	if (unlink(path) < 0 && errno != ENOENT)
+		last_errno = errno;
+
+	if (last_errno == 0)
+		return 0;
+	else {
+		errno = last_errno;
+		return -1;
+	}
+}
+
+/* Reopen the index file if it has been replaced on disk (different
+   inode/device, or our fd went stale). Returns 1 if the fd is now usable
+   (*reopened_r tells whether it was actually reopened), 0 if the file no
+   longer exists, -1 on error. *reason_r always describes the outcome. */
+int mail_index_reopen_if_changed(struct mail_index *index, bool *reopened_r,
+				 const char **reason_r)
+{
+	struct stat st1, st2;
+	int ret;
+
+	*reopened_r = FALSE;
+
+	if (MAIL_INDEX_IS_IN_MEMORY(index)) {
+		*reason_r = "in-memory index";
+		return 0;
+	}
+
+	if (index->fd == -1)
+		goto final;
+
+	if ((index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0)
+		nfs_flush_file_handle_cache(index->filepath);
+	if (nfs_safe_stat(index->filepath, &st2) < 0) {
+		if (errno == ENOENT) {
+			*reason_r = "index not found via stat()";
+			return 0;
+		}
+		mail_index_set_syscall_error(index, "stat()");
+		return -1;
+	}
+
+	if (fstat(index->fd, &st1) < 0) {
+		if (!ESTALE_FSTAT(errno)) {
+			mail_index_set_syscall_error(index, "fstat()");
+			return -1;
+		}
+		/* deleted/recreated, reopen */
+		*reason_r = "index is stale";
+	} else if (st1.st_ino == st2.st_ino &&
+		   CMP_DEV_T(st1.st_dev, st2.st_dev)) {
+		/* the same file */
+		*reason_r = "index unchanged";
+		return 1;
+	} else {
+		*reason_r = "index inode changed";
+	}
+
+	/* new file, new locks. the old fd can keep its locks, they don't
+	   matter anymore as no-one's going to modify the file. */
+	mail_index_close_file(index);
+
+final:
+	if ((ret = mail_index_try_open_only(index)) == 0)
+		*reason_r = "index not found via open()";
+	else if (ret > 0) {
+		*reason_r = "index opened";
+		*reopened_r = TRUE;
+	}
+	return ret;
+}
+
+/* Re-map the index to see the latest changes. Returns 0 on success, -1 on
+   error (a corrupted map also counts as failure here). */
+int mail_index_refresh(struct mail_index *index)
+{
+	int ret;
+
+	ret = mail_index_map(index, MAIL_INDEX_SYNC_HANDLER_HEAD);
+	return ret <= 0 ? -1 : 0;
+}
+
+/* Accessor for the index's cache file handle. */
+struct mail_cache *mail_index_get_cache(struct mail_index *index)
+{
+	return index->cache;
+}
+
+/* printf-style: record and log the index's last error. A NULL fmt clears
+   the stored error text without logging. */
+void mail_index_set_error(struct mail_index *index, const char *fmt, ...)
+{
+	va_list va;
+
+	i_free(index->last_error.text);
+
+	if (fmt == NULL)
+		index->last_error.text = NULL;
+	else {
+		va_start(va, fmt);
+		index->last_error.text = i_strdup_vprintf(fmt, va);
+		va_end(va);
+
+		e_error(index->event, "%s", index->last_error.text);
+	}
+}
+
+/* Record the last error without logging it. str is duplicated before the
+   old text is freed, so passing the current error text itself is safe. */
+void mail_index_set_error_nolog(struct mail_index *index, const char *str)
+{
+	i_assert(str != NULL);
+
+	char *old_error = index->last_error.text;
+	index->last_error.text = i_strdup(str);
+	i_free(old_error);
+}
+
+/* Returns TRUE if this is an in-memory (directory-less) index. */
+bool mail_index_is_in_memory(struct mail_index *index)
+{
+	return MAIL_INDEX_IS_IN_MEMORY(index);
+}
+
+/* Mark the index as in-memory by dropping its directory and giving it a
+   placeholder filepath. */
+static void mail_index_set_as_in_memory(struct mail_index *index)
+{
+	i_free_and_null(index->dir);
+
+	i_free(index->filepath);
+	index->filepath = i_strdup("(in-memory index)");
+}
+
+/* Convert an on-disk index into a purely in-memory one (used e.g. when
+   disk access fails). Returns 0 on success. Returns -1 if the index is
+   already in-memory but unmapped, if NEVER_IN_MEMORY forbids it, or if the
+   index was never opened (it's still marked in-memory so the caller can
+   re-open it). */
+int mail_index_move_to_memory(struct mail_index *index)
+{
+	struct mail_index_map *map;
+
+	if (MAIL_INDEX_IS_IN_MEMORY(index))
+		return index->map == NULL ? -1 : 0;
+
+	if ((index->flags & MAIL_INDEX_OPEN_FLAG_NEVER_IN_MEMORY) != 0)
+		return -1;
+
+	if (index->map == NULL) {
+		/* index was never even opened. just mark it as being in
+		   memory and let the caller re-open the index. */
+		i_assert(index->fd == -1);
+		mail_index_set_as_in_memory(index);
+		return -1;
+	}
+
+	/* move index map to memory */
+	if (!MAIL_INDEX_MAP_IS_IN_MEMORY(index->map)) {
+		map = mail_index_map_clone(index->map);
+		mail_index_unmap(&index->map);
+		index->map = map;
+	}
+
+	if (index->log != NULL) {
+		/* move transaction log to memory */
+		if (mail_transaction_log_move_to_memory(index->log) < 0)
+			return -1;
+	}
+
+	if (index->fd != -1) {
+		if (close(index->fd) < 0)
+			mail_index_set_syscall_error(index, "close()");
+		index->fd = -1;
+	}
+	mail_index_set_as_in_memory(index);
+	return 0;
+}
+
+/* Mark the in-memory map corrupted and, unless read-only, unlink the index
+   and transaction log files so they get recreated on next open. The
+   corruption flag itself is never written to disk. */
+void mail_index_mark_corrupted(struct mail_index *index)
+{
+	index->indexid = 0;
+
+	index->map->hdr.flags |= MAIL_INDEX_HDR_FLAG_CORRUPTED;
+	if (!index->readonly) {
+		if (unlink(index->filepath) < 0 &&
+		    errno != ENOENT && errno != ESTALE)
+			mail_index_set_syscall_error(index, "unlink()");
+		(void)mail_transaction_log_unlink(index->log);
+	}
+}
+
+/* Returns TRUE if deletion of this index has been requested or already
+   performed. */
+bool mail_index_is_deleted(struct mail_index *index)
+{
+	return index->index_delete_requested || index->index_deleted;
+}
+
+/* Get the transaction log's mtime as the index's modification time.
+   Returns 0 with *mtime_r=0 for in-memory indexes or when the .log doesn't
+   exist; -1 on stat() error. */
+int mail_index_get_modification_time(struct mail_index *index, time_t *mtime_r)
+{
+	struct stat st;
+	const char *path;
+
+	*mtime_r = 0;
+	if (MAIL_INDEX_IS_IN_MEMORY(index)) {
+		/* this function doesn't make sense for in-memory indexes */
+		return 0;
+	}
+
+	/* index may not be open, so index->filepath may be NULL */
+	path = t_strconcat(index->dir, "/", index->prefix,
+			   MAIL_TRANSACTION_LOG_SUFFIX, NULL);
+	if (stat(path, &st) < 0) {
+		if (errno == ENOENT) {
+			/* .log is always supposed to exist - don't bother
+			   trying to stat(dovecot.index) */
+			return 0;
+		}
+		mail_index_file_set_syscall_error(index, path, "stat()");
+		return -1;
+	}
+	*mtime_r = st.st_mtime;
+	return 0;
+}
+
+/* Try to change the file's group to the configured gid. On failure, if the
+   group bits actually grant something beyond the world bits, report the
+   error and then narrow the mode to the common subset of group and world
+   permissions so nobody gains unintended access.
+
+   Fix: the third branch was written "} if (...)" - a missing "else" that
+   only behaved correctly because both preceding branches return. Made the
+   chain an explicit else-if; behavior is unchanged. */
+void mail_index_fchown(struct mail_index *index, int fd, const char *path)
+{
+	mode_t mode;
+
+	if (index->set.gid == (gid_t)-1) {
+		/* no gid changing */
+		return;
+	} else if (fchown(fd, (uid_t)-1, index->set.gid) == 0) {
+		/* success */
+		return;
+	} else if ((index->set.mode & 0060) >> 3 == (index->set.mode & 0006)) {
+		/* group and world permissions are the same, so group doesn't
+		   really matter. ignore silently. */
+		return;
+	}
+	if (errno != EPERM)
+		mail_index_file_set_syscall_error(index, path, "fchown()");
+	else {
+		mail_index_set_error(index, "%s",
+			eperm_error_get_chgrp("fchown", path, index->set.gid,
+					      index->set.gid_origin));
+	}
+
+	/* continue, but change permissions so that only the common
+	   subset of group and world is used. this makes sure no one
+	   gets any extra permissions. */
+	mode = ((index->set.mode & 0060) >> 3) & (index->set.mode & 0006);
+	mode |= (mode << 3) | (index->set.mode & 0600);
+	if (fchmod(fd, mode) < 0)
+		mail_index_file_set_syscall_error(index, path, "fchmod()");
+}
+
+/* Take the transaction log's sync lock (the seq/offset outputs are
+   discarded). Returns 0 on success, -1 on error. */
+int mail_index_lock_sync(struct mail_index *index, const char *lock_reason)
+{
+	uint32_t file_seq;
+	uoff_t file_offset;
+
+	return mail_transaction_log_sync_lock(index->log, lock_reason,
+					      &file_seq, &file_offset);
+}
+
+/* Release the sync lock taken by mail_index_lock_sync(). */
+void mail_index_unlock(struct mail_index *index, const char *long_lock_reason)
+{
+	mail_transaction_log_sync_unlock(index->log, long_lock_reason);
+}
+
+/* Returns TRUE if the sync lock is currently held. */
+bool mail_index_is_locked(struct mail_index *index)
+{
+	return index->log_sync_locked;
+}
+
+/* Record a syscall error against the main index file's path. */
+void mail_index_set_syscall_error(struct mail_index *index,
+				  const char *function)
+{
+	mail_index_file_set_syscall_error(index, index->filepath, function);
+}
+
+/* Record a syscall failure (errno-based) for the given file. Special
+   cases: if the transaction log has also vanished, the whole index is
+   considered deleted and no error is recorded; out-of-disk-space is
+   flagged and suppressed when the index may fall back to memory; EACCES
+   gets a detailed permission diagnosis. */
+void mail_index_file_set_syscall_error(struct mail_index *index,
+				       const char *filepath,
+				       const char *function)
+{
+	const char *errstr;
+
+	i_assert(filepath != NULL);
+	i_assert(function != NULL);
+
+	if (errno == ENOENT) {
+		struct stat st;
+		/* preserve errno across the stat() probe below */
+		int old_errno = errno;
+		i_assert(index->log->filepath != NULL);
+		if (nfs_safe_stat(index->log->filepath, &st) < 0 &&
+		    errno == ENOENT) {
+			/* the index log has gone away */
+			index->index_deleted = TRUE;
+			errno = old_errno;
+			return;
+		}
+		errno = old_errno;
+	}
+
+	if (ENOSPACE(errno)) {
+		index->last_error.nodiskspace = TRUE;
+		/* unless in-memory fallback is forbidden, this isn't worth
+		   reporting - the index will just move to memory */
+		if ((index->flags & MAIL_INDEX_OPEN_FLAG_NEVER_IN_MEMORY) == 0)
+			return;
+	}
+
+	if (errno == EACCES) {
+		/* strip "()" so "creat()" matches "creat" */
+		function = t_strcut(function, '(');
+		if (strcmp(function, "creat") == 0 ||
+		    str_begins(function, "file_dotlock_"))
+			errstr = eacces_error_get_creating(function, filepath);
+		else
+			errstr = eacces_error_get(function, filepath);
+		mail_index_set_error(index, "%s", errstr);
+	} else {
+		const char *suffix = errno != EFBIG ? "" :
+			" (process was started with ulimit -f limit)";
+		mail_index_set_error(index, "%s failed with file %s: "
+				     "%m%s", function, filepath, suffix);
+	}
+}
+
+/* Returns the last recorded error text, or NULL if none. */
+const char *mail_index_get_error_message(struct mail_index *index)
+{
+	return index->last_error.text;
+}
+
+/* Clear the recorded error state (text and nodiskspace flag). */
+void mail_index_reset_error(struct mail_index *index)
+{
+	i_free(index->last_error.text);
+	i_zero(&index->last_error);
+}
diff --git a/src/lib-index/mail-index.h b/src/lib-index/mail-index.h
new file mode 100644
index 0000000..c1947cf
--- /dev/null
+++ b/src/lib-index/mail-index.h
@@ -0,0 +1,817 @@
+#ifndef MAIL_INDEX_H
+#define MAIL_INDEX_H
+
+#include "file-lock.h"
+#include "fsync-mode.h"
+#include "guid.h"
+#include "mail-types.h"
+#include "seq-range-array.h"
+
+#define MAIL_INDEX_MAJOR_VERSION 7
+#define MAIL_INDEX_MINOR_VERSION 3
+
+#define MAIL_INDEX_HEADER_MIN_SIZE 120
+
+/* Log a warning when transaction log has been locked for this many seconds.
+ This lock is held also between mail_index_sync_begin()..commit(). */
+#define MAIL_TRANSACTION_LOG_LOCK_WARN_SECS 30
+
+enum mail_index_open_flags {
+ /* Create index if it doesn't exist */
+ MAIL_INDEX_OPEN_FLAG_CREATE = 0x01,
+ /* Don't try to mmap() index files */
+ MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE = 0x04,
+ /* Rely on O_EXCL when creating dotlocks */
+ MAIL_INDEX_OPEN_FLAG_DOTLOCK_USE_EXCL = 0x10,
+ /* Flush NFS attr/data/write cache when necessary */
+ MAIL_INDEX_OPEN_FLAG_NFS_FLUSH = 0x40,
+ /* Open the index read-only */
+ MAIL_INDEX_OPEN_FLAG_READONLY = 0x80,
+ /* Create backups of dovecot.index files once in a while */
+ MAIL_INDEX_OPEN_FLAG_KEEP_BACKUPS = 0x100,
+ /* If we run out of disk space, fail modifications instead of moving
+ indexes to memory. */
+ MAIL_INDEX_OPEN_FLAG_NEVER_IN_MEMORY = 0x200,
+ /* We're only going to save new messages to the index.
+ Avoid unnecessary reads. */
+ MAIL_INDEX_OPEN_FLAG_SAVEONLY = 0x400,
+ /* Enable debug logging */
+ MAIL_INDEX_OPEN_FLAG_DEBUG = 0x800,
+ /* MAIL_INDEX_MAIL_FLAG_DIRTY can be used as a backend-specific flag.
+ All special handling of the flag is disabled by this. */
+ MAIL_INDEX_OPEN_FLAG_NO_DIRTY = 0x1000,
+};
+
+enum mail_index_header_compat_flags {
+ /* All fields in these index files are in little-endian format.
+ If the current CPU endianess doesn't match this, the indexes can't
+ be used. There is currently no support to translate endianess. */
+ MAIL_INDEX_COMPAT_LITTLE_ENDIAN = 0x01
+};
+
+enum mail_index_header_flag {
+ /* mail_index_mark_corrupted() was just called by this process.
+ Reopen or recreate it. This flag is never actually written to
+ disk. */
+ MAIL_INDEX_HDR_FLAG_CORRUPTED = 0x0001,
+ /* There are messages with MAIL_INDEX_MAIL_FLAG_DIRTY flag. */
+ MAIL_INDEX_HDR_FLAG_HAVE_DIRTY = 0x0002,
+ /* Index has been fsck'd. The caller may want to resync the index
+ to make sure it's valid and drop this flag. */
+ MAIL_INDEX_HDR_FLAG_FSCKD = 0x0004,
+};
+
+enum mail_index_mail_flags {
+ /* This flag used to contain MAIL_RECENT flag, but is always zero
+ with the current index file format. */
+ MAIL_INDEX_MAIL_FLAG_UNUSED = 0x20,
+ /* For private use by backend. Replacing flags doesn't change this. */
+ MAIL_INDEX_MAIL_FLAG_BACKEND = 0x40,
+ /* Message flags haven't been written to backend. If
+ MAIL_INDEX_OPEN_FLAG_NO_DIRTY is set, this is treated as a
+ backend-specific flag with no special internal handling. */
+ MAIL_INDEX_MAIL_FLAG_DIRTY = 0x80,
+
+ /* Force updating this message's modseq via a flag update record.
+ Note that this flag isn't saved to disk. */
+ MAIL_INDEX_MAIL_FLAG_UPDATE_MODSEQ = 0x100
+};
+
+#define MAIL_INDEX_FLAGS_MASK \
+ (MAIL_ANSWERED | MAIL_FLAGGED | MAIL_DELETED | MAIL_SEEN | MAIL_DRAFT)
+
+struct mail_index_header {
+ /* Major version is increased only when you can't have backwards
+ compatibility. If the field doesn't match MAIL_INDEX_MAJOR_VERSION,
+ don't even try to read it. */
+ uint8_t major_version;
+ /* Minor version is increased when the file format changes in a
+ backwards compatible way. If the field is smaller than
+ MAIL_INDEX_MINOR_VERSION, upgrade the file format and update the
+ minor_version field as well. If minor_version is higher than
+ MAIL_INDEX_MINOR_VERSION, leave it as it is. It likely means that a
+ new Dovecot version is currently being upgraded to, but the file was
+ still accessed by an old version. */
+ uint8_t minor_version;
+
+ /* sizeof(struct mail_index_header) when creating a new index. If the
+ header is smaller, fill the missing fields with 0. If the header is
+ larger, preserve the size and unknown fields. */
+ uint16_t base_header_size;
+ uint32_t header_size; /* base + extended header size */
+ /* sizeof(struct mail_index_record) + extensions */
+ uint32_t record_size;
+
+ uint8_t compat_flags; /* enum mail_index_header_compat_flags */
+ uint8_t unused[3];
+
+ /* Unique index file ID. Initialized with the current UNIX timestamp.
+ This is used to make sure that the main index, transaction log and
+ cache file are all part of the same index. */
+ uint32_t indexid;
+ uint32_t flags; /* enum mail_index_header_flag */
+
+ /* IMAP UIDVALIDITY. Initially can be 0, but must be non-0 after the
+ first mailbox sync. The UIDVALIDITY shouldn't normally change after
+ the mailbox is created. */
+ uint32_t uid_validity;
+ /* UID for the next saved message (must not be lower than this). This
+ value can only increase. */
+ uint32_t next_uid;
+
+ /* Number of messages in the index */
+ uint32_t messages_count;
+ uint32_t unused_old_recent_messages_count;
+ /* Number of messages with MAIL_SEEN flag */
+ uint32_t seen_messages_count;
+ /* Number of messages with MAIL_DELETED flag */
+ uint32_t deleted_messages_count;
+
+ /* The specified UID and all mails after it have MAIL_RECENT flag */
+ uint32_t first_recent_uid;
+ /* There are no UIDs lower than this without MAIL_SEEN flag. There are
+ no guarantees whether this UID has MAIL_SEEN flag, or whether the it
+ even exists. Used to optimize finding the first unseen message. */
+ uint32_t first_unseen_uid_lowwater;
+ /* Similarly to above, used to optimize finding the first deleted
+ message. */
+ uint32_t first_deleted_uid_lowwater;
+
+ /* The index is synced up to this log_file_seq and
+ log_file_head_offset. However, non-external transaction records
+ between tail_offset..head_offset haven't been synced to the
+ mailbox yet. For example there may be pending expunges or flag
+ changes, which will be synced on the next mail_index_sync_*()
+ calls. */
+ uint32_t log_file_seq;
+ uint32_t log_file_tail_offset;
+ uint32_t log_file_head_offset;
+
+ uint32_t unused_old_sync_size_part1;
+ /* Timestamp of when .log was rotated into .log.2. This can be used to
+ optimize checking when it's time to unlink it without stat()ing it.
+ 0 = unknown, -1 = .log.2 doesn't exists. */
+ uint32_t log2_rotate_time;
+ /* Timestamp when the mailbox backend-specific code last checked
+ whether there are old temporary files (left by crashes) that should
+ be deleted. 0 = unknown. */
+ uint32_t last_temp_file_scan;
+
+ /* UNIX timestamp to the beginning of the day (in server's local
+ timezone) when new messages were last added to the index file. */
+ uint32_t day_stamp;
+ /* These fields are updated when day_stamp < today. The [0..6] are
+ first moved to [1..7], then [0] is set to the first appended UID. So
+ they contain the first UID of the day for last 8 days when messages
+ were appended.
+
+ These are used by cache purging to decide when to drop
+ MAIL_CACHE_DECISION_TEMP fields. */
+ uint32_t day_first_uid[8];
+};
+
+#define MAIL_INDEX_RECORD_MIN_SIZE (sizeof(uint32_t) + sizeof(uint8_t))
+struct mail_index_record {
+ uint32_t uid;
+ uint8_t flags; /* enum mail_flags | enum mail_index_mail_flags */
+};
+
+struct mail_keywords {
+ struct mail_index *index;
+ unsigned int count;
+ int refcount;
+
+ /* variable sized list of keyword indexes */
+ unsigned int idx[FLEXIBLE_ARRAY_MEMBER];
+};
+
+enum mail_index_transaction_flags {
+ /* If transaction is marked as hidden, the changes are marked with
+ hidden=TRUE when the view is synchronized. */
+ MAIL_INDEX_TRANSACTION_FLAG_HIDE = 0x01,
+ /* External transactions describe changes to mailbox that have already
+ happened. */
+ MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL = 0x02,
+ /* Don't add flag updates unless they actually change something.
+ This is reliable only when syncing, otherwise someone else might
+ have already committed a transaction that had changed the flags. */
+ MAIL_INDEX_TRANSACTION_FLAG_AVOID_FLAG_UPDATES = 0x04,
+ /* fsync() this transaction (unless fsyncs are disabled) */
+ MAIL_INDEX_TRANSACTION_FLAG_FSYNC = 0x08,
+ /* Sync transaction describes changes to mailbox that already happened
+ to another mailbox with whom we're syncing with (dsync) */
+ MAIL_INDEX_TRANSACTION_FLAG_SYNC = 0x10
+};
+
+/* Change types reported by the index syncing API.
+   NOTE(review): values start at 0x02 - presumably 0x01 is historically
+   reserved; confirm before reusing it. */
+enum mail_index_sync_type {
+	MAIL_INDEX_SYNC_TYPE_EXPUNGE		= 0x02,
+	MAIL_INDEX_SYNC_TYPE_FLAGS		= 0x04,
+	MAIL_INDEX_SYNC_TYPE_KEYWORD_ADD	= 0x08,
+	MAIL_INDEX_SYNC_TYPE_KEYWORD_REMOVE	= 0x10
+};
+
+/* Bitmask of change categories that can request an fsync of the
+   transaction log. */
+enum mail_index_fsync_mask {
+	MAIL_INDEX_FSYNC_MASK_APPENDS	= 0x01,
+	MAIL_INDEX_FSYNC_MASK_EXPUNGES	= 0x02,
+	MAIL_INDEX_FSYNC_MASK_FLAGS	= 0x04,
+	MAIL_INDEX_FSYNC_MASK_KEYWORDS	= 0x08
+};
+
+enum mail_index_sync_flags {
+ /* Resync all dirty messages' flags. */
+ MAIL_INDEX_SYNC_FLAG_FLUSH_DIRTY = 0x01,
+ /* Drop recent flags from all messages */
+ MAIL_INDEX_SYNC_FLAG_DROP_RECENT = 0x02,
+ /* Create the transaction with AVOID_FLAG_UPDATES flag */
+ MAIL_INDEX_SYNC_FLAG_AVOID_FLAG_UPDATES = 0x04,
+ /* If there are no new transactions and nothing else to do,
+ return 0 in mail_index_sync_begin() */
+ MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES = 0x08,
+ /* Create the transaction with FSYNC flag */
+ MAIL_INDEX_SYNC_FLAG_FSYNC = 0x10,
+ /* If we see "delete index" request transaction, finish it.
+ This flag also allows committing more changes to a deleted index. */
+ MAIL_INDEX_SYNC_FLAG_DELETING_INDEX = 0x20,
+ /* Same as MAIL_INDEX_SYNC_FLAG_DELETING_INDEX, but finish index
+ deletion only once and fail the rest (= avoid race conditions when
+ multiple processes try to mark the index deleted) */
+ MAIL_INDEX_SYNC_FLAG_TRY_DELETING_INDEX = 0x40,
+ /* Update header's tail_offset to head_offset, even if it's the only
+ thing we do and there's no strict need for it. */
+ MAIL_INDEX_SYNC_FLAG_UPDATE_TAIL_OFFSET = 0x80
+};
+
+enum mail_index_view_sync_flags {
+ /* Don't sync expunges */
+ MAIL_INDEX_VIEW_SYNC_FLAG_NOEXPUNGES = 0x01,
+ /* Make sure view isn't inconsistent after syncing. This also means
+ that you don't care about view_sync_next()'s output, so it won't
+ return anything. */
+ MAIL_INDEX_VIEW_SYNC_FLAG_FIX_INCONSISTENT = 0x02,
+ /* Indicate that this is a secondary view, this can be used to indicate
+ that inconsistencies can be expected and if found should be fixed
+ by fully syncing. */
+ MAIL_INDEX_VIEW_SYNC_FLAG_2ND_INDEX = 0x04,
+};
+
+struct mail_index_sync_rec {
+ uint32_t uid1, uid2;
+ enum mail_index_sync_type type;
+
+ /* MAIL_INDEX_SYNC_TYPE_FLAGS: */
+ uint8_t add_flags;
+ uint8_t remove_flags;
+
+ /* MAIL_INDEX_SYNC_TYPE_KEYWORD_ADD, .._REMOVE: */
+ unsigned int keyword_idx;
+
+ /* MAIL_INDEX_SYNC_TYPE_EXPUNGE: */
+ guid_128_t guid_128;
+};
+
+enum mail_index_view_sync_type {
+ /* Flags or keywords changed */
+ MAIL_INDEX_VIEW_SYNC_TYPE_FLAGS = 0x01,
+ MAIL_INDEX_VIEW_SYNC_TYPE_MODSEQ = 0x02
+};
+
+struct mail_index_view_sync_rec {
+ uint32_t uid1, uid2;
+ enum mail_index_view_sync_type type;
+
+ /* TRUE if this was a hidden transaction. */
+ bool hidden:1;
+};
+
+enum mail_index_transaction_change {
+ MAIL_INDEX_TRANSACTION_CHANGE_APPEND = BIT(0),
+ MAIL_INDEX_TRANSACTION_CHANGE_EXPUNGE = BIT(1),
+ MAIL_INDEX_TRANSACTION_CHANGE_FLAGS = BIT(2),
+ MAIL_INDEX_TRANSACTION_CHANGE_KEYWORDS = BIT(3),
+ MAIL_INDEX_TRANSACTION_CHANGE_MODSEQ = BIT(4),
+ MAIL_INDEX_TRANSACTION_CHANGE_ATTRIBUTE = BIT(5),
+
+ MAIL_INDEX_TRANSACTION_CHANGE_OTHERS = BIT(30),
+};
+
+struct mail_index_transaction_commit_result {
+ /* seq/offset points to end of transaction */
+ uint32_t log_file_seq;
+ uoff_t log_file_offset;
+ /* number of bytes in the written transaction.
+ all of it was written to the same file. */
+ uoff_t commit_size;
+
+ enum mail_index_transaction_change changes_mask;
+ unsigned int ignored_modseq_changes;
+};
+
+struct mail_index_base_optimization_settings {
+ /* Rewrite the index when the number of bytes that needs to be read
+ from the .log on refresh is between these min/max values. */
+ uoff_t rewrite_min_log_bytes;
+ uoff_t rewrite_max_log_bytes;
+};
+
+struct mail_index_log_optimization_settings {
+ /* Rotate transaction log after it's a) min_size or larger and it was
+ created at least min_age_secs or b) larger than max_size. */
+ uoff_t min_size;
+ uoff_t max_size;
+ unsigned int min_age_secs;
+
+ /* Delete .log.2 when it's older than log2_stale_secs. Don't be too
+ eager, because older files are useful for QRESYNC and dsync. */
+ unsigned int log2_max_age_secs;
+};
+
+struct mail_index_cache_optimization_settings {
+ /* Drop fields that haven't been accessed for n seconds */
+ unsigned int unaccessed_field_drop_secs;
+ /* If cache record becomes larger than this, don't add it. */
+ unsigned int record_max_size;
+
+ /* Maximum size for the cache file. Internally the limit is 1 GB. */
+ uoff_t max_size;
+ /* Never purge the file if it's smaller than this */
+ uoff_t purge_min_size;
+ /* Purge the file when n% of records are deleted */
+ unsigned int purge_delete_percentage;
+ /* Purge the file when n% of rows contain continued rows.
+ For example 200% means that the record has 2 continued rows, i.e.
+ it exists in 3 separate segments in the cache file. */
+ unsigned int purge_continued_percentage;
+ /* Purge the file when we need to follow more than n next_offsets to
+ find the latest cache header. */
+ unsigned int purge_header_continue_count;
+};
+
+struct mail_index_optimization_settings {
+ struct mail_index_base_optimization_settings index;
+ struct mail_index_log_optimization_settings log;
+ struct mail_index_cache_optimization_settings cache;
+};
+
+struct mail_index;
+struct mail_index_map;
+struct mail_index_view;
+struct mail_index_transaction;
+struct mail_index_sync_ctx;
+struct mail_index_view_sync_ctx;
+
+struct mail_index *mail_index_alloc(struct event *parent_event,
+ const char *dir, const char *prefix);
+void mail_index_free(struct mail_index **index);
+
+/* Change .cache file's directory. */
+void mail_index_set_cache_dir(struct mail_index *index, const char *dir);
+/* Specify how often to do fsyncs. If mode is FSYNC_MODE_OPTIMIZED, the mask
+ can be used to specify which transaction types to fsync. */
+void mail_index_set_fsync_mode(struct mail_index *index, enum fsync_mode mode,
+ enum mail_index_fsync_mask mask);
+/* Try to set the index's permissions based on its index directory. Returns
+ TRUE if successful (directory existed), FALSE if mail_index_set_permissions()
+ should be called. */
+bool mail_index_use_existing_permissions(struct mail_index *index);
+void mail_index_set_permissions(struct mail_index *index,
+ mode_t mode, gid_t gid, const char *gid_origin);
+/* Set locking method and maximum time to wait for a lock
+ (UINT_MAX = default). */
+void mail_index_set_lock_method(struct mail_index *index,
+ enum file_lock_method lock_method,
+ unsigned int max_timeout_secs);
+/* Override the default optimization-related settings. Anything set to 0 will
+ use the default. */
+void mail_index_set_optimization_settings(struct mail_index *index,
+ const struct mail_index_optimization_settings *set);
+/* When creating a new index file or resetting an existing one, add the given
+   extension header data immediately to it. */
+void mail_index_set_ext_init_data(struct mail_index *index, uint32_t ext_id,
+ const void *data, size_t size);
+
+/* Open index. Returns 1 if ok, 0 if index doesn't exist and CREATE flags
+ wasn't given, -1 if error. */
+int mail_index_open(struct mail_index *index, enum mail_index_open_flags flags);
+/* Open or create index. Returns 0 if ok, -1 if error. */
+int mail_index_open_or_create(struct mail_index *index,
+ enum mail_index_open_flags flags);
+void mail_index_close(struct mail_index *index);
+/* unlink() all the index files. */
+int mail_index_unlink(struct mail_index *index);
+
+/* Returns TRUE if index is currently in memory. */
+bool mail_index_is_in_memory(struct mail_index *index);
+/* Move the index into memory. Returns 0 if ok, -1 if error occurred. */
+int mail_index_move_to_memory(struct mail_index *index);
+
+struct mail_cache *mail_index_get_cache(struct mail_index *index);
+
+/* Refresh index so mail_index_lookup*() will return latest values. Note that
+ immediately after this call there may already be changes, so if you need to
+ rely on validity of the returned values, use some external locking for it. */
+int ATTR_NOWARN_UNUSED_RESULT
+mail_index_refresh(struct mail_index *index);
+
+/* View can be used to look into index. Sequence numbers inside view change
+ only when you synchronize it. The view acquires required locks
+ automatically, but you'll have to drop them manually. */
+struct mail_index_view *
+mail_index_view_open(struct mail_index *index,
+ const char *source_filename, unsigned int source_linenum);
+#define mail_index_view_open(index) \
+ mail_index_view_open(index, __FILE__, __LINE__)
+void mail_index_view_close(struct mail_index_view **view);
+
+/* Returns the index for given view. */
+struct mail_index *mail_index_view_get_index(struct mail_index_view *view);
+/* Returns number of mails in view. */
+uint32_t mail_index_view_get_messages_count(struct mail_index_view *view);
+/* Returns TRUE if we lost track of changes for some reason. */
+bool mail_index_view_is_inconsistent(struct mail_index_view *view);
+/* Returns TRUE if there are open transactions open for the view. */
+bool mail_index_view_have_transactions(struct mail_index_view *view);
+
+/* Transaction has to be opened to be able to modify index. You can have
+ multiple transactions open simultaneously. Committed transactions won't
+ show up until you've synchronized the view. Expunges won't show up until
+ you've synchronized the mailbox (mail_index_sync_begin). */
+struct mail_index_transaction *
+mail_index_transaction_begin(struct mail_index_view *view,
+ enum mail_index_transaction_flags flags);
+int mail_index_transaction_commit(struct mail_index_transaction **t);
+int mail_index_transaction_commit_full(struct mail_index_transaction **t,
+ struct mail_index_transaction_commit_result *result_r);
+void mail_index_transaction_rollback(struct mail_index_transaction **t);
+/* Discard all changes in the transaction. */
+void mail_index_transaction_reset(struct mail_index_transaction *t);
+/* When committing transaction, drop flag/keyword updates for messages whose
+   modseq is larger than max_modseq. Save those messages' sequences to the
+   given array. */
+void mail_index_transaction_set_max_modseq(struct mail_index_transaction *t,
+ uint64_t max_modseq,
+ ARRAY_TYPE(seq_range) *seqs);
+
+/* Returns the view transaction was created for. */
+struct mail_index_view *
+mail_index_transaction_get_view(struct mail_index_transaction *t);
+/* Returns TRUE if the given sequence is being expunged in this transaction. */
+bool mail_index_transaction_is_expunged(struct mail_index_transaction *t,
+ uint32_t seq);
+
+/* Returns a view containing the mailbox state after changes in transaction
+ are applied. The view can still be used after transaction has been
+ committed. */
+struct mail_index_view *
+mail_index_transaction_open_updated_view(struct mail_index_transaction *t);
+
+/* Begin synchronizing mailbox with index file. Returns 1 if ok,
+ 0 if MAIL_INDEX_SYNC_FLAG_REQUIRE_CHANGES is set and there's nothing to
+ sync, -1 if error.
+
+ mail_index_sync_next() returns all changes from previously committed
+ transactions which haven't yet been committed to the actual mailbox.
+ They're returned in ascending order and they never overlap (if we add more
+ sync types, then they might). You must go through all of them and update
+ the mailbox accordingly.
+
+ Changes done to the returned transaction are expected to describe the
+ mailbox's current state.
+
+ The returned view already contains all the changes (except expunge
+ requests). After applying sync records on top of backend flags they should
+ match flags in the view. If they don't, there have been external changes.
+
+ Returned expunges are treated as expunge requests. They're not really
+ removed from the index until you mark them expunged to the returned
+ transaction. If it's not possible to expunge the message (e.g. permission
+ denied), simply don't mark them expunged.
+
+ Returned sequence numbers describe the mailbox state at the beginning of
+ synchronization, ie. expunges don't affect them. */
+int mail_index_sync_begin(struct mail_index *index,
+ struct mail_index_sync_ctx **ctx_r,
+ struct mail_index_view **view_r,
+ struct mail_index_transaction **trans_r,
+ enum mail_index_sync_flags flags);
+/* Like mail_index_sync_begin(), but returns 1 if OK and if index is already
+ synchronized up to the given log_file_seq+offset, the synchronization isn't
+ started and this function returns 0. This should be done when you wish to
+ sync your committed transaction instead of doing a full mailbox
+ synchronization. */
+int mail_index_sync_begin_to(struct mail_index *index,
+ struct mail_index_sync_ctx **ctx_r,
+ struct mail_index_view **view_r,
+ struct mail_index_transaction **trans_r,
+ uint32_t log_file_seq, uoff_t log_file_offset,
+ enum mail_index_sync_flags flags);
+/* Returns TRUE if it currently looks like syncing would return changes. */
+bool mail_index_sync_have_any(struct mail_index *index,
+ enum mail_index_sync_flags flags);
+/* Returns TRUE if it currently looks like syncing would return expunges. */
+bool mail_index_sync_have_any_expunges(struct mail_index *index);
+/* Returns the log file seq+offsets for the area which this sync is handling. */
+void mail_index_sync_get_offsets(struct mail_index_sync_ctx *ctx,
+ uint32_t *seq1_r, uoff_t *offset1_r,
+ uint32_t *seq2_r, uoff_t *offset2_r);
+/* Returns TRUE if a sync record was filled, FALSE if sync is finished. */
+bool mail_index_sync_next(struct mail_index_sync_ctx *ctx,
+ struct mail_index_sync_rec *sync_rec);
+/* Returns TRUE if there's more to sync. */
+bool mail_index_sync_have_more(struct mail_index_sync_ctx *ctx);
+/* Returns TRUE if sync has any expunges to handle. */
+bool mail_index_sync_has_expunges(struct mail_index_sync_ctx *ctx);
+/* Reset syncing to initial state after mail_index_sync_begin(), so you can
+ go through all the sync records again with mail_index_sync_next(). */
+void mail_index_sync_reset(struct mail_index_sync_ctx *ctx);
+/* Update result when refreshing index at the end of sync. */
+void mail_index_sync_set_commit_result(struct mail_index_sync_ctx *ctx,
+ struct mail_index_transaction_commit_result *result);
+/* Don't log a warning even if syncing took over
+ MAIL_TRANSACTION_LOG_LOCK_WARN_SECS seconds. Usually this is called because
+ the caller itself already logged a warning about it. */
+void mail_index_sync_no_warning(struct mail_index_sync_ctx *ctx);
+/* If a warning is logged because syncing took over
+ MAIL_TRANSACTION_LOG_LOCK_WARN_SECS seconds, log this as the reason for the
+ syncing. */
+void mail_index_sync_set_reason(struct mail_index_sync_ctx *ctx,
+ const char *reason);
+/* Commit synchronization by writing all changes to mail index file. */
+int mail_index_sync_commit(struct mail_index_sync_ctx **ctx);
+/* Rollback synchronization - none of the changes listed by sync_next() are
+ actually written to index file. */
+void mail_index_sync_rollback(struct mail_index_sync_ctx **ctx);
+
+/* Lock the index exclusively. This is the same locking as what happens when
+ syncing the index. It's not necessary to normally call this function, unless
+ doing something special such as rebuilding the index outside syncing.
+ Returns 0 on success, -1 if locking failed for any reason. */
+int mail_index_lock_sync(struct mail_index *index, const char *lock_reason);
+/* Unlock the locked index. The index must have been locked previously with
+ mail_index_lock_sync(). If the lock had been kept for excessively long,
+ a warning is logged with the long_lock_reason. */
+void mail_index_unlock(struct mail_index *index, const char *long_lock_reason);
+/* Returns TRUE if index is currently exclusively locked. */
+bool mail_index_is_locked(struct mail_index *index);
+
+/* Mark index file corrupted in memory and delete it from disk.
+ Invalidates all views. This should be called only for index files that can
+ safely be recreated without any data loss. */
+void mail_index_mark_corrupted(struct mail_index *index) ATTR_COLD;
+/* Check and fix any found problems. Returns -1 if we couldn't lock for sync,
+ 0 if everything went ok. */
+int mail_index_fsck(struct mail_index *index) ATTR_COLD;
+/* Returns TRUE if mail_index_fsck() has been called since the last
+ mail_index_reset_fscked() call. */
+bool mail_index_reset_fscked(struct mail_index *index);
+
+/* Synchronize changes in view. You have to go through all records, or view
+ will be marked inconsistent. Only sync_mask type records are
+ synchronized. */
+struct mail_index_view_sync_ctx *
+mail_index_view_sync_begin(struct mail_index_view *view,
+ enum mail_index_view_sync_flags flags);
+bool mail_index_view_sync_next(struct mail_index_view_sync_ctx *ctx,
+ struct mail_index_view_sync_rec *sync_rec);
+void
+mail_index_view_sync_get_expunges(struct mail_index_view_sync_ctx *ctx,
+ const ARRAY_TYPE(seq_range) **expunges_r);
+int mail_index_view_sync_commit(struct mail_index_view_sync_ctx **ctx,
+ bool *delayed_expunges_r);
+
+/* Returns the index header. */
+const struct mail_index_header *
+mail_index_get_header(struct mail_index_view *view);
+/* Returns the wanted message record. */
+const struct mail_index_record *
+mail_index_lookup(struct mail_index_view *view, uint32_t seq);
+const struct mail_index_record *
+mail_index_lookup_full(struct mail_index_view *view, uint32_t seq,
+ struct mail_index_map **map_r, bool *expunged_r);
+/* Returns TRUE if the given message has already been expunged from index. */
+bool mail_index_is_expunged(struct mail_index_view *view, uint32_t seq);
+/* Note that returned keyword indexes aren't sorted. */
+void mail_index_lookup_keywords(struct mail_index_view *view, uint32_t seq,
+ ARRAY_TYPE(keyword_indexes) *keyword_idx);
+/* Return keywords from given map. */
+void mail_index_map_lookup_keywords(struct mail_index_map *map, uint32_t seq,
+ ARRAY_TYPE(keyword_indexes) *keyword_idx);
+/* mail_index_lookup[_keywords]() returns the latest flag changes.
+ This function instead attempts to return the flags and keywords done by the
+ last view sync. */
+void mail_index_lookup_view_flags(struct mail_index_view *view, uint32_t seq,
+ enum mail_flags *flags_r,
+ ARRAY_TYPE(keyword_indexes) *keyword_idx);
+/* Returns the UID for given message. May be slightly faster than
+ mail_index_lookup()->uid. */
+void mail_index_lookup_uid(struct mail_index_view *view, uint32_t seq,
+ uint32_t *uid_r);
+/* Convert UID range to sequence range. If no UIDs are found, returns FALSE and
+ sequences are set to 0. Note that any of the returned sequences may have
+ been expunged already. */
+bool mail_index_lookup_seq_range(struct mail_index_view *view,
+ uint32_t first_uid, uint32_t last_uid,
+ uint32_t *first_seq_r, uint32_t *last_seq_r);
+bool mail_index_lookup_seq(struct mail_index_view *view,
+ uint32_t uid, uint32_t *seq_r);
+/* Find first mail with (mail->flags & flags_mask) == flags. Useful mostly for
+ taking advantage of lowwater-fields in headers. */
+void mail_index_lookup_first(struct mail_index_view *view,
+ enum mail_flags flags, uint8_t flags_mask,
+ uint32_t *seq_r);
+
+/* Append a new record to index. */
+void mail_index_append(struct mail_index_transaction *t, uint32_t uid,
+ uint32_t *seq_r);
+/* Assign new UIDs for mails with uid=0 or uid<min_allowed_uid. All the new
+   UIDs are >= first_new_uid, and also higher than the highest seen uid (i.e.
+   it doesn't try to fill UID gaps). Assumes that mailbox is locked in a way
+   that UIDs can be safely assigned. Returns UIDs for all assigned messages,
+   in their sequence order (so UIDs are not necessarily ascending). */
+void mail_index_append_finish_uids_full(struct mail_index_transaction *t,
+ uint32_t min_allowed_uid,
+ uint32_t first_new_uid,
+ ARRAY_TYPE(seq_range) *uids_r);
+/* Call mail_index_append_finish_uids_full() with first_uid used for both
+ min_allowed_uid and first_new_uid. */
+void mail_index_append_finish_uids(struct mail_index_transaction *t,
+ uint32_t first_uid,
+ ARRAY_TYPE(seq_range) *uids_r);
+/* Expunge record from index. Note that this doesn't affect sequence numbers
+ until transaction is committed and mailbox is synced. */
+void mail_index_expunge(struct mail_index_transaction *t, uint32_t seq);
+/* Like mail_index_expunge(), but also write message GUID to transaction log. */
+void mail_index_expunge_guid(struct mail_index_transaction *t, uint32_t seq,
+ const guid_128_t guid_128);
+/* Revert all changes done in this transaction to the given existing mail. */
+void mail_index_revert_changes(struct mail_index_transaction *t, uint32_t seq);
+/* Update flags in index. */
+void mail_index_update_flags(struct mail_index_transaction *t, uint32_t seq,
+ enum modify_type modify_type,
+ enum mail_flags flags);
+void mail_index_update_flags_range(struct mail_index_transaction *t,
+ uint32_t seq1, uint32_t seq2,
+ enum modify_type modify_type,
+ enum mail_flags flags);
+/* Specified attribute's value was changed. This is just a notification so the
+ change gets assigned its own modseq and any log readers can find out about
+ this change. */
+void mail_index_attribute_set(struct mail_index_transaction *t,
+ bool pvt, const char *key,
+ time_t timestamp, uint32_t value_len);
+/* Attribute was deleted. */
+void mail_index_attribute_unset(struct mail_index_transaction *t,
+ bool pvt, const char *key, time_t timestamp);
+/* Update message's modseq to be at least min_modseq. */
+void mail_index_update_modseq(struct mail_index_transaction *t, uint32_t seq,
+ uint64_t min_modseq);
+/* Update highest modseq to be at least min_modseq. */
+void mail_index_update_highest_modseq(struct mail_index_transaction *t,
+ uint64_t min_modseq);
+/* Reset the index before committing this transaction. This is usually done
+ only when UIDVALIDITY changes. */
+void mail_index_reset(struct mail_index_transaction *t);
+/* Remove MAIL_INDEX_HDR_FLAG_FSCKD from header if it exists. This must be
+ called only during syncing so that the mailbox is locked. */
+void mail_index_unset_fscked(struct mail_index_transaction *t);
+/* Mark index deleted. No further changes will be possible after the
+ transaction has been committed. */
+void mail_index_set_deleted(struct mail_index_transaction *t);
+/* Mark a deleted index as undeleted. Afterwards index can be changed again. */
+void mail_index_set_undeleted(struct mail_index_transaction *t);
+/* Returns TRUE if index has been set deleted. This gets set only after
+ index has been opened/refreshed and the transaction has been seen. */
+bool mail_index_is_deleted(struct mail_index *index);
+/* Returns the last time the index was modified. This can be called even if the
+ index isn't open. If the index doesn't exist, sets mtime to 0. */
+int mail_index_get_modification_time(struct mail_index *index, time_t *mtime_r);
+
+/* Lookup a keyword, returns TRUE if found, FALSE if not. */
+bool mail_index_keyword_lookup(struct mail_index *index,
+ const char *keyword, unsigned int *idx_r);
+void mail_index_keyword_lookup_or_create(struct mail_index *index,
+ const char *keyword,
+ unsigned int *idx_r);
+/* Return a pointer to array of NULL-terminated list of keywords. Note that
+ the array contents (and thus pointers inside it) may change after calling
+ mail_index_keywords_create() or mail_index_sync_begin(). */
+const ARRAY_TYPE(keywords) *mail_index_get_keywords(struct mail_index *index);
+
+/* Create a keyword list structure. */
+struct mail_keywords *
+mail_index_keywords_create(struct mail_index *index,
+ const char *const keywords[]) ATTR_NULL(2);
+struct mail_keywords *
+mail_index_keywords_create_from_indexes(struct mail_index *index,
+ const ARRAY_TYPE(keyword_indexes)
+ *keyword_indexes);
+void mail_index_keywords_ref(struct mail_keywords *keywords);
+void mail_index_keywords_unref(struct mail_keywords **keywords);
+
+/* Update keywords for given message. */
+void mail_index_update_keywords(struct mail_index_transaction *t, uint32_t seq,
+ enum modify_type modify_type,
+ struct mail_keywords *keywords);
+
+/* Update field in header. If prepend is TRUE, the header change is visible
+ before message syncing begins. */
+void mail_index_update_header(struct mail_index_transaction *t,
+ size_t offset, const void *data, size_t size,
+ bool prepend);
+
+/* Returns the full error message for last error. This message may
+ contain paths etc. so it shouldn't be shown to users. */
+const char *mail_index_get_error_message(struct mail_index *index);
+/* Reset the error message. */
+void mail_index_reset_error(struct mail_index *index);
+
+/* Apply changes in MAIL_INDEX_SYNC_TYPE_FLAGS typed sync records to given
+ flags variable. */
+void mail_index_sync_flags_apply(const struct mail_index_sync_rec *sync_rec,
+ uint8_t *flags);
+/* Apply changes in MAIL_INDEX_SYNC_TYPE_KEYWORD_* typed sync records to given
+   keywords array. Returns TRUE if something was changed. */
+bool mail_index_sync_keywords_apply(const struct mail_index_sync_rec *sync_rec,
+ ARRAY_TYPE(keyword_indexes) *keywords);
+
+/* register index extension. name is a unique identifier for the extension.
+ returns unique identifier for the name. */
+uint32_t mail_index_ext_register(struct mail_index *index, const char *name,
+ uint32_t default_hdr_size,
+ uint16_t default_record_size,
+ uint16_t default_record_align);
+/* Change an already registered extension's default sizes. */
+void mail_index_ext_register_resize_defaults(struct mail_index *index,
+ uint32_t ext_id,
+ uint32_t default_hdr_size,
+ uint16_t default_record_size,
+ uint16_t default_record_align);
+/* Returns TRUE and sets ext_id_r if extension with given name is registered. */
+bool mail_index_ext_lookup(struct mail_index *index, const char *name,
+ uint32_t *ext_id_r);
+/* Resize existing extension data. If size is grown, the new data will be
+   zero-filled. If size is shrunk, the data is simply dropped. */
+void mail_index_ext_resize(struct mail_index_transaction *t, uint32_t ext_id,
+ uint32_t hdr_size, uint16_t record_size,
+ uint16_t record_align);
+/* Resize header, keeping the old record size. */
+void mail_index_ext_resize_hdr(struct mail_index_transaction *t,
+ uint32_t ext_id, uint32_t hdr_size);
+
+/* Reset extension. Any updates for this extension which were issued before the
+ writer had seen this reset are discarded. reset_id is used to figure this
+ out, so it must be different every time. If clear_data=TRUE, records and
+ header is zeroed. */
+void mail_index_ext_reset(struct mail_index_transaction *t, uint32_t ext_id,
+ uint32_t reset_id, bool clear_data);
+/* Like mail_index_ext_reset(), but increase extension's reset_id atomically
+ when the transaction is being committed. If prev_reset_id doesn't match the
+ latest reset_id, the reset_id isn't increased and all extension changes are
+ ignored. */
+void mail_index_ext_reset_inc(struct mail_index_transaction *t, uint32_t ext_id,
+ uint32_t prev_reset_id, bool clear_data);
+/* Discard existing extension updates in this transaction and write new updates
+ using the given reset_id. The difference to mail_index_ext_reset() is that
+ this doesn't clear any existing record or header data. */
+void mail_index_ext_set_reset_id(struct mail_index_transaction *t,
+ uint32_t ext_id, uint32_t reset_id);
+/* Get the current reset_id for given extension. Returns TRUE if it exists. */
+bool mail_index_ext_get_reset_id(struct mail_index_view *view,
+ struct mail_index_map *map,
+ uint32_t ext_id, uint32_t *reset_id_r);
+
+/* Returns extension header. */
+void mail_index_get_header_ext(struct mail_index_view *view, uint32_t ext_id,
+ const void **data_r, size_t *data_size_r);
+void mail_index_map_get_header_ext(struct mail_index_view *view,
+ struct mail_index_map *map, uint32_t ext_id,
+ const void **data_r, size_t *data_size_r);
+/* Returns the wanted extension record for given message. If it doesn't exist,
+ *data_r is set to NULL. expunged_r is TRUE if the message has already been
+ expunged from the index. */
+void mail_index_lookup_ext(struct mail_index_view *view, uint32_t seq,
+ uint32_t ext_id, const void **data_r,
+ bool *expunged_r);
+void mail_index_lookup_ext_full(struct mail_index_view *view, uint32_t seq,
+ uint32_t ext_id, struct mail_index_map **map_r,
+ const void **data_r, bool *expunged_r);
+/* Get current extension sizes from the given map. Any of the _r parameters
+   may be NULL. */
+void mail_index_ext_get_size(struct mail_index_map *map, uint32_t ext_id,
+ uint32_t *hdr_size_r, uint16_t *record_size_r,
+ uint16_t *record_align_r);
+/* Update extension header field. */
+void mail_index_update_header_ext(struct mail_index_transaction *t,
+ uint32_t ext_id, size_t offset,
+ const void *data, size_t size);
+/* Update extension record. If old_data_r is non-NULL and the record extension
+ was already updated in this transaction, it's set to contain the data it's
+ now overwriting. */
+void mail_index_update_ext(struct mail_index_transaction *t, uint32_t seq,
+ uint32_t ext_id, const void *data, void *old_data)
+ ATTR_NULL(5);
+/* Increase/decrease number in extension atomically. Returns the sum of the
+ diffs for this seq. */
+int mail_index_atomic_inc_ext(struct mail_index_transaction *t,
+ uint32_t seq, uint32_t ext_id, int diff);
+
+#endif
diff --git a/src/lib-index/mail-transaction-log-append.c b/src/lib-index/mail-transaction-log-append.c
new file mode 100644
index 0000000..662f72a
--- /dev/null
+++ b/src/lib-index/mail-transaction-log-append.c
@@ -0,0 +1,256 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "write-full.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+
+/* Append one transaction record (header + payload) to the append context's
+   output buffer. Nothing is written to the log file here; that happens later
+   via log_buffer_write(). Empty payloads are silently skipped. size must be
+   a multiple of 4 so records stay 32-bit aligned. */
+void mail_transaction_log_append_add(struct mail_transaction_log_append_ctx *ctx,
+ enum mail_transaction_type type,
+ const void *data, size_t size)
+{
+ struct mail_transaction_header hdr;
+
+ i_assert((type & MAIL_TRANSACTION_TYPE_MASK) != 0);
+ i_assert((size % 4) == 0);
+
+ if (size == 0)
+ return;
+
+ i_zero(&hdr);
+ hdr.type = type | ctx->trans_flags;
+ /* expunge records get an extra protection bit pattern in their type */
+ if (type == MAIL_TRANSACTION_EXPUNGE ||
+ type == MAIL_TRANSACTION_EXPUNGE_GUID)
+ hdr.type |= MAIL_TRANSACTION_EXPUNGE_PROT;
+ /* boundary records are always marked external */
+ if (type == MAIL_TRANSACTION_BOUNDARY)
+ hdr.type |= MAIL_TRANSACTION_EXTERNAL;
+ /* hdr.size covers the header itself too, and is stored in the
+    offset-encoded form */
+ hdr.size = sizeof(hdr) + size;
+ hdr.size = mail_index_uint32_to_offset(hdr.size);
+
+ buffer_append(ctx->output, &hdr, sizeof(hdr));
+ buffer_append(ctx->output, data, size);
+
+ /* track the highest modseq this batch of records will produce */
+ mail_transaction_update_modseq(&hdr, data, &ctx->new_highest_modseq,
+ MAIL_TRANSACTION_LOG_HDR_VERSION(&ctx->log->head->hdr));
+ ctx->transaction_count++;
+}
+
+/* Fallback path after a failed write/fsync: move the whole index to
+   in-memory mode and append the pending output to the in-memory log buffer.
+   Returns 0 on success, -1 if the index couldn't be moved to memory. */
+static int
+log_buffer_move_to_memory(struct mail_transaction_log_append_ctx *ctx)
+{
+ struct mail_transaction_log_file *file = ctx->log->head;
+
+ /* first we need to truncate this latest write so that log syncing
+ doesn't break */
+ if (ftruncate(file->fd, file->sync_offset) < 0) {
+ mail_index_file_set_syscall_error(ctx->log->index,
+ file->filepath,
+ "ftruncate()");
+ }
+
+ if (mail_index_move_to_memory(ctx->log->index) < 0)
+ return -1;
+ i_assert(MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file));
+
+ /* the in-memory buffer must already be synced to the end before we
+    append the new output to it */
+ i_assert(file->buffer_offset + file->buffer->used == file->sync_offset);
+ buffer_append_buf(file->buffer, ctx->output, 0, SIZE_MAX);
+ file->sync_offset = file->buffer_offset + file->buffer->used;
+ return 0;
+}
+
+/* Flush the append context's output buffer to the head log file. In-memory
+   logs just get the data appended to their buffer. On-disk logs are written
+   with write_full() and optionally fdatasync()ed depending on fsync_mode and
+   whether this transaction asked for fsyncing; on write or fsync failure we
+   fall back to in-memory indexes via log_buffer_move_to_memory().
+   Returns 0 on success, -1 on error. */
+static int log_buffer_write(struct mail_transaction_log_append_ctx *ctx)
+{
+ struct mail_transaction_log_file *file = ctx->log->head;
+
+ if (ctx->output->used == 0)
+ return 0;
+
+ if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file)) {
+ if (file->buffer == NULL) {
+ /* buffer_offset skips over the file header, which isn't
+    kept in the buffer */
+ file->buffer = buffer_create_dynamic(default_pool, 4096);
+ file->buffer_offset = sizeof(file->hdr);
+ }
+ buffer_append_buf(file->buffer, ctx->output, 0, SIZE_MAX);
+ file->sync_offset = file->buffer_offset + file->buffer->used;
+ return 0;
+ }
+
+ if (write_full(file->fd, ctx->output->data, ctx->output->used) < 0) {
+ /* write failure, fallback to in-memory indexes. */
+ mail_index_file_set_syscall_error(ctx->log->index,
+ file->filepath,
+ "write_full()");
+ return log_buffer_move_to_memory(ctx);
+ }
+
+ /* if this sync transaction's tail offset was set to include this
+    write, the offsets must agree (see log_append_sync_offset_if_needed) */
+ i_assert(!ctx->sync_includes_this ||
+ file->sync_offset + ctx->output->used ==
+ file->max_tail_offset);
+
+ if ((ctx->want_fsync &&
+ file->log->index->set.fsync_mode != FSYNC_MODE_NEVER) ||
+ file->log->index->set.fsync_mode == FSYNC_MODE_ALWAYS) {
+ if (fdatasync(file->fd) < 0) {
+ mail_index_file_set_syscall_error(ctx->log->index,
+ file->filepath,
+ "fdatasync()");
+ return log_buffer_move_to_memory(ctx);
+ }
+ }
+
+ if (file->mmap_base == NULL && file->buffer != NULL) {
+ /* we're reading from a file. avoid re-reading the data that
+ we just wrote. this is also important for some NFS clients,
+ which for some reason sometimes can't read() this data we
+ just wrote in the same process */
+ i_assert(file->buffer_offset +
+ file->buffer->used == file->sync_offset);
+ buffer_append(file->buffer, ctx->output->data,
+ ctx->output->used);
+ }
+ file->sync_offset += ctx->output->used;
+ return 0;
+}
+
+/* Possibly append a MAIL_TRANSACTION_HEADER_UPDATE record that moves the
+   index header's log_file_tail_offset forward. Only index-syncing
+   transactions do this, and only when the tail offset would actually grow
+   past what was last read from the header. */
+static void
+log_append_sync_offset_if_needed(struct mail_transaction_log_append_ctx *ctx)
+{
+ struct mail_transaction_log_file *file = ctx->log->head;
+ struct mail_transaction_header_update *u;
+ struct mail_transaction_header *hdr;
+ uint32_t offset;
+ buffer_t buf;
+ unsigned char update_data[sizeof(*u) + sizeof(offset)];
+
+ if (!ctx->index_sync_transaction) {
+ /* this is a non-syncing transaction. update the tail offset
+ only if we're already writing something else to transaction
+ log anyway. */
+ i_assert(!ctx->tail_offset_changed);
+ /* FIXME: For now we never do this update, because it would
+ cause errors about shrinking tail offsets with old Dovecot
+ versions. This is anyway just an optimization, so it doesn't
+ matter all that much if we don't do it here. Finish this
+ in v2.3. */
+ /*if (ctx->output->used == 0)*/
+ return;
+ } else if (file->max_tail_offset == file->sync_offset) {
+ /* we're synced all the way to tail offset, so this sync
+ transaction can also be included in the same tail offset. */
+ if (ctx->output->used == 0 && !ctx->tail_offset_changed) {
+ /* nothing to write here after all (e.g. all unchanged
+ flag updates were dropped by export) */
+ return;
+ }
+
+ /* FIXME: when we remove exclusive log locking, we
+ can't rely on this. then write non-changed offset + check
+ real offset + rewrite the new offset if other transactions
+ weren't written in the middle */
+ /* grow the tail offset by the pending output plus the header
+    update record we're about to append below */
+ file->max_tail_offset += ctx->output->used +
+ sizeof(*hdr) + sizeof(*u) + sizeof(offset);
+ ctx->sync_includes_this = TRUE;
+ } else {
+ /* This is a syncing transaction. Since we're finishing a sync,
+ we may need to update the tail offset even if we don't have
+ anything else to do. */
+ }
+ offset = file->max_tail_offset;
+
+ /* skip the update entirely if the header already has this offset */
+ if (file->last_read_hdr_tail_offset == offset)
+ return;
+ i_assert(offset > file->last_read_hdr_tail_offset);
+
+ /* build the header-update payload on the stack: field offset+size
+    followed by the new tail offset value */
+ buffer_create_from_data(&buf, update_data, sizeof(update_data));
+ u = buffer_append_space_unsafe(&buf, sizeof(*u));
+ u->offset = offsetof(struct mail_index_header, log_file_tail_offset);
+ u->size = sizeof(offset);
+ buffer_append(&buf, &offset, sizeof(offset));
+
+ mail_transaction_log_append_add(ctx, MAIL_TRANSACTION_HEADER_UPDATE,
+ buf.data, buf.used);
+}
+
+/* Finish an append while the log head is locked: truncate away any garbage
+   left over from a previously failed write, fill in (or drop) the boundary
+   record's size, append the tail offset update if needed and flush the
+   output buffer to the log. Returns 0 on success, -1 on write error. */
+static int
+mail_transaction_log_append_locked(struct mail_transaction_log_append_ctx *ctx)
+{
+ struct mail_transaction_log_file *file = ctx->log->head;
+ struct mail_transaction_boundary *boundary;
+
+ if (file->sync_offset < file->last_size) {
+ /* there is some garbage at the end of the transaction log
+ (eg. previous write failed). remove it so reader doesn't
+ break because of it. */
+ buffer_set_used_size(file->buffer,
+ file->sync_offset - file->buffer_offset);
+ if (!MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file)) {
+ if (ftruncate(file->fd, file->sync_offset) < 0) {
+ mail_index_file_set_syscall_error(ctx->log->index,
+ file->filepath, "ftruncate()");
+ }
+ }
+ }
+
+ /* don't include log_file_tail_offset update in the transaction */
+ /* the boundary record was reserved at the start of the output by
+    mail_transaction_log_append_begin(); fill in its total size now */
+ boundary = buffer_get_space_unsafe(ctx->output,
+ sizeof(struct mail_transaction_header),
+ sizeof(*boundary));
+ boundary->size = ctx->output->used;
+
+ if (ctx->transaction_count <= 2) {
+ /* 0-1 changes. don't bother with the boundary */
+ /* (the boundary record itself is counted in transaction_count) */
+ unsigned int boundary_size =
+ sizeof(struct mail_transaction_header) +
+ sizeof(*boundary);
+
+ buffer_delete(ctx->output, 0, boundary_size);
+ }
+
+ log_append_sync_offset_if_needed(ctx);
+ if (log_buffer_write(ctx) < 0)
+ return -1;
+ file->sync_highest_modseq = ctx->new_highest_modseq;
+ return 0;
+}
+
+/* Begin appending to the transaction log. Locks the log head unless the
+   caller already holds the sync lock, and reserves a boundary record at the
+   start of the output buffer; its final size is filled in at commit time by
+   mail_transaction_log_append_locked(). Returns 0 on success, -1 if locking
+   the head failed. */
+int mail_transaction_log_append_begin(struct mail_index *index,
+ enum mail_transaction_type flags,
+ struct mail_transaction_log_append_ctx **ctx_r)
+{
+ struct mail_transaction_log_append_ctx *ctx;
+ struct mail_transaction_boundary boundary;
+
+ if (!index->log_sync_locked) {
+ if (mail_transaction_log_lock_head(index->log, "appending") < 0)
+ return -1;
+ }
+ ctx = i_new(struct mail_transaction_log_append_ctx, 1);
+ ctx->log = index->log;
+ ctx->output = buffer_create_dynamic(default_pool, 1024);
+ ctx->trans_flags = flags;
+
+ /* placeholder boundary record; size is filled in at commit */
+ i_zero(&boundary);
+ mail_transaction_log_append_add(ctx, MAIL_TRANSACTION_BOUNDARY,
+ &boundary, sizeof(boundary));
+
+ *ctx_r = ctx;
+ return 0;
+}
+
+/* Commit a previously begun append: write all buffered records to the log,
+   release the head lock (unless the caller holds the sync lock) and free the
+   context. *_ctx is set to NULL. Returns 0 on success, -1 on error. */
+int mail_transaction_log_append_commit(struct mail_transaction_log_append_ctx **_ctx)
+{
+ struct mail_transaction_log_append_ctx *ctx = *_ctx;
+ struct mail_index *index = ctx->log->index;
+ int ret = 0;
+
+ *_ctx = NULL;
+
+ ret = mail_transaction_log_append_locked(ctx);
+ /* unlock only if we took the lock in _append_begin() */
+ if (!index->log_sync_locked)
+ mail_transaction_log_file_unlock(index->log->head, "appending");
+
+ buffer_free(&ctx->output);
+ i_free(ctx);
+ return ret;
+}
diff --git a/src/lib-index/mail-transaction-log-file.c b/src/lib-index/mail-transaction-log-file.c
new file mode 100644
index 0000000..1820169
--- /dev/null
+++ b/src/lib-index/mail-transaction-log-file.c
@@ -0,0 +1,1685 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "ioloop.h"
+#include "file-dotlock.h"
+#include "nfs-workarounds.h"
+#include "read-full.h"
+#include "write-full.h"
+#include "mmap-util.h"
+#include "mail-index-private.h"
+#include "mail-index-modseq.h"
+#include "mail-transaction-log-private.h"
+
+#define LOG_PREFETCH IO_BLOCK_SIZE
+#define MEMORY_LOG_NAME "(in-memory transaction log file)"
+#define LOG_NEW_DOTLOCK_SUFFIX ".newlock"
+
+static int
+mail_transaction_log_file_sync(struct mail_transaction_log_file *file,
+ bool *retry_r, const char **reason_r);
+
+static void
+log_file_set_syscall_error(struct mail_transaction_log_file *file,
+ const char *function)
+{
+ mail_index_file_set_syscall_error(file->log->index,
+ file->filepath, function);
+}
+
+static void
+mail_transaction_log_mark_corrupted(struct mail_transaction_log_file *file)
+{
+ unsigned int offset =
+ offsetof(struct mail_transaction_log_header, indexid);
+ int flags;
+
+ if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file) ||
+ file->log->index->readonly)
+ return;
+
+ /* indexid=0 marks the log file as corrupted. we opened the file with
+ O_APPEND, and now we need to drop it for pwrite() to work (at least
+ in Linux) */
+ flags = fcntl(file->fd, F_GETFL, 0);
+ if (flags < 0) {
+ mail_index_file_set_syscall_error(file->log->index,
+ file->filepath, "fcntl(F_GETFL)");
+ return;
+ }
+ if (fcntl(file->fd, F_SETFL, flags & ~O_APPEND) < 0) {
+ mail_index_file_set_syscall_error(file->log->index,
+ file->filepath, "fcntl(F_SETFL)");
+ return;
+ }
+ if (pwrite_full(file->fd, &file->hdr.indexid,
+ sizeof(file->hdr.indexid), offset) < 0) {
+ mail_index_file_set_syscall_error(file->log->index,
+ file->filepath, "pwrite()");
+ }
+}
+
+void
+mail_transaction_log_file_set_corrupted(struct mail_transaction_log_file *file,
+ const char *fmt, ...)
+{
+ va_list va;
+
+ file->corrupted = TRUE;
+ file->hdr.indexid = 0;
+ mail_transaction_log_mark_corrupted(file);
+
+ va_start(va, fmt);
+ T_BEGIN {
+ mail_index_set_error(file->log->index,
+ "Corrupted transaction log file %s seq %u: %s "
+ "(sync_offset=%"PRIuUOFF_T")",
+ file->filepath, file->hdr.file_seq,
+ t_strdup_vprintf(fmt, va), file->sync_offset);
+ } T_END;
+ va_end(va);
+}
+
+struct mail_transaction_log_file *
+mail_transaction_log_file_alloc(struct mail_transaction_log *log,
+ const char *path)
+{
+ struct mail_transaction_log_file *file;
+
+ file = i_new(struct mail_transaction_log_file, 1);
+ file->log = log;
+ file->filepath = i_strdup(path);
+ file->fd = -1;
+ return file;
+}
+
+void mail_transaction_log_file_free(struct mail_transaction_log_file **_file)
+{
+ struct mail_transaction_log_file *file = *_file;
+ struct mail_transaction_log_file **p;
+ int old_errno = errno;
+
+ *_file = NULL;
+
+ i_assert(!file->locked);
+ i_assert(file->refcount == 0);
+
+ for (p = &file->log->files; *p != NULL; p = &(*p)->next) {
+ if (*p == file) {
+ *p = file->next;
+ break;
+ }
+ }
+
+ if (file == file->log->head)
+ file->log->head = NULL;
+
+ buffer_free(&file->buffer);
+
+ if (file->mmap_base != NULL) {
+ if (munmap(file->mmap_base, file->mmap_size) < 0)
+ log_file_set_syscall_error(file, "munmap()");
+ }
+
+ if (file->fd != -1) {
+ if (close(file->fd) < 0)
+ log_file_set_syscall_error(file, "close()");
+ }
+
+ i_free(file->filepath);
+ i_free(file->need_rotate);
+ i_free(file);
+
+ errno = old_errno;
+}
+
+static void
+mail_transaction_log_file_skip_to_head(struct mail_transaction_log_file *file)
+{
+ struct mail_transaction_log *log = file->log;
+ struct mail_index_map *map = log->index->map;
+ const struct mail_index_modseq_header *modseq_hdr;
+ uoff_t head_offset;
+
+ if (map == NULL || file->hdr.file_seq != map->hdr.log_file_seq ||
+ map->hdr.log_file_head_offset == 0)
+ return;
+
+ /* we can get a valid log offset from index file. initialize
+ sync_offset from it so we don't have to read the whole log
+ file from beginning. */
+ head_offset = map->hdr.log_file_head_offset;
+
+ modseq_hdr = mail_index_map_get_modseq_header(map);
+ if (head_offset < file->hdr.hdr_size) {
+ mail_index_set_error(log->index,
+ "%s: log_file_head_offset too small",
+ log->index->filepath);
+ file->sync_offset = file->hdr.hdr_size;
+ file->sync_highest_modseq = file->hdr.initial_modseq;
+ } else if (modseq_hdr == NULL && file->hdr.initial_modseq == 0) {
+ /* modseqs not used yet */
+ file->sync_offset = head_offset;
+ file->sync_highest_modseq = 0;
+ } else if (modseq_hdr == NULL ||
+ modseq_hdr->log_seq != file->hdr.file_seq) {
+ /* highest_modseq not synced, start from beginning */
+ file->sync_offset = file->hdr.hdr_size;
+ file->sync_highest_modseq = file->hdr.initial_modseq;
+ } else if (modseq_hdr->log_offset > head_offset) {
+ mail_index_set_error(log->index,
+ "%s: modseq_hdr.log_offset too large",
+ log->index->filepath);
+ file->sync_offset = file->hdr.hdr_size;
+ file->sync_highest_modseq = file->hdr.initial_modseq;
+ } else {
+ /* start from where we last stopped tracking modseqs */
+ file->sync_offset = modseq_hdr->log_offset;
+ file->sync_highest_modseq = modseq_hdr->highest_modseq;
+ }
+ if (file->hdr.file_seq == log->index->map->hdr.log_file_seq) {
+ file->last_read_hdr_tail_offset =
+ log->index->map->hdr.log_file_tail_offset;
+ }
+ if (file->last_read_hdr_tail_offset > file->max_tail_offset)
+ file->max_tail_offset = file->last_read_hdr_tail_offset;
+}
+
+static void
+mail_transaction_log_file_add_to_list(struct mail_transaction_log_file *file)
+{
+ struct mail_transaction_log_file **p;
+ const char *reason;
+ bool retry;
+
+ file->sync_offset = file->hdr.hdr_size;
+ file->sync_highest_modseq = file->hdr.initial_modseq;
+ mail_transaction_log_file_skip_to_head(file);
+
+ /* insert it to correct position */
+ for (p = &file->log->files; *p != NULL; p = &(*p)->next) {
+ if ((*p)->hdr.file_seq > file->hdr.file_seq)
+ break;
+ i_assert((*p)->hdr.file_seq < file->hdr.file_seq);
+ }
+
+ file->next = *p;
+ *p = file;
+
+ if (file->buffer != NULL) {
+ /* if we read any unfinished data, make sure the buffer gets
+ truncated. */
+ (void)mail_transaction_log_file_sync(file, &retry, &reason);
+ buffer_set_used_size(file->buffer,
+ file->sync_offset - file->buffer_offset);
+ }
+}
+
+static int
+mail_transaction_log_init_hdr(struct mail_transaction_log *log,
+ struct mail_transaction_log_header *hdr)
+{
+ struct mail_index *index = log->index;
+ struct mail_transaction_log_file *file;
+
+ i_assert(index->indexid != 0);
+
+ i_zero(hdr);
+ hdr->major_version = MAIL_TRANSACTION_LOG_MAJOR_VERSION;
+ hdr->minor_version = MAIL_TRANSACTION_LOG_MINOR_VERSION;
+ hdr->hdr_size = sizeof(struct mail_transaction_log_header);
+ hdr->indexid = log->index->indexid;
+ hdr->create_stamp = ioloop_time;
+#ifndef WORDS_BIGENDIAN
+ hdr->compat_flags |= MAIL_INDEX_COMPAT_LITTLE_ENDIAN;
+#endif
+
+ if (index->fd != -1) {
+ /* not creating index - make sure we have latest header */
+ if (!index->mapping) {
+ if (mail_index_map(index,
+ MAIL_INDEX_SYNC_HANDLER_HEAD) <= 0)
+ return -1;
+ } else {
+ /* if we got here from mapping, the .log file is
+ corrupted. use whatever values we got from index
+ file */
+ }
+ }
+ if (index->map != NULL) {
+ hdr->prev_file_seq = index->map->hdr.log_file_seq;
+ hdr->prev_file_offset = index->map->hdr.log_file_head_offset;
+ hdr->file_seq = index->map->hdr.log_file_seq + 1;
+ hdr->initial_modseq =
+ mail_index_map_modseq_get_highest(index->map);
+ } else {
+ hdr->file_seq = 1;
+ }
+ if (hdr->initial_modseq == 0) {
+ /* modseq tracking in log files is required for many reasons
+ nowadays, even if per-message modseqs aren't enabled in
+ dovecot.index. */
+ hdr->initial_modseq = 1;
+ }
+
+ if (log->head != NULL) {
+ /* make sure the sequence always increases to avoid crashes
+ later. this catches the buggy case where two processes
+ happen to replace the same log file. */
+ for (file = log->head->next; file != NULL; file = file->next) {
+ if (hdr->file_seq <= file->hdr.file_seq)
+ hdr->file_seq = file->hdr.file_seq + 1;
+ }
+
+ if (hdr->file_seq <= log->head->hdr.file_seq) {
+ /* make sure the sequence grows */
+ hdr->file_seq = log->head->hdr.file_seq+1;
+ }
+ if (hdr->initial_modseq < log->head->sync_highest_modseq) {
+ /* this should be always up-to-date */
+ hdr->initial_modseq = log->head->sync_highest_modseq;
+ }
+ }
+ return 0;
+}
+
+struct mail_transaction_log_file *
+mail_transaction_log_file_alloc_in_memory(struct mail_transaction_log *log)
+{
+ struct mail_transaction_log_file *file;
+
+ file = mail_transaction_log_file_alloc(log, MEMORY_LOG_NAME);
+ if (mail_transaction_log_init_hdr(log, &file->hdr) < 0) {
+ i_free(file);
+ return NULL;
+ }
+
+ file->buffer = buffer_create_dynamic(default_pool, 4096);
+ file->buffer_offset = sizeof(file->hdr);
+
+ mail_transaction_log_file_add_to_list(file);
+ return file;
+}
+
+static int
+mail_transaction_log_file_dotlock(struct mail_transaction_log_file *file)
+{
+ struct dotlock_settings dotlock_set;
+ int ret;
+
+ if (file->log->dotlock_refcount > 0)
+ ret = 1;
+ else {
+ i_assert(file->log->dotlock_refcount == 0);
+ mail_transaction_log_get_dotlock_set(file->log, &dotlock_set);
+ ret = file_dotlock_create(&dotlock_set, file->filepath, 0,
+ &file->log->dotlock);
+ }
+ if (ret > 0) {
+ file->log->dotlock_refcount++;
+ file->locked = TRUE;
+ file->lock_create_time = time(NULL);
+ return 0;
+ }
+ if (ret < 0) {
+ log_file_set_syscall_error(file, "file_dotlock_create()");
+ return -1;
+ }
+
+ mail_index_set_error(file->log->index,
+ "Timeout (%us) while waiting for "
+ "dotlock for transaction log file %s",
+ dotlock_set.timeout, file->filepath);
+ return -1;
+}
+
+static int
+mail_transaction_log_file_undotlock(struct mail_transaction_log_file *file)
+{
+ int ret;
+
+ i_assert(file->log->dotlock_refcount >= 0);
+ if (--file->log->dotlock_refcount > 0)
+ return 0;
+
+ ret = file_dotlock_delete(&file->log->dotlock);
+ if (ret < 0) {
+ log_file_set_syscall_error(file, "file_dotlock_delete()");
+ return -1;
+ }
+
+ if (ret == 0) {
+ mail_index_set_error(file->log->index,
+ "Dotlock was lost for transaction log file %s",
+ file->filepath);
+ return -1;
+ }
+ return 0;
+}
+
+int mail_transaction_log_file_lock(struct mail_transaction_log_file *file)
+{
+ unsigned int lock_timeout_secs;
+ int ret;
+
+ if (file->locked)
+ return 0;
+
+ if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file)) {
+ file->locked = TRUE;
+ return 0;
+ }
+
+ if (file->log->index->set.lock_method == FILE_LOCK_METHOD_DOTLOCK)
+ return mail_transaction_log_file_dotlock(file);
+
+ if (file->log->index->readonly) {
+ mail_index_set_error(file->log->index,
+ "Index is read-only, can't write-lock %s",
+ file->filepath);
+ return -1;
+ }
+
+ i_assert(file->file_lock == NULL);
+ lock_timeout_secs = I_MIN(MAIL_TRANSACTION_LOG_LOCK_TIMEOUT,
+ file->log->index->set.max_lock_timeout_secs);
+ ret = mail_index_lock_fd(file->log->index, file->filepath, file->fd,
+ F_WRLCK, lock_timeout_secs,
+ &file->file_lock);
+ if (ret > 0) {
+ file->locked = TRUE;
+ file->lock_create_time = time(NULL);
+ return 0;
+ }
+ if (ret < 0) {
+ log_file_set_syscall_error(file, "mail_index_wait_lock_fd()");
+ return -1;
+ }
+
+ mail_index_set_error(file->log->index,
+ "Timeout (%us) while waiting for lock for "
+ "transaction log file %s%s",
+ lock_timeout_secs, file->filepath,
+ file_lock_find(file->fd, file->log->index->set.lock_method, F_WRLCK));
+ return -1;
+}
+
+void mail_transaction_log_file_unlock(struct mail_transaction_log_file *file,
+ const char *lock_reason)
+{
+ unsigned int lock_time;
+
+ if (!file->locked)
+ return;
+
+ file->locked = FALSE;
+ file->locked_sync_offset_updated = FALSE;
+
+ if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file))
+ return;
+
+ lock_time = time(NULL) - file->lock_create_time;
+ if (lock_time >= MAIL_TRANSACTION_LOG_LOCK_WARN_SECS && lock_reason != NULL) {
+ i_warning("Transaction log file %s was locked for %u seconds (%s)",
+ file->filepath, lock_time, lock_reason);
+ }
+
+ if (file->log->index->set.lock_method == FILE_LOCK_METHOD_DOTLOCK) {
+ (void)mail_transaction_log_file_undotlock(file);
+ return;
+ }
+
+ file_unlock(&file->file_lock);
+}
+
+static ssize_t
+mail_transaction_log_file_read_header(struct mail_transaction_log_file *file)
+{
+ void *dest;
+ size_t pos, dest_size;
+ ssize_t ret;
+
+ i_assert(file->buffer == NULL && file->mmap_base == NULL);
+
+ i_zero(&file->hdr);
+ if (file->last_size < mmap_get_page_size() && file->last_size > 0) {
+ /* just read the entire transaction log to memory.
+ note that if some of the data hasn't been fully committed
+ yet (hdr.size=0), the buffer must be truncated later */
+ file->buffer = buffer_create_dynamic(default_pool, 4096);
+ file->buffer_offset = 0;
+ dest_size = file->last_size;
+ dest = buffer_append_space_unsafe(file->buffer, dest_size);
+ } else {
+ /* read only the header */
+ dest = &file->hdr;
+ dest_size = sizeof(file->hdr);
+ }
+
+ /* it's not necessarily an error to read less than wanted header size,
+ since older versions of the log format used smaller headers. */
+ pos = 0;
+ do {
+ ret = pread(file->fd, PTR_OFFSET(dest, pos),
+ dest_size - pos, pos);
+ if (ret > 0)
+ pos += ret;
+ } while (ret > 0 && pos < dest_size);
+
+ if (file->buffer != NULL) {
+ buffer_set_used_size(file->buffer, pos);
+ memcpy(&file->hdr, file->buffer->data,
+ I_MIN(pos, sizeof(file->hdr)));
+ }
+
+ return ret < 0 ? -1 : (ssize_t)pos;
+}
+
+static int
+mail_transaction_log_file_fail_dupe(struct mail_transaction_log_file *file)
+{
+ int ret;
+
+ /* mark the old file corrupted. we can't safely remove
+ it from the list however, so return failure. */
+ file->hdr.indexid = 0;
+ if (strcmp(file->filepath, file->log->head->filepath) != 0) {
+ /* only mark .2 corrupted, just to make sure we don't lose any
+ changes from .log in case we're somehow wrong */
+ mail_transaction_log_mark_corrupted(file);
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+ if (!file->corrupted) {
+ file->corrupted = TRUE;
+ mail_index_set_error(file->log->index,
+ "Transaction log %s: "
+ "duplicate transaction log sequence (%u)",
+ file->filepath, file->hdr.file_seq);
+ }
+ return ret;
+}
+
+static int
+mail_transaction_log_file_read_hdr(struct mail_transaction_log_file *file,
+ bool ignore_estale)
+{
+ struct mail_transaction_log_file *f;
+ int ret;
+
+ i_assert(!MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file));
+
+ if (file->corrupted)
+ return 0;
+
+ ret = mail_transaction_log_file_read_header(file);
+ if (ret < 0) {
+ if (errno != ESTALE || !ignore_estale)
+ log_file_set_syscall_error(file, "pread()");
+ return -1;
+ }
+ if (file->hdr.major_version != MAIL_TRANSACTION_LOG_MAJOR_VERSION) {
+ /* incompatible version - fix silently */
+ return 0;
+ }
+ if (ret < MAIL_TRANSACTION_LOG_HEADER_MIN_SIZE) {
+ mail_transaction_log_file_set_corrupted(file,
+ "unexpected end of file while reading header");
+ return 0;
+ }
+
+ const unsigned int hdr_version =
+ MAIL_TRANSACTION_LOG_HDR_VERSION(&file->hdr);
+ if (MAIL_TRANSACTION_LOG_VERSION_HAVE(hdr_version, COMPAT_FLAGS)) {
+ /* we have compatibility flags */
+ enum mail_index_header_compat_flags compat_flags = 0;
+
+#ifndef WORDS_BIGENDIAN
+ compat_flags |= MAIL_INDEX_COMPAT_LITTLE_ENDIAN;
+#endif
+ if (file->hdr.compat_flags != compat_flags) {
+ /* architecture change */
+ mail_index_set_error(file->log->index,
+ "Rebuilding index file %s: "
+ "CPU architecture changed",
+ file->log->index->filepath);
+ return 0;
+ }
+ }
+ if (file->hdr.hdr_size < MAIL_TRANSACTION_LOG_HEADER_MIN_SIZE) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Header size too small");
+ return 0;
+ }
+ if (file->hdr.hdr_size < sizeof(file->hdr)) {
+ /* @UNSAFE: smaller than we expected - zero out the fields we
+ shouldn't have filled */
+ memset(PTR_OFFSET(&file->hdr, file->hdr.hdr_size), 0,
+ sizeof(file->hdr) - file->hdr.hdr_size);
+ }
+
+ if (file->hdr.indexid == 0) {
+ /* corrupted */
+ file->corrupted = TRUE;
+ mail_index_set_error(file->log->index,
+ "Transaction log file %s: marked corrupted",
+ file->filepath);
+ return 0;
+ }
+ if (file->hdr.indexid != file->log->index->indexid) {
+ if (file->log->index->indexid != 0 &&
+ !file->log->index->initial_create) {
+ /* index file was probably just rebuilt and we don't
+ know about it yet */
+ mail_transaction_log_file_set_corrupted(file,
+ "indexid changed: %u -> %u",
+ file->log->index->indexid, file->hdr.indexid);
+ return 0;
+ }
+
+ /* creating index file. since transaction log is created
+ first, use the indexid in it to create the main index
+ to avoid races. */
+ file->log->index->indexid = file->hdr.indexid;
+ }
+
+ /* make sure we already don't have a file with the same sequence
+ opened. it shouldn't happen unless the old log file was
+ corrupted. */
+ for (f = file->log->files; f != NULL; f = f->next) {
+ if (f->hdr.file_seq == file->hdr.file_seq) {
+ if (strcmp(f->filepath, f->log->head->filepath) != 0) {
+ /* old "f" is the .log.2 */
+ return mail_transaction_log_file_fail_dupe(f);
+ } else {
+ /* new "file" is probably the .log.2 */
+ return mail_transaction_log_file_fail_dupe(file);
+ }
+ }
+ }
+
+ file->sync_highest_modseq = file->hdr.initial_modseq;
+ return 1;
+}
+
+static int
+mail_transaction_log_file_stat(struct mail_transaction_log_file *file,
+ bool ignore_estale)
+{
+ struct stat st;
+
+ if (fstat(file->fd, &st) < 0) {
+ if (!ESTALE_FSTAT(errno) || !ignore_estale)
+ log_file_set_syscall_error(file, "fstat()");
+ return -1;
+ }
+
+ file->st_dev = st.st_dev;
+ file->st_ino = st.st_ino;
+ file->last_mtime = st.st_mtime;
+ file->last_size = st.st_size;
+ return 0;
+}
+
+static bool
+mail_transaction_log_file_is_dupe(struct mail_transaction_log_file *file)
+{
+ struct mail_transaction_log_file *tmp;
+
+ for (tmp = file->log->files; tmp != NULL; tmp = tmp->next) {
+ if (tmp->st_ino == file->st_ino &&
+ CMP_DEV_T(tmp->st_dev, file->st_dev))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static void log_write_ext_hdr_init_data(struct mail_index *index, buffer_t *buf)
+{
+ const struct mail_index_registered_ext *rext;
+ struct mail_transaction_header *hdr;
+ struct mail_transaction_ext_intro *intro;
+ struct mail_transaction_ext_hdr_update *ext_hdr;
+ unsigned int hdr_offset;
+
+ rext = array_idx(&index->extensions, index->set.ext_hdr_init_id);
+
+ /* introduce the extension */
+ hdr_offset = buf->used;
+ hdr = buffer_append_space_unsafe(buf, sizeof(*hdr));
+ hdr->type = MAIL_TRANSACTION_EXT_INTRO;
+
+ intro = buffer_append_space_unsafe(buf, sizeof(*intro));
+ intro->ext_id = (uint32_t)-1;
+ intro->hdr_size = rext->hdr_size;
+ intro->record_size = rext->record_size;
+ intro->record_align = rext->record_align;
+ intro->name_size = strlen(rext->name);
+ buffer_append(buf, rext->name, intro->name_size);
+ if (buf->used % 4 != 0)
+ buffer_append_zero(buf, 4 - buf->used % 4);
+
+ hdr = buffer_get_space_unsafe(buf, hdr_offset, sizeof(*hdr));
+ hdr->size = mail_index_uint32_to_offset(buf->used - hdr_offset);
+
+ /* add the extension header data */
+ hdr_offset = buf->used;
+ hdr = buffer_append_space_unsafe(buf, sizeof(*hdr));
+ hdr->type = MAIL_TRANSACTION_EXT_HDR_UPDATE;
+
+ ext_hdr = buffer_append_space_unsafe(buf, sizeof(*ext_hdr));
+ ext_hdr->size = rext->hdr_size;
+ buffer_append(buf, index->set.ext_hdr_init_data, rext->hdr_size);
+
+ hdr = buffer_get_space_unsafe(buf, hdr_offset, sizeof(*hdr));
+ hdr->size = mail_index_uint32_to_offset(buf->used - hdr_offset);
+}
+
+static int
+mail_transaction_log_file_create2(struct mail_transaction_log_file *file,
+ int new_fd, bool reset,
+ struct dotlock **dotlock)
+{
+ struct mail_index *index = file->log->index;
+ struct stat st;
+ const char *path2;
+ buffer_t *writebuf;
+ int fd, ret;
+ bool rename_existing, need_lock;
+
+ need_lock = file->log->head != NULL && file->log->head->locked;
+
+ if (fcntl(new_fd, F_SETFL, O_APPEND) < 0) {
+ log_file_set_syscall_error(file, "fcntl(O_APPEND)");
+ return -1;
+ }
+
+ if ((index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
+ /* although we check also mtime and file size below, it's done
+ only to fix broken log files. we don't bother flushing
+ attribute cache just for that. */
+ nfs_flush_file_handle_cache(file->filepath);
+ }
+
+ /* log creation is locked now - see if someone already created it.
+ note that if we're rotating, we need to keep the log locked until
+ the file has been rewritten. and because fcntl() locks are stupid,
+ if we go and open()+close() the file and we had it already opened,
+ its locks are lost. so we use stat() to check if the file has been
+ recreated, although it almost never is. */
+ if (reset)
+ rename_existing = FALSE;
+ else if (nfs_safe_stat(file->filepath, &st) < 0) {
+ if (errno != ENOENT) {
+ log_file_set_syscall_error(file, "stat()");
+ return -1;
+ }
+ rename_existing = FALSE;
+ } else if (st.st_ino == file->st_ino &&
+ CMP_DEV_T(st.st_dev, file->st_dev) &&
+ /* inode/dev checks are enough when we're rotating the file,
+ but not when we're replacing a broken log file */
+ st.st_mtime == file->last_mtime &&
+ (uoff_t)st.st_size == file->last_size) {
+ /* no-one else recreated the file */
+ rename_existing = TRUE;
+ } else {
+ /* recreated. use the file if its header is ok */
+ fd = nfs_safe_open(file->filepath, O_RDWR | O_APPEND);
+ if (fd == -1) {
+ if (errno != ENOENT) {
+ log_file_set_syscall_error(file, "open()");
+ return -1;
+ }
+ } else {
+ file->fd = fd;
+ file->last_size = 0;
+ if (mail_transaction_log_file_read_hdr(file,
+ FALSE) > 0 &&
+ mail_transaction_log_file_stat(file, FALSE) == 0) {
+ /* yes, it was ok */
+ file_dotlock_delete(dotlock);
+ mail_transaction_log_file_add_to_list(file);
+ return 0;
+ }
+ file->fd = -1;
+ if (close(fd) < 0)
+ log_file_set_syscall_error(file, "close()");
+ }
+ rename_existing = FALSE;
+ }
+
+ if (index->fd == -1 && !rename_existing) {
+ /* creating the initial index */
+ reset = TRUE;
+ }
+
+ if (mail_transaction_log_init_hdr(file->log, &file->hdr) < 0)
+ return -1;
+
+ if (reset) {
+ /* don't reset modseqs. if we're reseting due to rebuilding
+ indexes we'll probably want to keep uidvalidity and in such
+ cases we really don't want to shrink modseqs. */
+ file->hdr.prev_file_seq = 0;
+ file->hdr.prev_file_offset = 0;
+ }
+
+ writebuf = t_buffer_create(128);
+ buffer_append(writebuf, &file->hdr, sizeof(file->hdr));
+
+ if (index->set.ext_hdr_init_data != NULL && reset)
+ log_write_ext_hdr_init_data(index, writebuf);
+ if (write_full(new_fd, writebuf->data, writebuf->used) < 0) {
+ log_file_set_syscall_error(file, "write_full()");
+ return -1;
+ }
+
+ if (file->log->index->set.fsync_mode == FSYNC_MODE_ALWAYS) {
+ /* the header isn't important, so don't bother calling
+ fdatasync() unless it's required */
+ if (fdatasync(new_fd) < 0) {
+ log_file_set_syscall_error(file, "fdatasync()");
+ return -1;
+ }
+ }
+
+ file->fd = new_fd;
+ ret = mail_transaction_log_file_stat(file, FALSE);
+
+ if (need_lock && ret == 0) {
+ /* we'll need to preserve the lock */
+ if (mail_transaction_log_file_lock(file) < 0)
+ ret = -1;
+ }
+
+ /* if we return -1 the dotlock deletion code closes the fd */
+ file->fd = -1;
+ if (ret < 0)
+ return -1;
+
+ /* keep two log files */
+ if (rename_existing) {
+ /* rename() would be nice and easy way to do this, except then
+ there's a race condition between the rename and
+ file_dotlock_replace(). during that time the log file
+ doesn't exist, which could cause problems. */
+ path2 = t_strconcat(file->filepath, ".2", NULL);
+ if (i_unlink_if_exists(path2) < 0) {
+ /* try to link() anyway */
+ }
+ if (nfs_safe_link(file->filepath, path2, FALSE) < 0 &&
+ errno != ENOENT && errno != EEXIST) {
+ mail_index_set_error(index, "link(%s, %s) failed: %m",
+ file->filepath, path2);
+ /* ignore the error. we don't care that much about the
+ second log file and we're going to overwrite this
+ first one. */
+ }
+ /* NOTE: here's a race condition where both .log and .log.2
+ point to the same file. our reading code should ignore that
+ though by comparing the inodes. */
+ }
+
+ if (file_dotlock_replace(dotlock,
+ DOTLOCK_REPLACE_FLAG_DONT_CLOSE_FD) <= 0) {
+ /* need to unlock to avoid assert-crash in
+ mail_transaction_log_file_free() */
+ mail_transaction_log_file_unlock(file, "creation failed");
+ return -1;
+ }
+
+ /* success */
+ file->fd = new_fd;
+ mail_transaction_log_file_add_to_list(file);
+
+ i_assert(!need_lock || file->locked);
+ return 1;
+}
+
+int mail_transaction_log_file_create(struct mail_transaction_log_file *file,
+ bool reset)
+{
+ struct mail_index *index = file->log->index;
+ struct dotlock_settings new_dotlock_set;
+ struct dotlock *dotlock;
+ mode_t old_mask;
+ int fd, ret;
+
+ i_assert(!MAIL_INDEX_IS_IN_MEMORY(index));
+
+ if (file->log->index->readonly) {
+ mail_index_set_error(index,
+ "Can't create log file %s: Index is read-only",
+ file->filepath);
+ return -1;
+ }
+
+ if (index->indexid == 0) {
+ mail_index_set_error(index,
+ "Can't create log file %s: Index is marked corrupted",
+ file->filepath);
+ return -1;
+ }
+
+ mail_transaction_log_get_dotlock_set(file->log, &new_dotlock_set);
+ new_dotlock_set.lock_suffix = LOG_NEW_DOTLOCK_SUFFIX;
+
+ /* With dotlocking we might already have path.lock created, so this
+ filename has to be different. */
+ old_mask = umask(index->set.mode ^ 0666);
+ fd = file_dotlock_open(&new_dotlock_set, file->filepath, 0, &dotlock);
+ umask(old_mask);
+
+ if (fd == -1) {
+ log_file_set_syscall_error(file, "file_dotlock_open()");
+ return -1;
+ }
+ mail_index_fchown(index, fd, file_dotlock_get_lock_path(dotlock));
+
+ /* either fd gets used or the dotlock gets deleted and returned fd
+ is for the existing file */
+ ret = mail_transaction_log_file_create2(file, fd, reset, &dotlock);
+ if (ret < 0) {
+ if (dotlock != NULL)
+ file_dotlock_delete(&dotlock);
+ return -1;
+ }
+ return ret;
+}
+
+int mail_transaction_log_file_open(struct mail_transaction_log_file *file,
+ const char **reason_r)
+{
+ struct mail_index *index = file->log->index;
+ unsigned int i;
+ bool ignore_estale;
+ int ret;
+
+ for (i = 0;; i++) {
+ if (!index->readonly) {
+ file->fd = nfs_safe_open(file->filepath,
+ O_RDWR | O_APPEND);
+ } else {
+ file->fd = nfs_safe_open(file->filepath, O_RDONLY);
+ }
+ if (file->fd == -1 && errno == EACCES) {
+ file->fd = nfs_safe_open(file->filepath, O_RDONLY);
+ index->readonly = TRUE;
+ }
+ if (file->fd == -1) {
+ if (errno == ENOENT) {
+ *reason_r = "File doesn't exist";
+ return 0;
+ }
+
+ log_file_set_syscall_error(file, "open()");
+ *reason_r = t_strdup_printf("open() failed: %m");
+ return -1;
+ }
+
+ ignore_estale = i < MAIL_INDEX_ESTALE_RETRY_COUNT;
+ if (mail_transaction_log_file_stat(file, ignore_estale) < 0)
+ ret = -1;
+ else if (mail_transaction_log_file_is_dupe(file)) {
+ /* probably our already opened .log file has been
+ renamed to .log.2 and we're trying to reopen it.
+ also possible that hit a race condition where .log
+ and .log.2 are linked. */
+ *reason_r = "File is already open";
+ return 0;
+ } else {
+ ret = mail_transaction_log_file_read_hdr(file,
+ ignore_estale);
+ }
+ if (ret > 0) {
+ /* success */
+ break;
+ }
+
+ if (ret == 0) {
+ /* corrupted */
+ if (index->readonly) {
+ /* don't delete */
+ } else {
+ i_unlink_if_exists(file->filepath);
+ }
+ *reason_r = "File is corrupted";
+ return 0;
+ }
+ if (errno != ESTALE ||
+ i == MAIL_INDEX_ESTALE_RETRY_COUNT) {
+ /* syscall error */
+ *reason_r = t_strdup_printf("fstat() failed: %m");
+ return -1;
+ }
+
+ /* ESTALE - try again */
+ buffer_free(&file->buffer);
+ }
+
+ mail_transaction_log_file_add_to_list(file);
+ return 1;
+}
+
+static int
+log_file_track_mailbox_sync_offset_hdr(struct mail_transaction_log_file *file,
+ const void *data, unsigned int trans_size,
+ const char **error_r)
+{
+ const struct mail_transaction_header_update *u = data;
+ const struct mail_index_header *ihdr;
+ const unsigned int size = trans_size - sizeof(struct mail_transaction_header);
+ const unsigned int offset_pos =
+ offsetof(struct mail_index_header, log_file_tail_offset);
+ const unsigned int offset_size = sizeof(ihdr->log_file_tail_offset);
+ uint32_t tail_offset;
+
+ i_assert(offset_size == sizeof(tail_offset));
+
+ if (size < sizeof(*u) || size < sizeof(*u) + u->size) {
+ *error_r = "header update extends beyond record size";
+ mail_transaction_log_file_set_corrupted(file, "%s", *error_r);
+ return -1;
+ }
+
+ if (u->offset <= offset_pos &&
+ u->offset + u->size >= offset_pos + offset_size) {
+ memcpy(&tail_offset,
+ CONST_PTR_OFFSET(u + 1, offset_pos - u->offset),
+ sizeof(tail_offset));
+
+ if (tail_offset < file->last_read_hdr_tail_offset) {
+ /* ignore shrinking tail offsets */
+ return 1;
+ } else if (tail_offset > file->sync_offset + trans_size) {
+ mail_transaction_log_file_set_corrupted(file,
+ "log_file_tail_offset %u goes past sync offset %"PRIuUOFF_T,
+ tail_offset, file->sync_offset + trans_size);
+ } else {
+ file->last_read_hdr_tail_offset = tail_offset;
+ if (tail_offset > file->max_tail_offset)
+ file->max_tail_offset = tail_offset;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static bool
+flag_updates_have_non_internal(const struct mail_transaction_flag_update *u,
+ unsigned int count, unsigned int version)
+{
+ /* Hide internal flags from modseqs if the log file's version
+ is new enough. This allows upgrading without the modseqs suddenly
+ shrinking. */
+ if (!MAIL_TRANSACTION_LOG_VERSION_HAVE(version, HIDE_INTERNAL_MODSEQS))
+ return TRUE;
+
+ for (unsigned int i = 0; i < count; i++) {
+ if (!MAIL_TRANSACTION_FLAG_UPDATE_IS_INTERNAL(&u[i]))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+void mail_transaction_update_modseq(const struct mail_transaction_header *hdr,
+ const void *data, uint64_t *cur_modseq,
+ unsigned int version)
+{
+ uint32_t trans_size;
+
+ trans_size = mail_index_offset_to_uint32(hdr->size);
+ i_assert(trans_size != 0);
+
+ if (*cur_modseq != 0) {
+ /* tracking modseqs */
+ } else if ((hdr->type & MAIL_TRANSACTION_TYPE_MASK) ==
+ MAIL_TRANSACTION_EXT_INTRO) {
+ /* modseqs not tracked yet. see if this is a modseq
+ extension introduction. */
+ const struct mail_transaction_ext_intro *intro = data;
+ const unsigned int modseq_ext_len =
+ strlen(MAIL_INDEX_MODSEQ_EXT_NAME);
+
+ if (intro->name_size == modseq_ext_len &&
+ memcmp(intro + 1, MAIL_INDEX_MODSEQ_EXT_NAME,
+ modseq_ext_len) == 0) {
+ /* modseq tracking started */
+ *cur_modseq += 1;
+ }
+ return;
+ } else {
+ /* not tracking modseqs */
+ return;
+ }
+
+ switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
+ case MAIL_TRANSACTION_EXPUNGE | MAIL_TRANSACTION_EXPUNGE_PROT:
+ case MAIL_TRANSACTION_EXPUNGE_GUID | MAIL_TRANSACTION_EXPUNGE_PROT:
+ if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
+ /* ignore expunge requests */
+ break;
+ }
+ /* fall through */
+ case MAIL_TRANSACTION_APPEND:
+ case MAIL_TRANSACTION_KEYWORD_UPDATE:
+ case MAIL_TRANSACTION_KEYWORD_RESET:
+ case MAIL_TRANSACTION_ATTRIBUTE_UPDATE:
+ /* these changes increase modseq */
+ *cur_modseq += 1;
+ break;
+ case MAIL_TRANSACTION_FLAG_UPDATE: {
+ const struct mail_transaction_flag_update *rec = data;
+ unsigned int count;
+
+ count = (trans_size - sizeof(*hdr)) / sizeof(*rec);
+ if (flag_updates_have_non_internal(rec, count, version))
+ *cur_modseq += 1;
+ break;
+ }
+ case MAIL_TRANSACTION_MODSEQ_UPDATE: {
+ const struct mail_transaction_modseq_update *rec, *end;
+
+ end = CONST_PTR_OFFSET(data, trans_size - sizeof(*hdr));
+ for (rec = data; rec < end; rec++) {
+ uint64_t modseq = ((uint64_t)rec->modseq_high32 << 32) |
+ rec->modseq_low32;
+ if (*cur_modseq < modseq)
+ *cur_modseq = modseq;
+ }
+ }
+ }
+}
+
+/* Process one fully-written transaction record during sync: fold it into
+   file->sync_highest_modseq, and for external transactions handle the
+   record types that change global index/log state.
+   Returns 1 on success, 0 if a boundary record shows the transaction
+   isn't fully written yet, -1 on error (*error_r set). */
+static int
+log_file_track_sync(struct mail_transaction_log_file *file,
+		    const struct mail_transaction_header *hdr,
+		    unsigned int trans_size, const char **error_r)
+{
+	const void *data = hdr + 1;
+	int ret;
+
+	mail_transaction_update_modseq(hdr, hdr + 1, &file->sync_highest_modseq,
+		MAIL_TRANSACTION_LOG_HDR_VERSION(&file->hdr));
+	if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0)
+		return 1;
+
+	/* external transactions: */
+	switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
+	case MAIL_TRANSACTION_HEADER_UPDATE:
+		/* see if this updates mailbox_sync_offset */
+		ret = log_file_track_mailbox_sync_offset_hdr(file, data,
+							     trans_size, error_r);
+		if (ret != 0)
+			return ret < 0 ? -1 : 1;
+		break;
+	case MAIL_TRANSACTION_INDEX_DELETED:
+		/* ignore if a later UNDELETED record was already seen, or if
+		   the change comes from an older log file than the last
+		   delete-state change */
+		if (file->sync_offset < file->index_undeleted_offset ||
+		    file->hdr.file_seq < file->log->index->index_delete_changed_file_seq)
+			break;
+		file->log->index->index_deleted = TRUE;
+		file->log->index->index_delete_requested = FALSE;
+		file->log->index->index_delete_changed_file_seq = file->hdr.file_seq;
+		file->index_deleted_offset = file->sync_offset + trans_size;
+		break;
+	case MAIL_TRANSACTION_INDEX_UNDELETED:
+		/* mirror of the DELETED case above */
+		if (file->sync_offset < file->index_deleted_offset ||
+		    file->hdr.file_seq < file->log->index->index_delete_changed_file_seq)
+			break;
+		file->log->index->index_deleted = FALSE;
+		file->log->index->index_delete_requested = FALSE;
+		file->log->index->index_delete_changed_file_seq = file->hdr.file_seq;
+		file->index_undeleted_offset = file->sync_offset + trans_size;
+		break;
+	case MAIL_TRANSACTION_BOUNDARY: {
+		const struct mail_transaction_boundary *boundary =
+			(const void *)(hdr + 1);
+		size_t wanted_buffer_size;
+
+		wanted_buffer_size = file->sync_offset - file->buffer_offset +
+			boundary->size;
+		if (wanted_buffer_size > file->buffer->used) {
+			/* the full transaction hasn't been written yet */
+			return 0;
+		}
+		break;
+	}
+	}
+
+	if (file->max_tail_offset == file->sync_offset) {
+		/* external transactions aren't synced to mailbox. we can
+		   update mailbox sync offset to skip this transaction to
+		   avoid re-reading it at the next sync. */
+		file->max_tail_offset += trans_size;
+	}
+	return 1;
+}
+
+/* Scan the buffered log data starting at file->sync_offset and advance
+   past every fully-written transaction record, updating
+   file->sync_highest_modseq and the other tracked state on the way.
+
+   Returns 1 on success, 0 with *retry_r=TRUE if the mmaped file's size
+   changed while scanning (caller should re-map and retry), 0 with
+   *retry_r=FALSE if the file is corrupted (*reason_r set), or -1 on
+   I/O error. */
+static int
+mail_transaction_log_file_sync(struct mail_transaction_log_file *file,
+			       bool *retry_r, const char **reason_r)
+{
+	const struct mail_transaction_header *hdr;
+	const void *data;
+	struct stat st;
+	size_t size, avail;
+	uint32_t trans_size = 0;
+	int ret;
+
+	i_assert(file->sync_offset >= file->buffer_offset);
+
+	*retry_r = FALSE;
+
+	data = buffer_get_data(file->buffer, &size);
+	if (file->buffer_offset + size < file->sync_offset) {
+		*reason_r = t_strdup_printf(
+			"log file shrank (%"PRIuUOFF_T" < %"PRIuUOFF_T")",
+			file->buffer_offset + (uoff_t)size, file->sync_offset);
+		mail_transaction_log_file_set_corrupted(file, "%s", *reason_r);
+		/* fix the sync_offset to avoid crashes later on */
+		file->sync_offset = file->buffer_offset + size;
+		return 0;
+	}
+	while (file->sync_offset - file->buffer_offset + sizeof(*hdr) <= size) {
+		hdr = CONST_PTR_OFFSET(data, file->sync_offset -
+				       file->buffer_offset);
+		trans_size = mail_index_offset_to_uint32(hdr->size);
+		if (trans_size == 0) {
+			/* unfinished or corrupted */
+			break;
+		}
+		if (trans_size < sizeof(*hdr)) {
+			*reason_r = t_strdup_printf(
+				"hdr.size too small (%u)", trans_size);
+			mail_transaction_log_file_set_corrupted(file, "%s", *reason_r);
+			return 0;
+		}
+
+		if (file->sync_offset - file->buffer_offset + trans_size > size)
+			break;
+
+		/* transaction has been fully written */
+		if ((ret = log_file_track_sync(file, hdr, trans_size, reason_r)) <= 0) {
+			if (ret < 0)
+				return 0;
+			break;
+		}
+
+		file->sync_offset += trans_size;
+	}
+
+	if (file->mmap_base != NULL && !file->locked) {
+		/* Now that all the mmaped pages have page faulted, check if
+		   the file had changed while doing that. Only after the last
+		   page has faulted, the size returned by fstat() can be
+		   trusted. Otherwise it might point to a page boundary while
+		   the next page is still being written.
+
+		   Without this check we might see partial transactions,
+		   sometimes causing "Extension record updated without intro
+		   prefix" errors. */
+		if (fstat(file->fd, &st) < 0) {
+			log_file_set_syscall_error(file, "fstat()");
+			*reason_r = t_strdup_printf("fstat() failed: %m");
+			return -1;
+		}
+		if ((uoff_t)st.st_size != file->last_size) {
+			file->last_size = st.st_size;
+			*retry_r = TRUE;
+			*reason_r = "File size changed - retrying";
+			return 0;
+		}
+	}
+
+	avail = file->sync_offset - file->buffer_offset;
+	if (avail != size) {
+		/* There's more data than we could sync at the moment. If the
+		   last record's size wasn't valid, we can't know if it will
+		   be updated unless we've locked the log. */
+		if (file->locked) {
+			*reason_r = "Unexpected garbage at EOF";
+			mail_transaction_log_file_set_corrupted(file, "%s", *reason_r);
+			return 0;
+		}
+		/* The size field will be updated soon */
+		mail_index_flush_read_cache(file->log->index, file->filepath,
+					    file->fd, file->locked);
+	}
+
+	if (file->next != NULL &&
+	    file->hdr.file_seq == file->next->hdr.prev_file_seq &&
+	    file->next->hdr.prev_file_offset != file->sync_offset) {
+		/* the next log file's header says this file should have ended
+		   at a different offset. Report the offset that was actually
+		   compared (file->next's prev_file_offset) instead of the
+		   head file's, which may be a different file. */
+		*reason_r = t_strdup_printf(
+			"Invalid transaction log size "
+			"(%"PRIuUOFF_T" vs %u): %s", file->sync_offset,
+			file->next->hdr.prev_file_offset, file->filepath);
+		mail_transaction_log_file_set_corrupted(file, "%s", *reason_r);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Read the missing [offset, file->buffer_offset) range from the file and
+   insert it at the beginning of the buffer, so the buffer then starts at
+   'offset'. Returns 1 on success, 0 if the file shrank or was deleted on
+   an NFS server (ESTALE), -1 on read error. */
+static int
+mail_transaction_log_file_insert_read(struct mail_transaction_log_file *file,
+				      uoff_t offset, const char **reason_r)
+{
+	void *data;
+	size_t size;
+	ssize_t ret;
+
+	/* shift the existing data forward to make room at the front */
+	size = file->buffer_offset - offset;
+	buffer_copy(file->buffer, size, file->buffer, 0, SIZE_MAX);
+
+	data = buffer_get_space_unsafe(file->buffer, 0, size);
+	ret = pread_full(file->fd, data, size, offset);
+	if (ret > 0) {
+		/* success */
+		file->buffer_offset -= size;
+		return 1;
+	}
+
+	/* failure. don't leave ourself to inconsistent state */
+	buffer_copy(file->buffer, 0, file->buffer, size, SIZE_MAX);
+	buffer_set_used_size(file->buffer, file->buffer->used - size);
+
+	if (ret == 0) {
+		/* pread_full() returned EOF before 'size' bytes were read */
+		*reason_r = "file shrank unexpectedly";
+		mail_transaction_log_file_set_corrupted(file, "%s", *reason_r);
+		return 0;
+	} else if (errno == ESTALE) {
+		/* log file was deleted in NFS server, fail silently */
+		*reason_r = t_strdup_printf("read() failed: %m");
+		return 0;
+	} else {
+		log_file_set_syscall_error(file, "pread()");
+		*reason_r = t_strdup_printf("read() failed: %m");
+		return -1;
+	}
+}
+
+/* Append more data to the buffer by reading from the current end of the
+   buffer until EOF. Updates file->last_size to the read end offset.
+   Returns 1 on success (EOF reached), 0 on ESTALE (deleted on NFS),
+   -1 on read error. */
+static int
+mail_transaction_log_file_read_more(struct mail_transaction_log_file *file,
+				    const char **reason_r)
+{
+	void *data;
+	size_t size;
+	/* NOTE(review): 32-bit offset - assumes log files never exceed 4 GB
+	   (record sizes are stored as uint32 elsewhere); confirm. */
+	uint32_t read_offset;
+	ssize_t ret;
+
+	read_offset = file->buffer_offset + file->buffer->used;
+
+	do {
+		/* read in LOG_PREFETCH sized chunks; EINTR restarts the read */
+		data = buffer_append_space_unsafe(file->buffer, LOG_PREFETCH);
+		ret = pread(file->fd, data, LOG_PREFETCH, read_offset);
+		if (ret > 0)
+			read_offset += ret;
+
+		/* trim the buffer back to what was actually read */
+		size = read_offset - file->buffer_offset;
+		buffer_set_used_size(file->buffer, size);
+	} while (ret > 0 || (ret < 0 && errno == EINTR));
+
+	file->last_size = read_offset;
+
+	if (ret < 0) {
+		*reason_r = t_strdup_printf("pread() failed: %m");
+		if (errno == ESTALE) {
+			/* log file was deleted in NFS server, fail silently */
+			return 0;
+		}
+		log_file_set_syscall_error(file, "pread()");
+		return -1;
+	}
+	return 1;
+}
+
+/* Returns TRUE if the NFS attribute cache should be flushed and the file
+   re-read: either the next log file or the main index header indicates
+   that more data was written to this file than what we've read so far. */
+static bool
+mail_transaction_log_file_need_nfs_flush(struct mail_transaction_log_file *file)
+{
+	const struct mail_index_header *hdr = &file->log->index->map->hdr;
+	uoff_t max_offset = file->last_size;
+	const struct mail_transaction_log_file *next = file->next;
+
+	/* a newer log file recorded where this file was supposed to end */
+	if (next != NULL && file->hdr.file_seq == next->hdr.prev_file_seq &&
+	    next->hdr.prev_file_offset != max_offset) {
+		/* we already have a newer log file which says that we haven't
+		   synced the entire file. */
+		return TRUE;
+	}
+
+	/* the main index has seen more of this log file than we have */
+	return file->hdr.file_seq == hdr->log_file_seq &&
+		max_offset < hdr->log_file_head_offset;
+}
+
+/* Read the log file into file->buffer starting from start_offset and sync
+   the newly read records. With MAIL_INDEX_OPEN_FLAG_NFS_FLUSH, retries
+   once with nfs_flush=TRUE if not enough data was visible.
+   Returns 1 on success, 0 if the file is corrupted or unusable
+   (*reason_r set), -1 on I/O error. */
+static int
+mail_transaction_log_file_read(struct mail_transaction_log_file *file,
+			       uoff_t start_offset, bool nfs_flush,
+			       const char **reason_r)
+{
+	bool retry;
+	int ret;
+
+	i_assert(file->mmap_base == NULL);
+
+	/* NFS: if file isn't locked, we're optimistic that we can read enough
+	   data without flushing attribute cache. if after reading we notice
+	   that we really should have read more, flush the cache and try again.
+	   if file is locked, the attribute cache was already flushed when
+	   refreshing the log. */
+	if (nfs_flush &&
+	    (file->log->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
+		if (!file->locked)
+			nfs_flush_attr_cache_unlocked(file->filepath);
+		else
+			nfs_flush_attr_cache_fd_locked(file->filepath, file->fd);
+	}
+
+	if (file->buffer != NULL && file->buffer_offset > start_offset) {
+		/* we have to insert missing data to beginning of buffer */
+		ret = mail_transaction_log_file_insert_read(file, start_offset, reason_r);
+		if (ret <= 0)
+			return ret;
+	}
+
+	if (file->buffer == NULL) {
+		file->buffer =
+			buffer_create_dynamic(default_pool, LOG_PREFETCH);
+		file->buffer_offset = start_offset;
+	}
+
+	if ((ret = mail_transaction_log_file_read_more(file, reason_r)) <= 0)
+		;
+	else if (!nfs_flush &&
+		 (file->log->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0 &&
+		 mail_transaction_log_file_need_nfs_flush(file)) {
+		/* we didn't read enough data. flush and try again.
+		   (recurses at most once, since nfs_flush=TRUE here) */
+		return mail_transaction_log_file_read(file, start_offset, TRUE, reason_r);
+	} else if ((ret = mail_transaction_log_file_sync(file, &retry, reason_r)) == 0) {
+		i_assert(!retry); /* retry happens only with mmap */
+	}
+	i_assert(file->sync_offset >= file->buffer_offset);
+	/* drop any partially written record from the end of the buffer */
+	buffer_set_used_size(file->buffer,
+			     file->sync_offset - file->buffer_offset);
+	return ret;
+}
+
+/* Verify that the requested [start_offset, end_offset] range is within
+   what has been synced. Returns TRUE if the range is valid, FALSE with
+   *reason_r describing the failure (including extra diagnostics about
+   whether the file was replaced or deleted). */
+static bool
+log_file_map_check_offsets(struct mail_transaction_log_file *file,
+			   uoff_t start_offset, uoff_t end_offset,
+			   const char **reason_r)
+{
+	struct stat st, st2;
+	bool have_st;
+
+	if (start_offset > file->sync_offset) {
+		/* broken start offset */
+		if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file)) {
+			*reason_r = t_strdup_printf(
+				"%s: start_offset (%"PRIuUOFF_T") > "
+				"current sync_offset (%"PRIuUOFF_T")",
+				file->filepath, start_offset, file->sync_offset);
+			return FALSE;
+		}
+
+		have_st = fstat(file->fd, &st) == 0;
+		if (!have_st) {
+			log_file_set_syscall_error(file, "fstat()");
+			st.st_size = -1;
+		}
+		/* cast: st_size is a signed off_t; PRIuUOFF_T expects the
+		   unsigned uoff_t */
+		*reason_r = t_strdup_printf(
+			"%s: start_offset (%"PRIuUOFF_T") > "
+			"current sync_offset (%"PRIuUOFF_T"), file size=%"PRIuUOFF_T,
+			file->filepath, start_offset, file->sync_offset,
+			(uoff_t)st.st_size);
+		if (stat(file->filepath, &st2) == 0) {
+			/* compare inodes only if fstat() succeeded -
+			   otherwise st.st_ino is uninitialized */
+			if (have_st && st.st_ino != st2.st_ino) {
+				*reason_r = t_strdup_printf(
+					"%s, file unexpectedly replaced", *reason_r);
+			}
+		} else if (errno == ENOENT) {
+			*reason_r = t_strdup_printf(
+				"%s, file unexpectedly deleted", *reason_r);
+		} else {
+			log_file_set_syscall_error(file, "stat()");
+		}
+		return FALSE;
+	}
+	if (end_offset != UOFF_T_MAX && end_offset > file->sync_offset) {
+		/* report the end_offset that actually failed the check,
+		   not start_offset */
+		*reason_r = t_strdup_printf(
+			"%s: end_offset (%"PRIuUOFF_T") > "
+			"current sync_offset (%"PRIuUOFF_T")",
+			file->filepath, end_offset, file->sync_offset);
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
+/* mmap() the whole log file (size taken from file->last_size) and point
+   file->buffer at the mapping. Returns 0 on success, -1 on mmap failure
+   (*reason_r set). mmap errors are logged at most once per second. */
+static int
+mail_transaction_log_file_mmap(struct mail_transaction_log_file *file,
+			       const char **reason_r)
+{
+	/* we may have switched to mmaping */
+	buffer_free(&file->buffer);
+
+	file->mmap_size = file->last_size;
+	file->mmap_base = mmap(NULL, file->mmap_size, PROT_READ, MAP_SHARED,
+			       file->fd, 0);
+	if (file->mmap_base == MAP_FAILED) {
+		file->mmap_base = NULL;
+		/* rate-limit syslog noise: log only once per ioloop second */
+		if (ioloop_time != file->last_mmap_error_time) {
+			file->last_mmap_error_time = ioloop_time;
+			log_file_set_syscall_error(file, t_strdup_printf(
+				"mmap(size=%zu)", file->mmap_size));
+		}
+		*reason_r = t_strdup_printf("mmap(size=%zu) failed: %m",
+					    file->mmap_size);
+		file->mmap_size = 0;
+		return -1;
+	}
+
+	if (file->mmap_size > mmap_get_page_size()) {
+		/* hint the kernel we'll read the mapping sequentially */
+		if (madvise(file->mmap_base, file->mmap_size,
+			    MADV_SEQUENTIAL) < 0)
+			log_file_set_syscall_error(file, "madvise()");
+	}
+
+	buffer_create_from_const_data(&file->mmap_buffer,
+				      file->mmap_base, file->mmap_size);
+	file->buffer = &file->mmap_buffer;
+	file->buffer_offset = 0;
+	return 0;
+}
+
+/* Drop the mmap()ed view of the log file, if one exists. Resets
+   mmap_base/mmap_size and frees the buffer pointing at the mapping. */
+static void
+mail_transaction_log_file_munmap(struct mail_transaction_log_file *file)
+{
+	if (file->mmap_base == NULL) {
+		/* nothing is mmaped */
+		return;
+	}
+	i_assert(file->buffer != NULL);
+
+	if (munmap(file->mmap_base, file->mmap_size) < 0)
+		log_file_set_syscall_error(file, "munmap()");
+	file->mmap_size = 0;
+	file->mmap_base = NULL;
+	buffer_free(&file->buffer);
+}
+
+/* Map the log file via mmap() (falling back to read() for small files)
+   and sync it. Re-mmaps in a loop while the file keeps growing.
+   Returns 1 on success, 0 if corrupted (*reason_r set), -1 on I/O error. */
+static int
+mail_transaction_log_file_map_mmap(struct mail_transaction_log_file *file,
+				   uoff_t start_offset, const char **reason_r)
+{
+	struct stat st;
+	bool retry;
+	int ret;
+
+	/* we are going to mmap() this file, but it's not necessarily
+	   mmaped currently. */
+	i_assert(file->buffer_offset == 0 || file->mmap_base == NULL);
+	i_assert(file->mmap_size == 0 || file->mmap_base != NULL);
+
+	if (fstat(file->fd, &st) < 0) {
+		log_file_set_syscall_error(file, "fstat()");
+		*reason_r = t_strdup_printf("fstat() failed: %m");
+		return -1;
+	}
+	file->last_size = st.st_size;
+
+	if ((uoff_t)st.st_size < file->sync_offset) {
+		*reason_r = t_strdup_printf(
+			"file size shrank (%"PRIuUOFF_T" < %"PRIuUOFF_T")",
+			(uoff_t)st.st_size, file->sync_offset);
+		mail_transaction_log_file_set_corrupted(file, "%s", *reason_r);
+		return 0;
+	}
+
+	if (file->buffer != NULL && file->buffer_offset <= start_offset &&
+	    (uoff_t)st.st_size == file->buffer_offset + file->buffer->used) {
+		/* we already have the whole file mapped */
+		if ((ret = mail_transaction_log_file_sync(file, &retry, reason_r)) != 0 ||
+		    !retry)
+			return ret;
+		/* size changed, re-mmap */
+	}
+
+	do {
+		mail_transaction_log_file_munmap(file);
+
+		if (file->last_size - start_offset < mmap_get_page_size()) {
+			/* just reading the file is probably faster */
+			return mail_transaction_log_file_read(file,
+							      start_offset,
+							      FALSE, reason_r);
+		}
+
+		if (mail_transaction_log_file_mmap(file, reason_r) < 0)
+			return -1;
+		/* retry=TRUE means the file grew while page-faulting it in */
+		ret = mail_transaction_log_file_sync(file, &retry, reason_r);
+	} while (retry);
+
+	return ret;
+}
+
+/* Make sure the log file contents in [start_offset, end_offset] are
+   available in file->buffer (via mmap() or read(), depending on index
+   flags) and synced. end_offset=UOFF_T_MAX means "to the end of file".
+   Returns 1 if ok, 0 if the file is corrupted or the offset range is
+   invalid (*reason_r set), -1 on I/O error. */
+int mail_transaction_log_file_map(struct mail_transaction_log_file *file,
+				  uoff_t start_offset, uoff_t end_offset,
+				  const char **reason_r)
+{
+	uoff_t map_start_offset = start_offset;
+	size_t size;
+	int ret;
+
+	if (file->hdr.indexid == 0) {
+		/* corrupted */
+		*reason_r = "corrupted, indexid=0";
+		return 0;
+	}
+
+	i_assert(start_offset >= file->hdr.hdr_size);
+	i_assert(start_offset <= end_offset);
+	i_assert(file->buffer == NULL || file->mmap_base != NULL ||
+		 file->sync_offset >= file->buffer_offset + file->buffer->used);
+
+	if (file->locked_sync_offset_updated && file == file->log->head &&
+	    end_offset == UOFF_T_MAX) {
+		/* we're not interested of going further than sync_offset */
+		if (!log_file_map_check_offsets(file, start_offset,
+						end_offset, reason_r))
+			return 0;
+		i_assert(start_offset <= file->sync_offset);
+		end_offset = file->sync_offset;
+	}
+
+	if (file->buffer != NULL && file->buffer_offset <= start_offset) {
+		/* see if we already have it */
+		size = file->buffer->used;
+		if (file->buffer_offset + size >= end_offset)
+			return 1;
+	}
+
+	if (file->locked) {
+		/* set this only when we've synced to end of file while locked
+		   (either end_offset=UOFF_T_MAX or we had to read anyway) */
+		file->locked_sync_offset_updated = TRUE;
+	}
+
+	if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file)) {
+		if (start_offset < file->buffer_offset || file->buffer == NULL) {
+			/* we had moved the log to memory but failed to read
+			   the beginning of the log file */
+			*reason_r = "Beginning of the log isn't available";
+			return 0;
+		}
+		return log_file_map_check_offsets(file, start_offset,
+						  end_offset, reason_r) ? 1 : 0;
+	}
+
+	if (start_offset > file->sync_offset)
+		mail_transaction_log_file_skip_to_head(file);
+	if (start_offset > file->sync_offset) {
+		/* although we could just skip over the unwanted data, we have
+		   to sync everything so that modseqs are calculated
+		   correctly */
+		map_start_offset = file->sync_offset;
+	}
+
+	if ((file->log->index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) == 0)
+		ret = mail_transaction_log_file_map_mmap(file, map_start_offset, reason_r);
+	else {
+		mail_transaction_log_file_munmap(file);
+		ret = mail_transaction_log_file_read(file, map_start_offset, FALSE, reason_r);
+	}
+
+	i_assert(file->buffer == NULL || file->mmap_base != NULL ||
+		 file->sync_offset >= file->buffer_offset + file->buffer->used);
+	if (ret <= 0)
+		return ret;
+
+	i_assert(file->buffer != NULL);
+	return log_file_map_check_offsets(file, start_offset, end_offset,
+					  reason_r) ? 1 : 0;
+}
+
+/* Convert an on-disk log file into an in-memory one: copy (or read) the
+   full contents into a heap buffer, close the fd and replace filepath
+   with the log's default path. Returns 0 on success, -1 if reading the
+   file into memory failed. */
+int mail_transaction_log_file_move_to_memory(struct mail_transaction_log_file *file)
+{
+	const char *error;
+	buffer_t *buf;
+	int ret = 0;
+
+	if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file))
+		return 0;
+
+	if (file->mmap_base != NULL) {
+		/* just copy to memory */
+		i_assert(file->buffer_offset == 0);
+
+		buf = buffer_create_dynamic(default_pool, file->mmap_size);
+		buffer_append(buf, file->mmap_base, file->mmap_size);
+		buffer_free(&file->buffer);
+		file->buffer = buf;
+
+		/* and lose the mmap */
+		if (munmap(file->mmap_base, file->mmap_size) < 0)
+			log_file_set_syscall_error(file, "munmap()");
+		file->mmap_base = NULL;
+	} else if (file->buffer_offset != 0) {
+		/* we don't have the full log in the memory. read it. */
+		ret = mail_transaction_log_file_read(file, 0, FALSE, &error);
+		if (ret <= 0) {
+			mail_index_set_error(file->log->index,
+				"%s: Failed to read into memory: %s", file->filepath, error);
+		}
+	}
+	file->last_size = 0;
+
+	if (close(file->fd) < 0)
+		log_file_set_syscall_error(file, "close()");
+	/* fd=-1 marks the file as in-memory from now on */
+	file->fd = -1;
+
+	i_free(file->filepath);
+	file->filepath = i_strdup(file->log->filepath);
+	return ret < 0 ? -1 : 0;
+}
diff --git a/src/lib-index/mail-transaction-log-modseq.c b/src/lib-index/mail-transaction-log-modseq.c
new file mode 100644
index 0000000..eb2b533
--- /dev/null
+++ b/src/lib-index/mail-transaction-log-modseq.c
@@ -0,0 +1,298 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "mail-index-private.h"
+#include "mail-index-modseq.h"
+#include "mail-transaction-log-private.h"
+
+/* Move the cache entry at 'idx' to the front of the cache (MRU order)
+   and return a pointer to it. */
+static struct modseq_cache *
+modseq_cache_hit(struct mail_transaction_log_file *file, unsigned int idx)
+{
+	if (idx > 0) {
+		/* @UNSAFE: move it to top */
+		struct modseq_cache tmp = file->modseq_cache[idx];
+
+		memmove(&file->modseq_cache[1], &file->modseq_cache[0],
+			sizeof(file->modseq_cache[0]) * idx);
+		file->modseq_cache[0] = tmp;
+	}
+	return file->modseq_cache;
+}
+
+/* Find a cache entry usable for reaching 'offset': an exact match (moved
+   to the front of the cache), or otherwise some entry whose offset is
+   <= 'offset'. Returns NULL if nothing in the cache is usable.
+   offset==0 marks an unused entry; entries are kept in MRU order, so the
+   first unused entry ends the scan. */
+static struct modseq_cache *
+modseq_cache_get_offset(struct mail_transaction_log_file *file, uoff_t offset)
+{
+	unsigned int i, best = UINT_MAX;
+
+	for (i = 0; i < N_ELEMENTS(file->modseq_cache); i++) {
+		if (offset < file->modseq_cache[i].offset)
+			continue;
+
+		if (file->modseq_cache[i].offset == 0)
+			return NULL;
+
+		if (offset == file->modseq_cache[i].offset) {
+			/* exact cache hit */
+			return modseq_cache_hit(file, i);
+		}
+
+		/* NOTE(review): this keeps the entry with the SMALLEST
+		   usable offset; the largest one would skip the most
+		   records - verify this is intentional */
+		if (best == UINT_MAX ||
+		    file->modseq_cache[i].offset <
+		    file->modseq_cache[best].offset)
+			best = i;
+	}
+	if (best == UINT_MAX)
+		return NULL;
+	return &file->modseq_cache[best];
+}
+
+/* Find a cache entry usable for reaching 'modseq': an exact match (moved
+   to the front of the cache), or otherwise some entry whose
+   highest_modseq is <= 'modseq'. Returns NULL if nothing is usable.
+   Mirrors modseq_cache_get_offset() but keyed by modseq. */
+static struct modseq_cache *
+modseq_cache_get_modseq(struct mail_transaction_log_file *file, uint64_t modseq)
+{
+	unsigned int i, best = UINT_MAX;
+
+	for (i = 0; i < N_ELEMENTS(file->modseq_cache); i++) {
+		if (modseq < file->modseq_cache[i].highest_modseq)
+			continue;
+
+		if (file->modseq_cache[i].offset == 0)
+			return NULL;
+
+		if (modseq == file->modseq_cache[i].highest_modseq) {
+			/* exact cache hit */
+			return modseq_cache_hit(file, i);
+		}
+
+		/* NOTE(review): keeps the smallest usable modseq; the
+		   largest would skip the most records - verify intent */
+		if (best == UINT_MAX ||
+		    file->modseq_cache[i].highest_modseq <
+		    file->modseq_cache[best].highest_modseq)
+			best = i;
+	}
+	if (best == UINT_MAX)
+		return NULL;
+	return &file->modseq_cache[best];
+}
+
+/* Return the transaction record at *offset (which must already be within
+   the synced, buffered range) and advance *offset past it.
+   Returns 0 on success, -1 if the record's size field is invalid
+   (marks the file corrupted and sets *error_r). */
+static int
+log_get_synced_record(struct mail_transaction_log_file *file, uoff_t *offset,
+		      const struct mail_transaction_header **hdr_r,
+		      const char **error_r)
+{
+	const struct mail_transaction_header *hdr;
+	uint32_t trans_size;
+
+	hdr = CONST_PTR_OFFSET(file->buffer->data,
+			       *offset - file->buffer_offset);
+
+	/* we've already synced this record at some point. it should
+	   be valid. */
+	trans_size = mail_index_offset_to_uint32(hdr->size);
+	if (trans_size < sizeof(*hdr) ||
+	    *offset - file->buffer_offset + trans_size > file->buffer->used) {
+		*error_r = t_strdup_printf(
+			"Transaction log corrupted unexpectedly at "
+			"%"PRIuUOFF_T": Invalid size %u (type=%x)",
+			*offset, trans_size, hdr->type);
+		mail_transaction_log_file_set_corrupted(file, "%s", *error_r);
+		return -1;
+	}
+	*offset += trans_size;
+	*hdr_r = hdr;
+	return 0;
+}
+
+/* Calculate the highest modseq of all records up to 'offset' in this log
+   file. Uses the modseq cache and the index's modseq header snapshot to
+   avoid scanning from the beginning of the file when possible; caches
+   the computed value afterwards.
+   Returns 1 if ok (*highest_modseq_r set), 0 if the log is corrupted or
+   the range couldn't be mapped, -1 on I/O error (*error_r set). */
+int mail_transaction_log_file_get_highest_modseq_at(
+	struct mail_transaction_log_file *file,
+	uoff_t offset, uint64_t *highest_modseq_r,
+	const char **error_r)
+{
+	const struct mail_transaction_header *hdr;
+	struct modseq_cache *cache;
+	uoff_t cur_offset;
+	uint64_t cur_modseq;
+	const char *reason;
+	int ret;
+
+	i_assert(offset <= file->sync_offset);
+
+	if (offset == file->sync_offset) {
+		*highest_modseq_r = file->sync_highest_modseq;
+		return 1;
+	}
+
+	cache = modseq_cache_get_offset(file, offset);
+	if (cache == NULL) {
+		/* nothing usable in cache - scan from beginning */
+		cur_offset = file->hdr.hdr_size;
+		cur_modseq = file->hdr.initial_modseq;
+	} else if (cache->offset == offset) {
+		/* exact cache hit */
+		*highest_modseq_r = cache->highest_modseq;
+		return 1;
+	} else {
+		/* use cache to skip over some records */
+		cur_offset = cache->offset;
+		cur_modseq = cache->highest_modseq;
+	}
+
+	/* See if we can use the "modseq" header in dovecot.index to further
+	   reduce how much we have to scan. */
+	const struct mail_index_modseq_header *modseq_hdr =
+		file->log->index->map == NULL ? NULL :
+		&file->log->index->map->modseq_hdr_snapshot;
+	if (modseq_hdr != NULL &&
+	    modseq_hdr->log_seq == file->hdr.file_seq &&
+	    modseq_hdr->log_offset <= offset &&
+	    modseq_hdr->log_offset >= cur_offset) {
+		cur_offset = modseq_hdr->log_offset;
+		cur_modseq = modseq_hdr->highest_modseq;
+	}
+
+	ret = mail_transaction_log_file_map(file, cur_offset, offset, &reason);
+	if (ret <= 0) {
+		*error_r = t_strdup_printf(
+			"Failed to map transaction log %s for getting modseq "
+			"at offset=%"PRIuUOFF_T" with start_offset=%"PRIuUOFF_T": %s",
+			file->filepath, offset, cur_offset, reason);
+		return ret;
+	}
+
+	i_assert(cur_offset >= file->buffer_offset);
+	i_assert(cur_offset + file->buffer->used >= offset);
+	/* scan the records between cur_offset and the wanted offset,
+	   accumulating the modseq */
+	while (cur_offset < offset) {
+		if (log_get_synced_record(file, &cur_offset, &hdr, error_r) < 0)
+			return 0;
+		mail_transaction_update_modseq(hdr, hdr + 1, &cur_modseq,
+			MAIL_TRANSACTION_LOG_HDR_VERSION(&file->hdr));
+	}
+
+	/* @UNSAFE: cache the value */
+	memmove(file->modseq_cache + 1, file->modseq_cache,
+		sizeof(*file->modseq_cache) *
+		(N_ELEMENTS(file->modseq_cache) - 1));
+	file->modseq_cache[0].offset = cur_offset;
+	file->modseq_cache[0].highest_modseq = cur_modseq;
+
+	*highest_modseq_r = cur_modseq;
+	return 1;
+}
+
+/* Scan records starting at *cur_offset until *cur_modseq reaches
+   'modseq' (or end of synced data). With use_highest, short-circuits to
+   sync_offset when modseq >= sync_highest_modseq.
+   Returns 1 when the scan finished (cur_offset/cur_modseq updated),
+   0 when *next_offset_r was set directly via the short-circuit,
+   -1 on error (index error is set). */
+static int
+get_modseq_next_offset_at(struct mail_transaction_log_file *file,
+			  uint64_t modseq, bool use_highest,
+			  uoff_t *cur_offset, uint64_t *cur_modseq,
+			  uoff_t *next_offset_r)
+{
+	const struct mail_transaction_header *hdr;
+	const char *reason;
+	int ret;
+
+	/* make sure we've read until end of file. this is especially important
+	   with non-head logs which might only have been opened without being
+	   synced. */
+	ret = mail_transaction_log_file_map(file, *cur_offset, UOFF_T_MAX, &reason);
+	if (ret <= 0) {
+		mail_index_set_error(file->log->index,
+			"Failed to map transaction log %s for getting offset "
+			"for modseq=%"PRIu64" with start_offset=%"PRIuUOFF_T": %s",
+			file->filepath, modseq, *cur_offset, reason);
+		return -1;
+	}
+
+	/* check sync_highest_modseq again in case sync_offset was updated */
+	if (modseq >= file->sync_highest_modseq && use_highest) {
+		*next_offset_r = file->sync_offset;
+		return 0;
+	}
+
+	i_assert(*cur_offset >= file->buffer_offset);
+	while (*cur_offset < file->sync_offset) {
+		if (log_get_synced_record(file, cur_offset, &hdr, &reason) < 0) {
+			mail_index_set_error(file->log->index,
+				"%s: %s", file->filepath, reason);
+			return -1;
+		}
+		mail_transaction_update_modseq(hdr, hdr + 1, cur_modseq,
+			MAIL_TRANSACTION_LOG_HDR_VERSION(&file->hdr));
+		if (*cur_modseq >= modseq)
+			break;
+	}
+	return 1;
+}
+
+/* Find the log file offset of the first record after the one that raised
+   the highest modseq to 'modseq'. Uses the modseq cache when possible;
+   if modseq tracking turns out to be corrupted, retries from the
+   beginning of the file and schedules the index/log for recreation.
+   Returns 0 on success (*next_offset_r set), -1 on error. */
+int mail_transaction_log_file_get_modseq_next_offset(
+	struct mail_transaction_log_file *file,
+	uint64_t modseq, uoff_t *next_offset_r)
+{
+	struct modseq_cache *cache;
+	uoff_t cur_offset;
+	uint64_t cur_modseq;
+	int ret;
+
+	if (modseq == file->sync_highest_modseq) {
+		*next_offset_r = file->sync_offset;
+		return 0;
+	}
+	if (modseq == file->hdr.initial_modseq) {
+		*next_offset_r = file->hdr.hdr_size;
+		return 0;
+	}
+
+	cache = modseq_cache_get_modseq(file, modseq);
+	if (cache == NULL) {
+		/* nothing usable in cache - scan from beginning */
+		cur_offset = file->hdr.hdr_size;
+		cur_modseq = file->hdr.initial_modseq;
+	} else if (cache->highest_modseq == modseq) {
+		/* exact cache hit */
+		*next_offset_r = cache->offset;
+		return 0;
+	} else {
+		/* use cache to skip over some records */
+		cur_offset = cache->offset;
+		cur_modseq = cache->highest_modseq;
+	}
+
+	if ((ret = get_modseq_next_offset_at(file, modseq, TRUE, &cur_offset,
+					     &cur_modseq, next_offset_r)) <= 0)
+		return ret;
+	if (cur_offset == file->sync_offset) {
+		/* if we got to sync_offset, cur_modseq should be
+		   sync_highest_modseq */
+		mail_index_set_error(file->log->index,
+			"%s: Transaction log modseq tracking is corrupted - fixing",
+			file->filepath);
+		/* retry getting the offset by reading from the beginning
+		   of the file */
+		cur_offset = file->hdr.hdr_size;
+		cur_modseq = file->hdr.initial_modseq;
+		ret = get_modseq_next_offset_at(file, modseq, FALSE,
+						&cur_offset, &cur_modseq,
+						next_offset_r);
+		if (ret < 0)
+			return -1;
+		i_assert(ret != 0);
+		/* get it fixed on the next sync */
+		if (file->log->index->need_recreate == NULL) {
+			file->log->index->need_recreate =
+				i_strdup("modseq tracking is corrupted");
+		}
+		if (file->need_rotate == NULL) {
+			file->need_rotate =
+				i_strdup("modseq tracking is corrupted");
+		}
+		/* clear cache, since it's unreliable */
+		memset(file->modseq_cache, 0, sizeof(file->modseq_cache));
+	}
+
+	/* @UNSAFE: cache the value */
+	memmove(file->modseq_cache + 1, file->modseq_cache,
+		sizeof(*file->modseq_cache) *
+		(N_ELEMENTS(file->modseq_cache) - 1));
+	file->modseq_cache[0].offset = cur_offset;
+	file->modseq_cache[0].highest_modseq = cur_modseq;
+
+	*next_offset_r = cur_offset;
+	return 0;
+}
diff --git a/src/lib-index/mail-transaction-log-private.h b/src/lib-index/mail-transaction-log-private.h
new file mode 100644
index 0000000..4961fe0
--- /dev/null
+++ b/src/lib-index/mail-transaction-log-private.h
@@ -0,0 +1,199 @@
+/* Include guard renamed to match this header (mail-transaction-log-private.h);
+   the old MAIL_TRANSACTION_LOG_VIEW_H name was misleading and would clash
+   with a view header using its natural guard name. */
+#ifndef MAIL_TRANSACTION_LOG_PRIVATE_H
+#define MAIL_TRANSACTION_LOG_PRIVATE_H
+
+#include "buffer.h"
+#include "mail-transaction-log.h"
+
+struct dotlock_settings;
+
+/* Synchronization can take a while sometimes, especially when copying lots of
+ mails. */
+#define MAIL_TRANSACTION_LOG_LOCK_TIMEOUT (3*60)
+#define MAIL_TRANSACTION_LOG_DOTLOCK_CHANGE_TIMEOUT (3*60)
+
+#define MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file) ((file)->fd == -1)
+
+#define LOG_FILE_MODSEQ_CACHE_SIZE 10
+
+/* One cache entry mapping a log file offset to the highest modseq of all
+   records up to that offset. offset=0 marks an unused entry. */
+struct modseq_cache {
+	/* log file offset; 0 if this entry is unused */
+	uoff_t offset;
+	/* highest modseq of all records up to 'offset' */
+	uint64_t highest_modseq;
+};
+
+/* State of a single transaction log file (.log or .log.2), either backed
+   by an fd (possibly mmap()ed) or fully in memory (fd == -1). */
+struct mail_transaction_log_file {
+	struct mail_transaction_log *log;
+	/* Next file in the mail_transaction_log.files list. Sorted by
+	   hdr.file_seq. */
+	struct mail_transaction_log_file *next;
+
+	/* refcount=0 is a valid state. files start that way, and they're
+	   freed only when mail_transaction_logs_clean() is called. */
+	int refcount;
+
+	/* Path to the log file */
+	char *filepath;
+	/* fd of the log file; -1 if the log is only in memory */
+	int fd;
+
+	/* Cached values for last stat()/fstat() */
+	ino_t st_ino;
+	dev_t st_dev;
+	time_t last_mtime;
+	uoff_t last_size;
+
+	/* Used to avoid logging mmap() errors too rapidly. */
+	time_t last_mmap_error_time;
+	/* If non-NULL, the log file should be rotated. The string contains a
+	   human-readable reason why the rotation was requested. */
+	char *need_rotate;
+
+	/* Copy of the log file header. Set when opened. */
+	struct mail_transaction_log_header hdr;
+	/* Buffer that points to mmap_base */
+	buffer_t mmap_buffer;
+	/* Buffer that can be used to access the log file contents. Either
+	   points to mmap_buffer, or it's a copy of the file contents starting
+	   from buffer_offset. */
+	buffer_t *buffer;
+	/* Offset to log where the buffer starts from. 0 with mmaped log. */
+	uoff_t buffer_offset;
+	/* If non-NULL, mmap()ed log file */
+	void *mmap_base;
+	size_t mmap_size;
+
+	/* Offset to log file how far it's been read. Usually it's the same
+	   as the log file size. However, if the last multi-record transaction
+	   wasn't fully written (or is in the middle of being written), this
+	   points to the beginning of the MAIL_TRANSACTION_BOUNDARY record. */
+	uoff_t sync_offset;
+	/* highest modseq at sync_offset */
+	uint64_t sync_highest_modseq;
+	/* The last mail_index_header.log_file_tail_offset update that was
+	   read from the log. */
+	uoff_t last_read_hdr_tail_offset;
+	/* Update mail_index_header.log_file_tail_offset to this offset the
+	   next time a transaction is written. Transaction log handling may
+	   increase this automatically by making it skip external transactions
+	   after last_read_hdr_tail_offset (to avoid re-reading them
+	   needlessly). */
+	uoff_t max_tail_offset;
+
+	/* Last seen offsets for MAIL_TRANSACTION_INDEX_DELETED and
+	   MAIL_TRANSACTION_INDEX_UNDELETED records. These are used to update
+	   mail_index.index_delete* fields. */
+	uoff_t index_deleted_offset, index_undeleted_offset;
+
+	/* Cache to optimize mail_transaction_log_file_get_modseq_next_offset()
+	   so it doesn't always have to start from the beginning of the log
+	   file to find the wanted modseq. Kept in MRU order. */
+	struct modseq_cache modseq_cache[LOG_FILE_MODSEQ_CACHE_SIZE];
+
+	/* Lock for the log file fd. If dotlocking is used, this is NULL and
+	   mail_transaction_log.dotlock is used instead. */
+	struct file_lock *file_lock;
+	/* Time when the log was successfully locked */
+	time_t lock_create_time;
+
+	/* Log is currently locked. */
+	bool locked:1;
+	/* TRUE if sync_offset has already been updated while this log was
+	   locked. This can be used to optimize away unnecessary checks to see
+	   whether there's more data written to log after sync_offset. */
+	bool locked_sync_offset_updated:1;
+	/* Log file has found to be corrupted. Stop trying to read it.
+	   The indexid is also usually overwritten to be 0 in the log header at
+	   this time. */
+	bool corrupted:1;
+};
+
+/* The transaction log of a single index: the set of opened log files and
+   the views reading them. */
+struct mail_transaction_log {
+	/* Index this log belongs to */
+	struct mail_index *index;
+	/* Linked list of all transaction log views */
+	struct mail_transaction_log_view *views;
+	/* Paths to .log and .log.2 */
+	char *filepath, *filepath2;
+
+	/* Linked list of all the opened log files. The oldest files may have
+	   already been unlinked. The list is sorted by the log file sequence
+	   (oldest/lowest first), so that transaction views can use them
+	   easily. */
+	struct mail_transaction_log_file *files;
+	/* Latest log file (the last file in the files linked list) */
+	struct mail_transaction_log_file *head;
+	/* open_file is used temporarily while opening the log file.
+	   if mail_transaction_log_open() failed, it's left there for
+	   mail_transaction_log_create(). */
+	struct mail_transaction_log_file *open_file;
+
+	/* Normally the .log locking is done via their file descriptors, so
+	   e.g. rotating a log needs to lock both the old and the new files
+	   at the same time. However, when FILE_LOCK_METHOD_DOTLOCK is used,
+	   the lock isn't file-specific. There is just a single dotlock that
+	   is created by the first log file lock. The second lock simply
+	   increases the refcount. (It's not expected that there would be more
+	   than 2 locks.) */
+	int dotlock_refcount;
+	struct dotlock *dotlock;
+
+	/* This session has already checked whether an old .log.2 should be
+	   unlinked. */
+	bool log_2_unlink_checked:1;
+};
+
+/* Mark the log file corrupted and log the printf-style reason. */
+void
+mail_transaction_log_file_set_corrupted(struct mail_transaction_log_file *file,
+					const char *fmt, ...)
+	ATTR_FORMAT(2, 3) ATTR_COLD;
+
+/* Fill *set_r with the dotlock settings used for locking this log. */
+void mail_transaction_log_get_dotlock_set(struct mail_transaction_log *log,
+					  struct dotlock_settings *set_r);
+
+/* Allocate a log file that lives only in memory (fd=-1). */
+struct mail_transaction_log_file *
+mail_transaction_log_file_alloc_in_memory(struct mail_transaction_log *log);
+/* Allocate a log file struct for the given path (not opened yet). */
+struct mail_transaction_log_file *
+mail_transaction_log_file_alloc(struct mail_transaction_log *log,
+				const char *path);
+void mail_transaction_log_file_free(struct mail_transaction_log_file **file);
+
+/* Returns 1 if log was opened, 0 if it didn't exist or was already open,
+   -1 if error. */
+int mail_transaction_log_file_open(struct mail_transaction_log_file *file,
+				   const char **reason_r);
+int mail_transaction_log_file_create(struct mail_transaction_log_file *file,
+				     bool reset);
+int mail_transaction_log_file_lock(struct mail_transaction_log_file *file);
+
+/* Find the opened log file with the given sequence, optionally flushing
+   the NFS attribute cache first. */
+int mail_transaction_log_find_file(struct mail_transaction_log *log,
+				   uint32_t file_seq, bool nfs_flush,
+				   struct mail_transaction_log_file **file_r,
+				   const char **reason_r);
+
+/* Returns 1 if ok, 0 if file is corrupted or offset range is invalid,
+   -1 if I/O error */
+int mail_transaction_log_file_map(struct mail_transaction_log_file *file,
+				  uoff_t start_offset, uoff_t end_offset,
+				  const char **reason_r);
+int mail_transaction_log_file_move_to_memory(struct mail_transaction_log_file *file);
+
+/* Free all log files that are no longer referenced. */
+void mail_transaction_logs_clean(struct mail_transaction_log *log);
+
+bool mail_transaction_log_want_rotate(struct mail_transaction_log *log,
+				      const char **reason_r);
+int mail_transaction_log_rotate(struct mail_transaction_log *log, bool reset);
+int mail_transaction_log_lock_head(struct mail_transaction_log *log,
+				   const char *lock_reason);
+void mail_transaction_log_file_unlock(struct mail_transaction_log_file *file,
+				      const char *lock_reason);
+
+/* Fold one transaction record into *cur_modseq (see the .c file for which
+   record types count). */
+void mail_transaction_update_modseq(const struct mail_transaction_header *hdr,
+				    const void *data, uint64_t *cur_modseq,
+				    unsigned int version);
+/* Returns 1 if ok, 0 if file is corrupted or offset range is invalid,
+   -1 if I/O error */
+int mail_transaction_log_file_get_highest_modseq_at(
+	struct mail_transaction_log_file *file,
+	uoff_t offset, uint64_t *highest_modseq_r,
+	const char **error_r);
+int mail_transaction_log_file_get_modseq_next_offset(
+	struct mail_transaction_log_file *file,
+	uint64_t modseq, uoff_t *next_offset_r);
+#endif
diff --git a/src/lib-index/mail-transaction-log-view-private.h b/src/lib-index/mail-transaction-log-view-private.h
new file mode 100644
index 0000000..e739609
--- /dev/null
+++ b/src/lib-index/mail-transaction-log-view-private.h
@@ -0,0 +1,33 @@
+#ifndef MAIL_TRANSACTION_LOG_VIEW_PRIVATE_H
+#define MAIL_TRANSACTION_LOG_VIEW_PRIVATE_H
+
+#include "mail-transaction-log-private.h"
+
+/* A read-only iteration window over a range of transaction log files.
+ Views are kept in a linked list owned by the log (see the "next"
+ pointer and mail_transaction_log_view_open()/close()). */
+struct mail_transaction_log_view {
+ struct mail_transaction_log *log;
+ /* next view in log->views linked list */
+ struct mail_transaction_log_view *next;
+
+ /* inclusive range of file sequences/offsets covered by this view */
+ uint32_t min_file_seq, max_file_seq;
+ uoff_t min_file_offset, max_file_offset;
+
+ /* scratch header returned to callers by _view_next(); holds the
+ record type with expunge protection dropped and size excluding
+ the header itself */
+ struct mail_transaction_header tmp_hdr;
+
+ /* a list of log files we've referenced. we have to keep this list
+ explicitly because more files may be added into the linked list
+ at any time. */
+ ARRAY(struct mail_transaction_log_file *) file_refs;
+ /* cur = current read position, tail..head = file range of the view */
+ struct mail_transaction_log_file *cur, *head, *tail;
+ uoff_t cur_offset;
+
+ /* position and modseq of the previously returned record */
+ uint64_t prev_modseq;
+ uint32_t prev_file_seq;
+ uoff_t prev_file_offset;
+
+ /* saved position for _view_mark()/_view_rewind() */
+ struct mail_transaction_log_file *mark_file;
+ uoff_t mark_offset, mark_next_offset;
+ uint64_t mark_modseq;
+
+ /* TRUE until a _view_set*() succeeds, or after corruption is seen */
+ bool broken:1;
+};
+
+#endif
diff --git a/src/lib-index/mail-transaction-log-view.c b/src/lib-index/mail-transaction-log-view.c
new file mode 100644
index 0000000..0113bfb
--- /dev/null
+++ b/src/lib-index/mail-transaction-log-view.c
@@ -0,0 +1,909 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "str.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-view-private.h"
+
+/* Open a new view to the log. The view initially references the log's
+ current head file and is marked broken until a _view_set*() call
+ succeeds. The view is linked into log->views. */
+struct mail_transaction_log_view *
+mail_transaction_log_view_open(struct mail_transaction_log *log)
+{
+ struct mail_transaction_log_view *view;
+
+ view = i_new(struct mail_transaction_log_view, 1);
+ view->log = log;
+ /* not usable until mail_transaction_log_view_set*() succeeds */
+ view->broken = TRUE;
+
+ i_assert(view->log->head != NULL);
+
+ view->head = view->tail = view->log->head;
+ view->head->refcount++;
+ i_array_init(&view->file_refs, 8);
+ array_push_back(&view->file_refs, &view->head);
+
+ view->next = log->views;
+ log->views = view;
+ return view;
+}
+
+/* Drop one reference from every file listed in view->file_refs and
+ empty the list. The files themselves are freed later by
+ mail_transaction_logs_clean(). */
+static void
+mail_transaction_log_view_unref_all(struct mail_transaction_log_view *view)
+{
+ struct mail_transaction_log_file *const *files;
+ unsigned int i, count;
+
+ files = array_get(&view->file_refs, &count);
+ for (i = 0; i < count; i++)
+ files[i]->refcount--;
+
+ array_clear(&view->file_refs);
+}
+
+/* Close the view: unlink it from log->views, drop all file references,
+ free any now-unreferenced log files, and free the view itself.
+ *_view is set to NULL. */
+void mail_transaction_log_view_close(struct mail_transaction_log_view **_view)
+{
+ struct mail_transaction_log_view *view = *_view;
+ struct mail_transaction_log_view **p;
+
+ *_view = NULL;
+
+ /* remove ourself from the log's singly-linked view list */
+ for (p = &view->log->views; *p != NULL; p = &(*p)->next) {
+ if (*p == view) {
+ *p = view->next;
+ break;
+ }
+ }
+
+ mail_transaction_log_view_unref_all(view);
+ mail_transaction_logs_clean(view->log);
+
+ array_free(&view->file_refs);
+ i_free(view);
+}
+
+/* Return a comma-separated list of the file_seqs of all currently opened
+ log files, for use in error messages. Returned string is allocated from
+ the data stack. */
+static const char *
+mail_transaction_log_get_file_seqs(struct mail_transaction_log *log)
+{
+ struct mail_transaction_log_file *file;
+ string_t *str = t_str_new(32);
+
+ if (log->files == NULL)
+ return "";
+
+ for (file = log->files; file != NULL; file = file->next)
+ str_printfa(str, ",%u", file->hdr.file_seq);
+ /* +1 skips the leading comma */
+ return str_c(str) + 1;
+}
+
+/* Failure path helper for _view_set(): drop the references that were
+ taken on each file in the tail..head range (inclusive). Both may be
+ NULL if no files were referenced yet. */
+static void
+view_set_failed_unref(struct mail_transaction_log_file *head,
+ struct mail_transaction_log_file *tail)
+{
+ struct mail_transaction_log_file *file;
+
+ if (tail == NULL) {
+ i_assert(head == NULL);
+ return;
+ }
+
+ for (file = tail; file != head; file = file->next) {
+ i_assert(file != NULL);
+ i_assert(file->refcount > 0);
+ file->refcount--;
+ }
+ /* unreference head itself, which the loop above stopped at */
+ i_assert(file != NULL);
+ i_assert(file->refcount > 0);
+ file->refcount--;
+}
+
+/* Set the view to cover the given (file_seq, offset) range. References
+ and maps every log file in the range. Returns 1 if ok, 0 if the range
+ is invalid or a file is missing/corrupted (*reason_r set), -1 on I/O
+ error. *reset_r is set to TRUE if a file within the range resets the
+ index (prev_file_seq == 0), in which case reading starts from that
+ file's header instead. */
+int mail_transaction_log_view_set(struct mail_transaction_log_view *view,
+ uint32_t min_file_seq, uoff_t min_file_offset,
+ uint32_t max_file_seq, uoff_t max_file_offset,
+ bool *reset_r, const char **reason_r)
+{
+ struct mail_transaction_log_file *tail, *head, *file, *const *files;
+ uoff_t start_offset, end_offset;
+ unsigned int i;
+ uint32_t seq;
+ int ret;
+
+ *reset_r = FALSE;
+ *reason_r = NULL;
+
+ if (view->log == NULL) {
+ /* transaction log is closed already. this log view shouldn't
+ be used anymore. */
+ *reason_r = "Log already closed";
+ return 0;
+ }
+
+ if (min_file_seq == 0) {
+ /* index file doesn't exist yet. this transaction log should
+ start from the beginning */
+ if (view->log->files->hdr.prev_file_seq != 0) {
+ /* but it doesn't */
+ *reason_r = t_strdup_printf(
+ "Wanted log beginning, but found prev_file_seq=%u",
+ view->log->files->hdr.prev_file_seq);
+ return 0;
+ }
+
+ min_file_seq = view->log->files->hdr.file_seq;
+ min_file_offset = 0;
+
+ if (max_file_seq == 0) {
+ max_file_seq = min_file_seq;
+ max_file_offset = min_file_offset;
+ }
+ }
+
+ /* if the min position is exactly at the end of a rotated file,
+ move it to the beginning of the following file */
+ for (file = view->log->files; file != NULL; file = file->next) {
+ if (file->hdr.prev_file_seq == min_file_seq)
+ break;
+ }
+
+ if (file != NULL && min_file_offset == file->hdr.prev_file_offset) {
+ /* we can (and sometimes must) skip to the next file */
+ min_file_seq = file->hdr.file_seq;
+ min_file_offset = file->hdr.hdr_size;
+ }
+
+ /* same for the max position */
+ for (file = view->log->files; file != NULL; file = file->next) {
+ if (file->hdr.prev_file_seq == max_file_seq)
+ break;
+ }
+ if (file != NULL && max_file_offset == file->hdr.prev_file_offset) {
+ /* we can skip to the next file. we've delayed checking for
+ min_file_seq <= max_file_seq until now, because it's not
+ really an error to specify the same position twice (even if
+ in "wrong" order) */
+ i_assert(min_file_seq <= max_file_seq ||
+ min_file_seq <= file->hdr.file_seq);
+ max_file_seq = file->hdr.file_seq;
+ max_file_offset = file->hdr.hdr_size;
+ } else {
+ i_assert(min_file_seq <= max_file_seq);
+ }
+
+ if (min_file_seq == max_file_seq && min_file_offset > max_file_offset) {
+ /* log file offset is probably corrupted in the index file. */
+ *reason_r = t_strdup_printf(
+ "Invalid offset: file_seq=%u, min_file_offset (%"PRIuUOFF_T
+ ") > max_file_offset (%"PRIuUOFF_T")",
+ min_file_seq, min_file_offset, max_file_offset);
+ mail_transaction_log_view_set_corrupted(view, "%s", *reason_r);
+ return 0;
+ }
+
+ /* find and reference every file in the min..max range */
+ tail = head = file = NULL;
+ for (seq = min_file_seq; seq <= max_file_seq; seq++) {
+ const char *reason = NULL;
+
+ if (file == NULL || file->hdr.file_seq != seq) {
+ /* see if we could find the missing file. if we know
+ the max. file sequence or we don't have the the min.
+ file, make sure NFS attribute cache gets flushed if
+ necessary. */
+ bool nfs_flush = seq == min_file_seq ||
+ max_file_seq != (uint32_t)-1;
+
+ ret = mail_transaction_log_find_file(view->log, seq,
+ nfs_flush, &file, &reason);
+ if (ret <= 0) {
+ if (ret < 0) {
+ *reason_r = t_strdup_printf(
+ "Failed to find file seq=%u: %s",
+ seq, reason);
+ view_set_failed_unref(head, tail);
+ return -1;
+ }
+
+ /* not found / corrupted */
+ file = NULL;
+ }
+ }
+
+ if (file == NULL || file->hdr.file_seq != seq) {
+ i_assert(reason != NULL);
+ if (file == NULL && max_file_seq == (uint32_t)-1 &&
+ head == view->log->head) {
+ /* we just wanted to sync everything */
+ i_assert(max_file_offset == UOFF_T_MAX);
+ max_file_seq = seq-1;
+ break;
+ }
+ /* if any of the found files reset the index,
+ ignore any missing files up to it */
+ file = tail != NULL ? tail : view->log->files;
+ for (;; file = file->next) {
+ if (file == NULL ||
+ file->hdr.file_seq > max_file_seq) {
+ /* missing files in the middle */
+ *reason_r = t_strdup_printf(
+ "Missing middle file seq=%u (between %u..%u, we have seqs %s): %s",
+ seq, min_file_seq, max_file_seq,
+ mail_transaction_log_get_file_seqs(view->log), reason);
+ view_set_failed_unref(head, tail);
+ return 0;
+ }
+
+ if (file->hdr.file_seq >= seq &&
+ file->hdr.prev_file_seq == 0) {
+ /* we can ignore the missing file */
+ break;
+ }
+ }
+ /* we're going to rebuild the head/tail. remove the old
+ references first. */
+ view_set_failed_unref(head, tail);
+ seq = file->hdr.file_seq;
+ tail = NULL;
+ }
+
+ if (tail == NULL)
+ tail = file;
+ head = file;
+ /* NOTE: we need to reference immediately or it could become
+ freed by mail_transaction_log_find_file() */
+ file->refcount++;
+ file = file->next;
+ }
+ i_assert(tail != NULL);
+
+ if (min_file_offset == 0) {
+ /* beginning of the file */
+ min_file_offset = tail->hdr.hdr_size;
+ if (min_file_offset > max_file_offset &&
+ min_file_seq == max_file_seq) {
+ /* we don't actually want to show anything */
+ max_file_offset = min_file_offset;
+ }
+ }
+
+ if (min_file_offset < tail->hdr.hdr_size) {
+ /* log file offset is probably corrupted in the index file. */
+ *reason_r = t_strdup_printf(
+ "Invalid min_file_offset: file_seq=%u, min_file_offset (%"PRIuUOFF_T
+ ") < hdr_size (%u)",
+ min_file_seq, min_file_offset, tail->hdr.hdr_size);
+ mail_transaction_log_view_set_corrupted(view, "%s", *reason_r);
+ view_set_failed_unref(head, tail);
+ return 0;
+ }
+ if (max_file_offset < head->hdr.hdr_size) {
+ /* log file offset is probably corrupted in the index file. */
+ *reason_r = t_strdup_printf(
+ "Invalid max_file_offset: file_seq=%u, max_file_offset (%"PRIuUOFF_T
+ ") < hdr_size (%u)",
+ max_file_seq, max_file_offset, head->hdr.hdr_size);
+ mail_transaction_log_view_set_corrupted(view, "%s", *reason_r);
+ view_set_failed_unref(head, tail);
+ return 0;
+ }
+
+ /* we have all of them. update refcounts. */
+ mail_transaction_log_view_unref_all(view);
+
+ /* Reference all used files. */
+ view->tail = tail;
+ view->head = head;
+ for (file = view->tail; ; file = file->next) {
+ array_push_back(&view->file_refs, &file);
+
+ if (file == head)
+ break;
+ }
+
+ view->cur = view->tail;
+ view->cur_offset = view->cur->hdr.file_seq == min_file_seq ?
+ min_file_offset : view->cur->hdr.hdr_size;
+
+ /* Map the files only after we've found them all. Otherwise if we map
+ one file and then another file just happens to get rotated, we could
+ include both files in the view but skip the last transactions from
+ the first file.
+
+ We're mapping the files in reverse order so that _log_file_map()
+ can verify that prev_file_offset matches how far it actually managed
+ to sync the file. */
+ files = array_front(&view->file_refs);
+ for (i = array_count(&view->file_refs); i > 0; i--) {
+ file = files[i-1];
+ start_offset = file->hdr.file_seq == min_file_seq ?
+ min_file_offset : file->hdr.hdr_size;
+ end_offset = file->hdr.file_seq == max_file_seq ?
+ max_file_offset : UOFF_T_MAX;
+ ret = mail_transaction_log_file_map(file, start_offset,
+ end_offset, reason_r);
+ if (ret <= 0) {
+ *reason_r = t_strdup_printf(
+ "Failed to map file seq=%u "
+ "offset=%"PRIuUOFF_T"..%"PRIuUOFF_T" (ret=%d): %s",
+ file->hdr.file_seq, start_offset, end_offset, ret, *reason_r);
+ return ret;
+ }
+
+ if (file->hdr.prev_file_seq == 0) {
+ /* this file resets the index.
+ don't bother reading the others. */
+ if (view->cur != file ||
+ view->cur_offset == file->hdr.hdr_size) {
+ view->cur = file;
+ view->cur_offset = file->hdr.hdr_size;
+ *reset_r = TRUE;
+ break;
+ }
+ i_assert(i == 1);
+ }
+ }
+
+ if (min_file_seq == view->head->hdr.file_seq &&
+ min_file_offset > view->head->sync_offset) {
+ /* log file offset is probably corrupted in the index file. */
+ *reason_r = t_strdup_printf(
+ "Invalid offset: file_seq=%u, min_file_offset (%"PRIuUOFF_T
+ ") > sync_offset (%"PRIuUOFF_T")", min_file_seq,
+ min_file_offset, view->head->sync_offset);
+ mail_transaction_log_view_set_corrupted(view, "%s", *reason_r);
+ return 0;
+ }
+
+ i_assert(max_file_seq == (uint32_t)-1 ||
+ max_file_seq == view->head->hdr.file_seq);
+ i_assert(max_file_offset == UOFF_T_MAX ||
+ max_file_offset <= view->head->sync_offset);
+ i_assert(min_file_seq != max_file_seq ||
+ max_file_seq != view->head->hdr.file_seq ||
+ max_file_offset != UOFF_T_MAX ||
+ min_file_offset <= view->head->sync_offset);
+
+ view->prev_file_seq = view->cur->hdr.file_seq;
+ view->prev_file_offset = view->cur_offset;
+
+ view->min_file_seq = min_file_seq;
+ view->min_file_offset = min_file_offset;
+ view->max_file_seq = max_file_seq;
+ view->max_file_offset = I_MIN(max_file_offset, view->head->sync_offset);
+ view->broken = FALSE;
+
+ ret = mail_transaction_log_file_get_highest_modseq_at(view->cur,
+ view->cur_offset, &view->prev_modseq, reason_r);
+ if (ret <= 0)
+ return ret;
+
+ i_assert(view->cur_offset <= view->cur->sync_offset);
+ return 1;
+}
+
+/* Set the view to cover everything in all opened log files, starting from
+ the last file that reset the index (prev_file_seq == 0) if one exists.
+ Returns 0 on success, -1 on failure (error already set on the index). */
+int mail_transaction_log_view_set_all(struct mail_transaction_log_view *view)
+{
+ struct mail_transaction_log_file *file, *first;
+ const char *reason = NULL;
+ int ret;
+
+ /* make sure .log.2 file is opened */
+ (void)mail_transaction_log_find_file(view->log, 1, FALSE, &file, &reason);
+
+ first = view->log->files;
+ i_assert(first != NULL);
+
+ for (file = view->log->files; file != NULL; file = file->next) {
+ ret = mail_transaction_log_file_map(file, file->hdr.hdr_size,
+ UOFF_T_MAX, &reason);
+ if (ret < 0) {
+ first = NULL;
+ break;
+ }
+ if (ret == 0) {
+ /* corrupted - drop the starting point; a later file
+ that resets the index may still restore it */
+ first = NULL;
+ } else if (file->hdr.prev_file_seq == 0) {
+ /* this file resets the index. skip the old ones. */
+ first = file;
+ }
+ }
+ if (first == NULL) {
+ /* index wasn't reset after corruption was found */
+ i_assert(reason != NULL);
+ mail_index_set_error(view->log->index,
+ "Failed to map transaction log %s for all-view: %s",
+ view->log->filepath, reason);
+ return -1;
+ }
+
+ /* reference all files from the starting point onwards */
+ mail_transaction_log_view_unref_all(view);
+ for (file = first; file != NULL; file = file->next) {
+ array_push_back(&view->file_refs, &file);
+ file->refcount++;
+ }
+
+ view->tail = first;
+ view->cur = view->tail;
+ view->cur_offset = view->tail->hdr.hdr_size;
+
+ view->prev_file_seq = view->cur->hdr.file_seq;
+ view->prev_file_offset = view->cur_offset;
+
+ view->min_file_seq = view->cur->hdr.file_seq;
+ view->min_file_offset = view->cur_offset;
+ view->max_file_seq = view->head->hdr.file_seq;
+ view->max_file_offset = view->head->sync_offset;
+ view->broken = FALSE;
+
+ if (mail_transaction_log_file_get_highest_modseq_at(view->cur,
+ view->cur_offset, &view->prev_modseq, &reason) <= 0) {
+ mail_index_set_error(view->log->index,
+ "Failed to get modseq in %s for all-view: %s",
+ view->log->filepath, reason);
+ return -1;
+ }
+ return 0;
+}
+
+/* Reset the view to an empty state. If oldest_file_seq is nonzero, keep
+ references to that file and all newer ones so they aren't deleted
+ before the view is set again. */
+void mail_transaction_log_view_clear(struct mail_transaction_log_view *view,
+ uint32_t oldest_file_seq)
+{
+ struct mail_transaction_log_file *file;
+ const char *reason;
+
+ mail_transaction_log_view_unref_all(view);
+ if (oldest_file_seq != 0 &&
+ mail_transaction_log_find_file(view->log, oldest_file_seq, FALSE,
+ &file, &reason) > 0) {
+ for (; file != NULL; file = file->next) {
+ array_push_back(&view->file_refs, &file);
+ file->refcount++;
+ }
+ }
+
+ view->cur = view->head = view->tail = NULL;
+
+ view->mark_file = NULL;
+ view->mark_offset = 0;
+ view->mark_modseq = 0;
+
+ view->min_file_seq = view->max_file_seq = 0;
+ view->min_file_offset = view->max_file_offset = 0;
+ view->cur_offset = 0;
+
+ view->prev_file_seq = 0;
+ view->prev_file_offset = 0;
+ view->prev_modseq = 0;
+}
+
+/* Return the position (file_seq, offset) of the previously returned
+ transaction record. */
+void
+mail_transaction_log_view_get_prev_pos(struct mail_transaction_log_view *view,
+ uint32_t *file_seq_r,
+ uoff_t *file_offset_r)
+{
+ *file_seq_r = view->prev_file_seq;
+ *file_offset_r = view->prev_file_offset;
+}
+
+/* Return the modseq as of the previously returned transaction record. */
+uint64_t
+mail_transaction_log_view_get_prev_modseq(struct mail_transaction_log_view *view)
+{
+ return view->prev_modseq;
+}
+
+/* Find the effective current position of the view, skipping over
+ end-of-file boundaries into the next file where needed. Returns TRUE if
+ that position is at the end of the view (nothing more to read), with
+ *last_r/*last_offset_r set to the final file/offset. */
+static bool
+mail_transaction_log_view_get_last(struct mail_transaction_log_view *view,
+ struct mail_transaction_log_file **last_r,
+ uoff_t *last_offset_r)
+{
+ struct mail_transaction_log_file *cur = view->cur;
+ uoff_t cur_offset = view->cur_offset;
+ bool last = FALSE;
+
+ if (cur == NULL) {
+ *last_r = NULL;
+ return TRUE;
+ }
+
+ for (;;) {
+ if (cur->hdr.file_seq == view->max_file_seq) {
+ /* last file */
+ if (cur_offset == view->max_file_offset ||
+ cur_offset == cur->sync_offset) {
+ /* we're all finished */
+ last = TRUE;
+ }
+ } else if (cur_offset == cur->sync_offset) {
+ /* end of file, go to next one */
+ if (cur->next == NULL) {
+ last = TRUE;
+ } else {
+ cur = cur->next;
+ cur_offset = cur->hdr.hdr_size;
+ continue;
+ }
+ }
+
+ /* not EOF */
+ break;
+ }
+
+ *last_r = cur;
+ *last_offset_r = cur_offset;
+ return last;
+}
+
+/* Returns TRUE if the view's current position is at its end. */
+bool mail_transaction_log_view_is_last(struct mail_transaction_log_view *view)
+{
+ struct mail_transaction_log_file *cur;
+ uoff_t cur_offset;
+
+ return mail_transaction_log_view_get_last(view, &cur, &cur_offset);
+}
+
+/* Mark the view broken and flag the log's head file as corrupted with the
+ given printf-style message. */
+void
+mail_transaction_log_view_set_corrupted(struct mail_transaction_log_view *view,
+ const char *fmt, ...)
+{
+ va_list va;
+
+ view->broken = TRUE;
+
+ va_start(va, fmt);
+ T_BEGIN {
+ mail_transaction_log_file_set_corrupted(view->log->head, "%s",
+ t_strdup_vprintf(fmt, va));
+ } T_END;
+ va_end(va);
+}
+
+/* Returns TRUE if the view has been marked broken/corrupted. */
+bool
+mail_transaction_log_view_is_corrupted(struct mail_transaction_log_view *view)
+{
+ return view->broken;
+}
+
+/* Validate that the record's UID range array is well-formed: the buffer
+ size is a multiple of the element size, there is at least one range,
+ each range has 0 < seq1 <= seq2, and ranges are sorted and
+ non-overlapping. Marks the file corrupted and returns FALSE otherwise. */
+static bool
+log_view_is_uid_range_valid(struct mail_transaction_log_file *file,
+ enum mail_transaction_type rec_type,
+ const ARRAY_TYPE(seq_range) *uids)
+{
+ const struct seq_range *rec, *prev = NULL;
+ unsigned int i, count = array_count(uids);
+
+ if ((uids->arr.buffer->used % uids->arr.element_size) != 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Invalid record size (type=0x%x)", rec_type);
+ return FALSE;
+ } else if (count == 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "No UID ranges (type=0x%x)", rec_type);
+ return FALSE;
+ }
+
+ for (i = 0; i < count; i++, prev = rec) {
+ rec = array_idx(uids, i);
+ if (rec->seq1 > rec->seq2 || rec->seq1 == 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Invalid UID range (%u .. %u, type=0x%x)",
+ rec->seq1, rec->seq2, rec_type);
+ return FALSE;
+ }
+ if (prev != NULL && rec->seq1 <= prev->seq2) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Non-sorted UID ranges (type=0x%x)", rec_type);
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+/* Validate a single transaction record's contents before it is returned
+ to the caller. Expunge records must carry the protection bit; per-type
+ size/content checks follow. Marks the file corrupted and returns FALSE
+ on any failure. */
+static bool
+log_view_is_record_valid(struct mail_transaction_log_file *file,
+ const struct mail_transaction_header *hdr,
+ const void *data)
+{
+ enum mail_transaction_type rec_type;
+ ARRAY_TYPE(seq_range) uids = ARRAY_INIT;
+ buffer_t uid_buf;
+ uint32_t rec_size;
+
+ rec_type = hdr->type & MAIL_TRANSACTION_TYPE_MASK;
+ /* size in the header includes the header itself */
+ rec_size = mail_index_offset_to_uint32(hdr->size) - sizeof(*hdr);
+
+ /* we want to be extra careful with expunges */
+ if ((hdr->type & MAIL_TRANSACTION_EXPUNGE) != 0) {
+ if (rec_type != (MAIL_TRANSACTION_EXPUNGE |
+ MAIL_TRANSACTION_EXPUNGE_PROT)) {
+ mail_transaction_log_file_set_corrupted(file,
+ "expunge record missing protection mask");
+ return FALSE;
+ }
+ rec_type &= ENUM_NEGATE(MAIL_TRANSACTION_EXPUNGE_PROT);
+ }
+ if ((hdr->type & MAIL_TRANSACTION_EXPUNGE_GUID) != 0) {
+ if (rec_type != (MAIL_TRANSACTION_EXPUNGE_GUID |
+ MAIL_TRANSACTION_EXPUNGE_PROT)) {
+ mail_transaction_log_file_set_corrupted(file,
+ "expunge guid record missing protection mask");
+ return FALSE;
+ }
+ rec_type &= ENUM_NEGATE(MAIL_TRANSACTION_EXPUNGE_PROT);
+ }
+
+ if (rec_size == 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Empty record contents (type=0x%x)", rec_type);
+ return FALSE;
+ }
+
+ /* records that are exported by syncing and view syncing will be
+ checked here so that we don't have to implement the same validation
+ multiple times. other records are checked internally by
+ mail_index_sync_record(). */
+ switch (rec_type) {
+ case MAIL_TRANSACTION_APPEND:
+ if ((rec_size % sizeof(struct mail_index_record)) != 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Invalid append record size");
+ return FALSE;
+ }
+ break;
+ case MAIL_TRANSACTION_EXPUNGE:
+ /* wrap the raw data in a seq_range array for the common
+ UID range validation at the end */
+ buffer_create_from_const_data(&uid_buf, data, rec_size);
+ array_create_from_buffer(&uids, &uid_buf,
+ sizeof(struct mail_transaction_expunge));
+ break;
+ case MAIL_TRANSACTION_EXPUNGE_GUID: {
+ const struct mail_transaction_expunge_guid *recs = data;
+ unsigned int i, count;
+
+ if ((rec_size % sizeof(*recs)) != 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Invalid expunge guid record size");
+ return FALSE;
+ }
+ count = rec_size / sizeof(*recs);
+ for (i = 0; i < count; i++) {
+ if (recs[i].uid == 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Expunge guid record with uid=0");
+ return FALSE;
+ }
+ }
+ break;
+ }
+ case MAIL_TRANSACTION_FLAG_UPDATE:
+ buffer_create_from_const_data(&uid_buf, data, rec_size);
+ array_create_from_buffer(&uids, &uid_buf,
+ sizeof(struct mail_transaction_flag_update));
+ break;
+ case MAIL_TRANSACTION_KEYWORD_UPDATE: {
+ const struct mail_transaction_keyword_update *rec = data;
+ unsigned int seqset_offset;
+
+ /* UID ranges start after the keyword name, padded to a
+ 4-byte boundary */
+ seqset_offset = sizeof(*rec) + rec->name_size;
+ if ((seqset_offset % 4) != 0)
+ seqset_offset += 4 - (seqset_offset % 4);
+
+ if (rec->name_size == 0) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Trying to use empty keyword");
+ return FALSE;
+ }
+ if (seqset_offset > rec_size) {
+ mail_transaction_log_file_set_corrupted(file,
+ "Invalid keyword update record size");
+ return FALSE;
+ }
+
+ buffer_create_from_const_data(&uid_buf,
+ CONST_PTR_OFFSET(data, seqset_offset),
+ rec_size - seqset_offset);
+ array_create_from_buffer(&uids, &uid_buf,
+ sizeof(uint32_t)*2);
+ break;
+ }
+ case MAIL_TRANSACTION_KEYWORD_RESET:
+ buffer_create_from_const_data(&uid_buf, data, rec_size);
+ array_create_from_buffer(&uids, &uid_buf,
+ sizeof(struct mail_transaction_keyword_reset));
+ break;
+ case MAIL_TRANSACTION_EXT_INTRO: {
+ const struct mail_transaction_ext_intro *rec;
+ unsigned int i;
+
+ /* record may contain multiple intros; each is followed by
+ its name and padded to a 4-byte boundary */
+ for (i = 0; i < rec_size; ) {
+ if (i + sizeof(*rec) > rec_size) {
+ /* should be just extra padding */
+ break;
+ }
+
+ rec = CONST_PTR_OFFSET(data, i);
+ if (i + sizeof(*rec) + rec->name_size > rec_size) {
+ mail_transaction_log_file_set_corrupted(file,
+ "ext intro: name_size too large");
+ return FALSE;
+ }
+ i += sizeof(*rec) + rec->name_size;
+ if ((i % 4) != 0)
+ i += 4 - (i % 4);
+ }
+ break;
+ }
+ case MAIL_TRANSACTION_ATTRIBUTE_UPDATE: {
+ const char *attr_changes = data;
+ unsigned int i;
+
+ /* sequence of "<+|-><p|s><key NUL>" entries, terminated by
+ an extra NUL */
+ for (i = 0; i+2 < rec_size && attr_changes[i] != '\0'; ) {
+ if (attr_changes[i] != '+' && attr_changes[i] != '-') {
+ mail_transaction_log_file_set_corrupted(file,
+ "attribute update: Invalid prefix 0x%02x",
+ attr_changes[i]);
+ return FALSE;
+ }
+ i++;
+ if (attr_changes[i] != 'p' && attr_changes[i] != 's') {
+ mail_transaction_log_file_set_corrupted(file,
+ "attribute update: Invalid type 0x%02x",
+ attr_changes[i]);
+ return FALSE;
+ }
+ i++;
+ if (attr_changes[i] == '\0') {
+ mail_transaction_log_file_set_corrupted(file,
+ "attribute update: Empty key");
+ return FALSE;
+ }
+ i += strlen(attr_changes+i) + 1;
+ }
+ if (i == 0 || (i < rec_size && attr_changes[i] != '\0')) {
+ mail_transaction_log_file_set_corrupted(file,
+ "attribute update doesn't end with NUL");
+ return FALSE;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (array_is_created(&uids)) {
+ if (!log_view_is_uid_range_valid(file, rec_type, &uids))
+ return FALSE;
+ }
+ return TRUE;
+}
+
+/* Read the next raw transaction record at the view's current position.
+ Returns 1 and sets *hdr_r/*data_r on success (advancing cur_offset),
+ 0 at end of view, -1 if the record is corrupted. */
+static int
+log_view_get_next(struct mail_transaction_log_view *view,
+ const struct mail_transaction_header **hdr_r,
+ const void **data_r)
+{
+ const struct mail_transaction_header *hdr;
+ struct mail_transaction_log_file *file;
+ const void *data;
+ enum mail_transaction_type rec_type;
+ uint32_t full_size;
+ size_t file_size;
+ int ret;
+
+ if (view->cur == NULL)
+ return 0;
+
+ /* prev_file_offset should point to beginning of previous log record.
+ when we reach EOF, it should be left there, not to beginning of the
+ next file that's not included inside the view. */
+ if (mail_transaction_log_view_get_last(view, &view->cur,
+ &view->cur_offset)) {
+ /* if the last file was the beginning of a file, we want to
+ move prev pointers there */
+ view->prev_file_seq = view->cur->hdr.file_seq;
+ view->prev_file_offset = view->cur_offset;
+ view->cur = NULL;
+ return 0;
+ }
+
+ view->prev_file_seq = view->cur->hdr.file_seq;
+ view->prev_file_offset = view->cur_offset;
+
+ file = view->cur;
+
+ data = file->buffer->data;
+ /* buffer may not start at the beginning of the file */
+ file_size = file->buffer->used + file->buffer_offset;
+
+ if (view->cur_offset + sizeof(*hdr) > file_size) {
+ mail_transaction_log_file_set_corrupted(file,
+ "offset points outside file "
+ "(%"PRIuUOFF_T" + %zu > %zu)",
+ view->cur_offset, sizeof(*hdr), file_size);
+ return -1;
+ }
+
+ i_assert(view->cur_offset >= file->buffer_offset);
+ hdr = CONST_PTR_OFFSET(data, view->cur_offset - file->buffer_offset);
+ data = CONST_PTR_OFFSET(hdr, sizeof(*hdr));
+
+ rec_type = hdr->type & MAIL_TRANSACTION_TYPE_MASK;
+ full_size = mail_index_offset_to_uint32(hdr->size);
+ if (full_size < sizeof(*hdr)) {
+ mail_transaction_log_file_set_corrupted(file,
+ "record size too small (type=0x%x, "
+ "offset=%"PRIuUOFF_T", size=%u)",
+ rec_type, view->cur_offset, full_size);
+ return -1;
+ }
+
+ if (file_size - view->cur_offset < full_size) {
+ mail_transaction_log_file_set_corrupted(file,
+ "record size too large (type=0x%x, "
+ "offset=%"PRIuUOFF_T", size=%u, end=%zu)",
+ rec_type, view->cur_offset, full_size, file_size);
+ return -1;
+ }
+
+ T_BEGIN {
+ ret = log_view_is_record_valid(file, hdr, data) ? 1 : -1;
+ } T_END;
+ if (ret > 0) {
+ mail_transaction_update_modseq(hdr, data, &view->prev_modseq,
+ MAIL_TRANSACTION_LOG_HDR_VERSION(&file->hdr));
+ *hdr_r = hdr;
+ *data_r = data;
+ view->cur_offset += full_size;
+ }
+ return ret;
+}
+
+/* Return the next transaction in the view. *hdr_r points to a temporary
+ header whose type has the expunge protection bit dropped and whose size
+ excludes the header itself. Returns 1 on success, 0 at end of view,
+ -1 if the view/log is corrupted. */
+int mail_transaction_log_view_next(struct mail_transaction_log_view *view,
+ const struct mail_transaction_header **hdr_r,
+ const void **data_r)
+{
+ const struct mail_transaction_header *hdr;
+ const void *data;
+ int ret = 0;
+
+ if (view->broken)
+ return -1;
+
+ ret = log_view_get_next(view, &hdr, &data);
+ if (ret <= 0) {
+ if (ret < 0)
+ /* skip the rest of the corrupted file */
+ view->cur_offset = view->cur->sync_offset;
+ return ret;
+ }
+
+ /* drop expunge protection */
+ if ((hdr->type & MAIL_TRANSACTION_TYPE_MASK) ==
+ (MAIL_TRANSACTION_EXPUNGE | MAIL_TRANSACTION_EXPUNGE_PROT) ||
+ (hdr->type & MAIL_TRANSACTION_TYPE_MASK) ==
+ (MAIL_TRANSACTION_EXPUNGE_GUID | MAIL_TRANSACTION_EXPUNGE_PROT))
+ view->tmp_hdr.type = hdr->type & ENUM_NEGATE(MAIL_TRANSACTION_EXPUNGE_PROT);
+ else
+ view->tmp_hdr.type = hdr->type;
+
+ /* return record's size */
+ view->tmp_hdr.size = mail_index_offset_to_uint32(hdr->size);
+ i_assert(view->tmp_hdr.size > sizeof(*hdr));
+ view->tmp_hdr.size -= sizeof(*hdr);
+
+ *hdr_r = &view->tmp_hdr;
+ *data_r = data;
+ return 1;
+}
+
+/* Remember the current read position so it can be restored later with
+ mail_transaction_log_view_rewind(). */
+void mail_transaction_log_view_mark(struct mail_transaction_log_view *view)
+{
+ i_assert(view->cur->hdr.file_seq == view->prev_file_seq);
+
+ view->mark_file = view->cur;
+ view->mark_offset = view->prev_file_offset;
+ view->mark_next_offset = view->cur_offset;
+ view->mark_modseq = view->prev_modseq;
+}
+
+/* Restore the read position saved by mail_transaction_log_view_mark(). */
+void mail_transaction_log_view_rewind(struct mail_transaction_log_view *view)
+{
+ i_assert(view->mark_file != NULL);
+
+ view->cur = view->mark_file;
+ view->cur_offset = view->mark_next_offset;
+ view->prev_file_seq = view->cur->hdr.file_seq;
+ view->prev_file_offset = view->mark_offset;
+ view->prev_modseq = view->mark_modseq;
+}
diff --git a/src/lib-index/mail-transaction-log.c b/src/lib-index/mail-transaction-log.c
new file mode 100644
index 0000000..6e9b1eb
--- /dev/null
+++ b/src/lib-index/mail-transaction-log.c
@@ -0,0 +1,664 @@
+/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "buffer.h"
+#include "file-dotlock.h"
+#include "nfs-workarounds.h"
+#include "mmap-util.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+static void
+mail_transaction_log_set_head(struct mail_transaction_log *log,
+ struct mail_transaction_log_file *file)
+{
+ i_assert(log->head != file);
+
+ file->refcount++;
+ log->head = file;
+
+ i_assert(log->files != NULL);
+ i_assert(log->files->next != NULL || log->files == file);
+}
+
+struct mail_transaction_log *
+mail_transaction_log_alloc(struct mail_index *index)
+{
+ struct mail_transaction_log *log;
+
+ log = i_new(struct mail_transaction_log, 1);
+ log->index = index;
+ return log;
+}
+
/* Delete the rotated .log.2 file once it's older than log2_max_age_secs.
   The rotation time comes from the index header if set, otherwise from
   the file's mtime; (uint32_t)-1 marks "file doesn't exist". If the
   header value needs changing, the update is recorded and written as
   part of the next sync's transaction. */
static void mail_transaction_log_2_unlink_old(struct mail_transaction_log *log)
{
	struct stat st;
	uint32_t log2_rotate_time = log->index->map->hdr.log2_rotate_time;

	if (MAIL_INDEX_IS_IN_MEMORY(log->index))
		return;

	if (log2_rotate_time == 0) {
		/* rotate time missing from header - fall back to the
		   .log.2 file's mtime */
		if (nfs_safe_stat(log->filepath2, &st) == 0)
			log2_rotate_time = st.st_mtime;
		else if (errno == ENOENT)
			log2_rotate_time = (uint32_t)-1;
		else {
			mail_index_set_error(log->index,
				"stat(%s) failed: %m", log->filepath2);
			return;
		}
	}

	if (log2_rotate_time != (uint32_t)-1 &&
	    ioloop_time - (time_t)log2_rotate_time >= (time_t)log->index->optimization_set.log.log2_max_age_secs &&
	    !log->index->readonly) {
		/* .log.2 is old enough - delete it and mark it gone */
		i_unlink_if_exists(log->filepath2);
		log2_rotate_time = (uint32_t)-1;
	}

	if (log2_rotate_time != log->index->map->hdr.log2_rotate_time) {
		/* Either the log2_rotate_time in header was missing, or we
		   just deleted the .log.2 and need to set it as nonexistent.
		   Either way we need to update the header.

		   Write this as part of the next sync's transaction. We're
		   here because we're already opening a sync lock, so it'll
		   always happen. It's also required especially with mdbox map
		   index, which doesn't like changes done outside syncing. */
		log->index->hdr_log2_rotate_time_delayed_update =
			log2_rotate_time;
	}
}
+
/* Open the main .log file. Returns 1 on success, 0 if the file doesn't
   exist or couldn't be opened cleanly (the file struct is kept in
   log->open_file so mail_transaction_log_create() can reuse it), or
   -1 on error. In-memory indexes have nothing to open and return 0. */
int mail_transaction_log_open(struct mail_transaction_log *log)
{
	struct mail_transaction_log_file *file;
	const char *reason;
	int ret;

	/* (re)build the .log and .log.2 paths from the index path */
	i_free(log->filepath);
	i_free(log->filepath2);
	log->filepath = i_strconcat(log->index->filepath,
				    MAIL_TRANSACTION_LOG_SUFFIX, NULL);
	log->filepath2 = i_strconcat(log->filepath, ".2", NULL);

	if (log->open_file != NULL)
		mail_transaction_log_file_free(&log->open_file);

	if (MAIL_INDEX_IS_IN_MEMORY(log->index))
		return 0;

	file = mail_transaction_log_file_alloc(log, log->filepath);
	if ((ret = mail_transaction_log_file_open(file, &reason)) <= 0) {
		/* leave the file for _create() */
		log->open_file = file;
		return ret;
	}
	mail_transaction_log_set_head(log, file);
	return 1;
}
+
/* Create a new head log file, or adopt one that another process created
   concurrently. Returns 1 on success, 0 for in-memory logs, -1 on
   error. With reset=TRUE the log's sequence history is restarted. */
int mail_transaction_log_create(struct mail_transaction_log *log, bool reset)
{
	struct mail_transaction_log_file *file;

	if (MAIL_INDEX_IS_IN_MEMORY(log->index)) {
		file = mail_transaction_log_file_alloc_in_memory(log);
		mail_transaction_log_set_head(log, file);
		return 0;
	}

	file = mail_transaction_log_file_alloc(log, log->filepath);
	if (log->open_file != NULL) {
		/* remember what file we tried to open. if someone else created
		   a new file, use it instead of recreating it */
		file->st_ino = log->open_file->st_ino;
		file->st_dev = log->open_file->st_dev;
		file->last_size = log->open_file->last_size;
		file->last_mtime = log->open_file->last_mtime;
		mail_transaction_log_file_free(&log->open_file);
	}

	if (mail_transaction_log_file_create(file, reset) < 0) {
		mail_transaction_log_file_free(&file);
		return -1;
	}

	mail_transaction_log_set_head(log, file);
	return 1;
}
+
+void mail_transaction_log_close(struct mail_transaction_log *log)
+{
+ i_assert(log->views == NULL);
+
+ if (log->open_file != NULL)
+ mail_transaction_log_file_free(&log->open_file);
+ if (log->head != NULL)
+ log->head->refcount--;
+ mail_transaction_logs_clean(log);
+ i_assert(log->files == NULL);
+}
+
+void mail_transaction_log_free(struct mail_transaction_log **_log)
+{
+ struct mail_transaction_log *log = *_log;
+
+ *_log = NULL;
+
+ mail_transaction_log_close(log);
+ log->index->log = NULL;
+ i_free(log->filepath);
+ i_free(log->filepath2);
+ i_free(log);
+}
+
/* Switch the log to be kept in memory only. Returns the result of
   moving the current head file to memory, or 0 if a new in-memory head
   had to be allocated. */
int mail_transaction_log_move_to_memory(struct mail_transaction_log *log)
{
	struct mail_transaction_log_file *file;

	if (!log->index->initial_mapped && log->files != NULL &&
	    log->files->hdr.prev_file_seq != 0) {
		/* we couldn't read dovecot.index and we don't have the first
		   .log file, so just start from scratch */
		mail_transaction_log_close(log);
	}

	/* rebuild the .log/.log.2 path strings from the index path */
	i_free(log->filepath);
	i_free(log->filepath2);
	log->filepath = i_strconcat(log->index->filepath,
				    MAIL_TRANSACTION_LOG_SUFFIX, NULL);
	log->filepath2 = i_strconcat(log->filepath, ".2", NULL);

	if (log->head != NULL)
		return mail_transaction_log_file_move_to_memory(log->head);
	else {
		file = mail_transaction_log_file_alloc_in_memory(log);
		mail_transaction_log_set_head(log, file);
		return 0;
	}
}
+
/* Called when the index's indexid has changed. Marks every log file
   still carrying the old indexid as corrupted, and replaces the head
   with a freshly created log file. */
void mail_transaction_log_indexid_changed(struct mail_transaction_log *log)
{
	struct mail_transaction_log_file *file;

	mail_transaction_logs_clean(log);

	for (file = log->files; file != NULL; file = file->next) {
		if (file->hdr.indexid != log->index->indexid) {
			mail_transaction_log_file_set_corrupted(file,
				"indexid changed: %u -> %u",
				file->hdr.indexid, log->index->indexid);
		}
	}

	if (log->head != NULL &&
	    log->head->hdr.indexid != log->index->indexid) {
		struct mail_transaction_log_file *old_head = log->head;

		/* return value intentionally ignored - on failure the old
		   head is dropped below and log->head is set to NULL */
		(void)mail_transaction_log_create(log, FALSE);
		if (--old_head->refcount == 0) {
			if (old_head == log->head) {
				/* failed to create a new log */
				log->head = NULL;
			}
			mail_transaction_log_file_free(&old_head);
		}
	}
}
+
/* Free unreferenced log files from the beginning of the file list. */
void mail_transaction_logs_clean(struct mail_transaction_log *log)
{
	struct mail_transaction_log_file *file, *next;

	/* remove only files from the beginning. this way if a view has
	   referenced an old file, it can still find the new files even if
	   there aren't any references to it currently. */
	for (file = log->files; file != NULL; file = next) {
		next = file->next;

		i_assert(file->refcount >= 0);
		if (file->refcount > 0)
			break;

		mail_transaction_log_file_free(&file);
	}
	/* sanity check: we shouldn't have locked refcount=0 files */
	for (; file != NULL; file = file->next) {
		i_assert(!file->locked || file->refcount > 0);
	}
	/* if a head exists it must still be in the list */
	i_assert(log->head == NULL || log->files != NULL);
}
+
/* Returns TRUE if the head log file should be rotated, setting *reason_r
   to a human-readable explanation. Rotation is wanted when the file has
   explicitly requested it, when its format version is older than the
   current one, when it has grown past max_size, or when it's at least
   min_size and older than min_age_secs. */
bool mail_transaction_log_want_rotate(struct mail_transaction_log *log,
				      const char **reason_r)
{
	struct mail_transaction_log_file *file = log->head;

	if (file->need_rotate != NULL) {
		*reason_r = t_strdup(file->need_rotate);
		return TRUE;
	}

	if (file->hdr.major_version < MAIL_TRANSACTION_LOG_MAJOR_VERSION ||
	    (file->hdr.major_version == MAIL_TRANSACTION_LOG_MAJOR_VERSION &&
	     file->hdr.minor_version < MAIL_TRANSACTION_LOG_MINOR_VERSION)) {
		/* upgrade immediately to a new log file format */
		*reason_r = t_strdup_printf(
			".log file format version %u.%u is too old",
			file->hdr.major_version, file->hdr.minor_version);
		return TRUE;
	}

	if (file->sync_offset > log->index->optimization_set.log.max_size) {
		/* file is too large, definitely rotate */
		*reason_r = t_strdup_printf(
			".log file size %"PRIuUOFF_T" > max_size %"PRIuUOFF_T,
			file->sync_offset, log->index->optimization_set.log.max_size);
		return TRUE;
	}
	if (file->sync_offset < log->index->optimization_set.log.min_size) {
		/* file is still too small */
		return FALSE;
	}
	/* rotate if the timestamp is old enough */
	if (file->hdr.create_stamp <
	    ioloop_time - log->index->optimization_set.log.min_age_secs) {
		*reason_r = t_strdup_printf(
			".log create_stamp %u is older than %u secs",
			file->hdr.create_stamp,
			log->index->optimization_set.log.min_age_secs);
		return TRUE;
	}
	return FALSE;
}
+
/* Rotate the (locked) head log file: create a new log file and make it
   the head. The new file is created already locked; the old head is
   unlocked and unreferenced. Returns 0 on success, -1 on error. */
int mail_transaction_log_rotate(struct mail_transaction_log *log, bool reset)
{
	struct mail_transaction_log_file *file, *old_head;
	const char *path = log->head->filepath;
	struct stat st;
	int ret;

	i_assert(log->head->locked);

	if (MAIL_INDEX_IS_IN_MEMORY(log->index)) {
		file = mail_transaction_log_file_alloc_in_memory(log);
		if (reset) {
			file->hdr.prev_file_seq = 0;
			file->hdr.prev_file_offset = 0;
		}
	} else {
		/* we're locked, we shouldn't need to worry about ESTALE
		   problems in here. */
		if (fstat(log->head->fd, &st) < 0) {
			mail_index_file_set_syscall_error(log->index,
				log->head->filepath, "fstat()");
			return -1;
		}

		file = mail_transaction_log_file_alloc(log, path);

		/* seed the new file struct with the old head's identity so
		   file_create() can detect concurrent recreation */
		file->st_dev = st.st_dev;
		file->st_ino = st.st_ino;
		file->last_mtime = st.st_mtime;
		file->last_size = st.st_size;

		if ((ret = mail_transaction_log_file_create(file, reset)) < 0) {
			mail_transaction_log_file_free(&file);
			return -1;
		}
		if (ret == 0) {
			/* someone else recreated the log while we held the
			   lock - the locking mechanism isn't working */
			mail_index_set_error(log->index,
				"Transaction log %s was recreated while we had it locked - "
				"locking is broken (lock_method=%s)", path,
				file_lock_method_to_str(log->index->set.lock_method));
			mail_transaction_log_file_free(&file);
			return -1;
		}
		i_assert(file->locked);
	}

	old_head = log->head;
	mail_transaction_log_set_head(log, file);

	e_debug(log->index->event, "Rotated transaction log %s (seq=%u, reset=%s)",
		file->filepath, file->hdr.file_seq, reset ? "yes" : "no");

	/* the newly created log file is already locked */
	mail_transaction_log_file_unlock(old_head,
		!log->index->log_sync_locked ? "rotating" :
		"rotating while syncing");
	if (--old_head->refcount == 0)
		mail_transaction_logs_clean(log);
	return 0;
}
+
+static int
+mail_transaction_log_refresh(struct mail_transaction_log *log, bool nfs_flush,
+ const char **reason_r)
+{
+ struct mail_transaction_log_file *file;
+ struct stat st;
+
+ i_assert(log->head != NULL);
+
+ if (MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(log->head)) {
+ *reason_r = "Log is in memory";
+ return 0;
+ }
+
+ if (nfs_flush &&
+ (log->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0)
+ nfs_flush_file_handle_cache(log->filepath);
+ if (nfs_safe_stat(log->filepath, &st) < 0) {
+ if (errno != ENOENT) {
+ mail_index_file_set_syscall_error(log->index,
+ log->filepath,
+ "stat()");
+ *reason_r = t_strdup_printf("stat(%s) failed: %m", log->filepath);
+ return -1;
+ }
+ /* We shouldn't lose dovecot.index.log unless the mailbox was
+ deleted or renamed. Just fail this and let the mailbox
+ opening code figure out whether to create a new log file
+ or not. Anything else can cause unwanted behavior (e.g.
+ mailbox deletion not fully finishing due to .nfs* files and
+ an IDLEing IMAP process creating the index back here). */
+ log->index->index_deleted = TRUE;
+ *reason_r = "Trasnaction log lost while it was open";
+ return -1;
+ } else if (log->head->st_ino == st.st_ino &&
+ CMP_DEV_T(log->head->st_dev, st.st_dev)) {
+ /* NFS: log files get rotated to .log.2 files instead
+ of being unlinked, so we don't bother checking if
+ the existing file has already been unlinked here
+ (in which case inodes could match but point to
+ different files) */
+ *reason_r = "Log inode is unchanged";
+ return 0;
+ }
+
+ file = mail_transaction_log_file_alloc(log, log->filepath);
+ if (mail_transaction_log_file_open(file, reason_r) <= 0) {
+ *reason_r = t_strdup_printf(
+ "Failed to refresh main transaction log: %s", *reason_r);
+ mail_transaction_log_file_free(&file);
+ return -1;
+ }
+
+ i_assert(!file->locked);
+
+ struct mail_transaction_log_file *old_head = log->head;
+ mail_transaction_log_set_head(log, file);
+ if (--old_head->refcount == 0)
+ mail_transaction_logs_clean(log);
+ *reason_r = "Log reopened";
+ return 0;
+}
+
+void mail_transaction_log_get_mailbox_sync_pos(struct mail_transaction_log *log,
+ uint32_t *file_seq_r,
+ uoff_t *file_offset_r)
+{
+ *file_seq_r = log->head->hdr.file_seq;
+ *file_offset_r = log->head->max_tail_offset;
+}
+
+void mail_transaction_log_set_mailbox_sync_pos(struct mail_transaction_log *log,
+ uint32_t file_seq,
+ uoff_t file_offset)
+{
+ i_assert(file_seq == log->head->hdr.file_seq);
+ i_assert(file_offset >= log->head->last_read_hdr_tail_offset);
+
+ if (file_offset >= log->head->max_tail_offset)
+ log->head->max_tail_offset = file_offset;
+}
+
/* Find the log file with the given sequence, refreshing the head and
   falling back to .log.2 as needed. Returns 1 and sets *file_r on
   success, 0 if the file can't exist (with *reason_r explaining why),
   or -1 on error. nfs_flush=TRUE allows a second refresh attempt with
   the NFS attribute cache flushed. */
int mail_transaction_log_find_file(struct mail_transaction_log *log,
				   uint32_t file_seq, bool nfs_flush,
				   struct mail_transaction_log_file **file_r,
				   const char **reason_r)
{
	struct mail_transaction_log_file *file;
	const char *reason;
	int ret;

	if (file_seq > log->head->hdr.file_seq) {
		/* see if the .log file has been recreated */
		if (log->head->locked) {
			/* transaction log is locked. there's no way a newer
			   file exists. */
			*reason_r = "Log is locked - newer log can't exist";
			return 0;
		}

		if (mail_transaction_log_refresh(log, FALSE, &reason) < 0) {
			*reason_r = reason;
			return -1;
		}
		if (file_seq > log->head->hdr.file_seq) {
			if (!nfs_flush ||
			    (log->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) == 0) {
				*reason_r = t_strdup_printf(
					"Requested newer log than exists: %s", reason);
				return 0;
			}
			/* try again, this time flush attribute cache */
			if (mail_transaction_log_refresh(log, TRUE, &reason) < 0) {
				*reason_r = t_strdup_printf(
					"Log refresh with NFS flush failed: %s", reason);
				return -1;
			}
			if (file_seq > log->head->hdr.file_seq) {
				*reason_r = t_strdup_printf(
					"Requested newer log than exists - "
					"still after NFS flush: %s", reason);
				return 0;
			}
		}
	}

	/* look through the already opened files */
	for (file = log->files; file != NULL; file = file->next) {
		if (file->hdr.file_seq == file_seq) {
			*file_r = file;
			return 1;
		}
		if (file->hdr.file_seq > file_seq &&
		    file->hdr.prev_file_seq == 0) {
			/* Fail here mainly to avoid unnecessarily trying to
			   open .log.2 that most likely doesn't even exist. */
			*reason_r = "Log was reset after requested file_seq";
			return 0;
		}
	}

	if (MAIL_INDEX_IS_IN_MEMORY(log->index)) {
		*reason_r = "Logs are only in memory";
		return 0;
	}

	/* see if we have it in log.2 file */
	file = mail_transaction_log_file_alloc(log, log->filepath2);
	if ((ret = mail_transaction_log_file_open(file, reason_r)) <= 0) {
		*reason_r = t_strdup_printf(
			"Not found from .log.2: %s", *reason_r);
		mail_transaction_log_file_free(&file);
		return ret;
	}

	/* but is it what we expected? */
	if (file->hdr.file_seq != file_seq) {
		*reason_r = t_strdup_printf(".log.2 contains file_seq=%u",
					    file->hdr.file_seq);
		return 0;
	}

	*file_r = file;
	return 1;
}
+
/* Lock the head log file, retrying if the head changes underneath us.
   Returns 0 on success with log->head locked, -1 on error. Logs a
   warning if acquiring the lock took unusually long. */
int mail_transaction_log_lock_head(struct mail_transaction_log *log,
				   const char *lock_reason)
{
	struct mail_transaction_log_file *file;
	time_t lock_wait_started, lock_secs = 0;
	const char *reason;
	int ret = 0;

	/* we want to get the head file locked. this is a bit racy,
	   since by the time we have it locked a new log file may have been
	   created.

	   creating new log file requires locking the head file, so if we
	   can lock it and don't see another file, we can be sure no-one is
	   creating a new log at the moment */

	lock_wait_started = time(NULL);
	for (;;) {
		file = log->head;
		if (mail_transaction_log_file_lock(file) < 0)
			return -1;

		/* hold a reference across the refresh so the file can't be
		   freed from under us */
		file->refcount++;
		ret = mail_transaction_log_refresh(log, TRUE, &reason);
		if (--file->refcount == 0) {
			mail_transaction_log_file_unlock(file, t_strdup_printf(
				"trying to lock head for %s", lock_reason));
			mail_transaction_logs_clean(log);
			file = NULL;
		}

		if (ret == 0 && log->head == file) {
			/* success */
			i_assert(file != NULL);
			lock_secs = file->lock_create_time - lock_wait_started;
			break;
		}

		if (file != NULL) {
			mail_transaction_log_file_unlock(file, t_strdup_printf(
				"trying to lock head for %s", lock_reason));
		}
		if (ret < 0)
			break;

		/* try again */
	}
	if (lock_secs > MAIL_TRANSACTION_LOG_LOCK_WARN_SECS) {
		i_warning("Locking transaction log file %s took %ld seconds (%s)",
			  log->head->filepath, (long)lock_secs, lock_reason);
	}

	i_assert(ret < 0 || log->head != NULL);
	return ret;
}
+
/* Lock the log for syncing: lock the head file, update its sync_offset,
   and return the post-lock head position in *file_seq_r/*file_offset_r.
   Returns 0 on success, -1 on error. Must be paired with
   mail_transaction_log_sync_unlock(). */
int mail_transaction_log_sync_lock(struct mail_transaction_log *log,
				   const char *lock_reason,
				   uint32_t *file_seq_r, uoff_t *file_offset_r)
{
	const char *reason;

	i_assert(!log->index->log_sync_locked);

	if (!log->log_2_unlink_checked) {
		/* we need to check once in a while if .log.2 should be deleted
		   to avoid wasting space on such old files. but we also don't
		   want to waste time on checking it when the same mailbox
		   gets opened over and over again rapidly (e.g. pop3). so
		   do this only when there have actually been some changes
		   to mailbox (i.e. when it's being locked here) */
		log->log_2_unlink_checked = TRUE;
		mail_transaction_log_2_unlink_old(log);
	}

	if (mail_transaction_log_lock_head(log, lock_reason) < 0)
		return -1;

	/* update sync_offset */
	if (mail_transaction_log_file_map(log->head, log->head->sync_offset,
					  UOFF_T_MAX, &reason) <= 0) {
		mail_index_set_error(log->index,
			"Failed to map transaction log %s at "
			"sync_offset=%"PRIuUOFF_T" after locking: %s",
			log->head->filepath, log->head->sync_offset, reason);
		mail_transaction_log_file_unlock(log->head, t_strdup_printf(
			"%s - map failed", lock_reason));
		return -1;
	}

	log->index->log_sync_locked = TRUE;
	*file_seq_r = log->head->hdr.file_seq;
	*file_offset_r = log->head->sync_offset;
	return 0;
}
+
+void mail_transaction_log_sync_unlock(struct mail_transaction_log *log,
+ const char *lock_reason)
+{
+ i_assert(log->index->log_sync_locked);
+
+ log->index->log_sync_locked = FALSE;
+ mail_transaction_log_file_unlock(log->head, lock_reason);
+}
+
+void mail_transaction_log_get_head(struct mail_transaction_log *log,
+ uint32_t *file_seq_r, uoff_t *file_offset_r)
+{
+ *file_seq_r = log->head->hdr.file_seq;
+ *file_offset_r = log->head->sync_offset;
+}
+
+void mail_transaction_log_get_tail(struct mail_transaction_log *log,
+ uint32_t *file_seq_r)
+{
+ struct mail_transaction_log_file *tail, *file = log->files;
+
+ for (tail = file; file->next != NULL; file = file->next) {
+ if (file->hdr.file_seq + 1 != file->next->hdr.file_seq)
+ tail = file->next;
+ }
+ *file_seq_r = tail->hdr.file_seq;
+}
+
+bool mail_transaction_log_is_head_prev(struct mail_transaction_log *log,
+ uint32_t file_seq, uoff_t file_offset)
+{
+ return log->head->hdr.prev_file_seq == file_seq &&
+ log->head->hdr.prev_file_offset == file_offset;
+}
+
+int mail_transaction_log_unlink(struct mail_transaction_log *log)
+{
+ if (unlink(log->filepath) < 0 &&
+ errno != ENOENT && errno != ESTALE) {
+ mail_index_file_set_syscall_error(log->index, log->filepath,
+ "unlink()");
+ return -1;
+ }
+ return 0;
+}
+
+void mail_transaction_log_get_dotlock_set(struct mail_transaction_log *log,
+ struct dotlock_settings *set_r)
+{
+ struct mail_index *index = log->index;
+
+ i_zero(set_r);
+ set_r->timeout = I_MIN(MAIL_TRANSACTION_LOG_LOCK_TIMEOUT,
+ index->set.max_lock_timeout_secs);
+ set_r->stale_timeout = MAIL_TRANSACTION_LOG_DOTLOCK_CHANGE_TIMEOUT;
+ set_r->nfs_flush = (index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0;
+ set_r->use_excl_lock =
+ (index->flags & MAIL_INDEX_OPEN_FLAG_DOTLOCK_USE_EXCL) != 0;
+}
diff --git a/src/lib-index/mail-transaction-log.h b/src/lib-index/mail-transaction-log.h
new file mode 100644
index 0000000..c19bb20
--- /dev/null
+++ b/src/lib-index/mail-transaction-log.h
@@ -0,0 +1,494 @@
+#ifndef MAIL_TRANSACTION_LOG_H
+#define MAIL_TRANSACTION_LOG_H
+
+#include "mail-index.h"
+
+#define MAIL_TRANSACTION_LOG_SUFFIX ".log"
+
+#define MAIL_TRANSACTION_LOG_MAJOR_VERSION 1
+#define MAIL_TRANSACTION_LOG_MINOR_VERSION 3
+/* Minimum allowed mail_transaction_log_header.hdr_size. If it's smaller,
+ assume the file is corrupted. */
+#define MAIL_TRANSACTION_LOG_HEADER_MIN_SIZE 24
+
+/* Helper macro for other MAIL_TRANSACTION_LOG_VERSION_*() macros */
+#define MAIL_TRANSACTION_LOG_VERSION_FULL(major, minor) \
+ ((major) << 8 | (minor))
+/* Returns TRUE if the transaction log version supports the given feature.
+ The wanted_feature is one of the MAIL_TRANSACTION_LOG_VERSION_FEATURE_*
+ macros without the macro prefix, e.g. just COMPAT_FLAGS. */
+#define MAIL_TRANSACTION_LOG_VERSION_HAVE(version, wanted_feature) \
+ ((version) >= MAIL_TRANSACTION_LOG_VERSION_FEATURE_##wanted_feature)
+/* Returns transaction log version from the given mail_transaction_log_header
+ which is compatible for the MAIL_TRANSACTION_LOG_VERSION_HAVE() macro. */
+#define MAIL_TRANSACTION_LOG_HDR_VERSION(hdr) \
+ MAIL_TRANSACTION_LOG_VERSION_FULL((hdr)->major_version, (hdr)->minor_version)
+
+/* Log feature: mail_transaction_log_header.compat_flags is filled. */
+#define MAIL_TRANSACTION_LOG_VERSION_FEATURE_COMPAT_FLAGS \
+ MAIL_TRANSACTION_LOG_VERSION_FULL(1, 2)
+/* Log feature: Don't increase modseq when reading internal flag updates
+ (because they're not client-visible anyway).
+ See MAIL_TRANSACTION_FLAG_UPDATE_IS_INTERNAL(). */
+#define MAIL_TRANSACTION_LOG_VERSION_FEATURE_HIDE_INTERNAL_MODSEQS \
+ MAIL_TRANSACTION_LOG_VERSION_FULL(1, 3)
+
/* On-disk header at the beginning of every transaction log file. */
struct mail_transaction_log_header {
	/* Major version is increased only when you can't have backwards
	   compatibility. If the field doesn't match
	   MAIL_TRANSACTION_LOG_MAJOR_VERSION, don't even try to read it. */
	uint8_t major_version;
	/* Minor version is increased when the file format changes in a
	   backwards compatible way. */
	uint8_t minor_version;
	/* Size of the header. If it's larger than this struct, ignore any
	   unknown fields. If it's smaller, assume the rest of the fields
	   are 0. */
	uint16_t hdr_size;

	/* Unique index file ID, which must match the main index's indexid.
	   See mail_index_header.indexid. This is overwritten to be 0 if the
	   log file is marked as corrupted. */
	uint32_t indexid;
	/* Log file sequence number. Increased every time the log is rotated
	   and a new log is created. Using (file_seq, offset) uniquely
	   identifies a position in the transaction log. */
	uint32_t file_seq;
	/* The previous log file's sequence and offset when the log was
	   rotated. The offset should be the same as the previous log file's
	   size. If there was no previous log file, or if the index is being
	   reset, these are 0.

	   These are mainly useful to optimize syncing when the start position
	   is (prev_file_seq, prev_file_offset). Then it's already known
	   that the syncing can be started from this log file without having
	   to open the previous log file only to realize that there is nothing
	   to sync. (Which could have also led to an error if the .log.2 was
	   already deleted.) */
	uint32_t prev_file_seq;
	uint32_t prev_file_offset;
	/* UNIX timestamp when this file was created. Used in determining when
	   to rotate the log file. */
	uint32_t create_stamp;
	/* Modseq value at the beginning of this file. Some transaction records
	   increase the modseq value. (Only with log format v1.1+) */
	uint64_t initial_modseq;

	/* Same as enum mail_index_header_compat_flags. Needs
	   MAIL_TRANSACTION_LOG_VERSION_FEATURE_COMPAT_FLAGS. */
	uint8_t compat_flags;
	/* Unused fields to make the struct 64bit aligned. These can be used
	   to add more fields to the header. */
	uint8_t unused[3];
	uint32_t unused2;
};
+
/* Transaction record types and flag bits stored in
   mail_transaction_header.type. */
enum mail_transaction_type {
	/* struct mail_transaction_expunge[] - Expunge the UIDs.
	   Must have MAIL_TRANSACTION_EXPUNGE_PROT ORed to this. Avoid using
	   this, use MAIL_TRANSACTION_EXPUNGE_GUID instead. */
	MAIL_TRANSACTION_EXPUNGE = 0x00000001,
	/* struct mail_index_record[] - Save new mails with given flags. */
	MAIL_TRANSACTION_APPEND = 0x00000002,
	/* struct mail_transaction_flag_update[] - Update message flags
	   (or just modseq). */
	MAIL_TRANSACTION_FLAG_UPDATE = 0x00000004,
	/* struct mail_transaction_header_update[] - Update the index's base
	   header (struct mail_index_header). */
	MAIL_TRANSACTION_HEADER_UPDATE = 0x00000020,
	/* struct mail_transaction_ext_intro - Start operations for the given
	   extension. This can be used to create a new extension or resize an
	   existing extension, but usually it is just used in front of the
	   other MAIL_TRANSACTION_EXT_* records to specify which extension
	   they're working with. */
	MAIL_TRANSACTION_EXT_INTRO = 0x00000040,
	/* struct mail_transaction_ext_reset - Reset the last intro extension
	   by changing its reset_id and optionally zeroing out its old data. */
	MAIL_TRANSACTION_EXT_RESET = 0x00000080,
	/* struct mail_transaction_ext_hdr_update[] - Update the last intro
	   extension's header. This might later become deprecated in favor of
	   supporting only MAIL_TRANSACTION_EXT_HDR_UPDATE32, but for now
	   it's still used for <64kB headers. */
	MAIL_TRANSACTION_EXT_HDR_UPDATE = 0x00000100,
	/* struct mail_transaction_ext_rec_update[] - Update the last intro
	   extension records for the given UIDs with given content. */
	MAIL_TRANSACTION_EXT_REC_UPDATE = 0x00000200,
	/* struct mail_transaction_keyword_update - Add/remove the specified
	   keyword to messages. */
	MAIL_TRANSACTION_KEYWORD_UPDATE = 0x00000400,
	/* struct mail_transaction_keyword_reset[] - Clear out all keywords
	   in specified messages. */
	MAIL_TRANSACTION_KEYWORD_RESET = 0x00000800,
	/* struct mail_transaction_ext_atomic_inc[] - Atomically increase or
	   decrease the last intro extension record. The record must be 1, 2,
	   4 or 8 bytes. This can be used e.g. for refcount extensions. */
	MAIL_TRANSACTION_EXT_ATOMIC_INC = 0x00001000,
	/* struct mail_transaction_expunge_guid[] - Expunge given UID, but
	   first verify that it matches the given GUID. Must have
	   MAIL_TRANSACTION_EXPUNGE_PROT ORed to this. */
	MAIL_TRANSACTION_EXPUNGE_GUID = 0x00002000,
	/* struct mail_transaction_modseq_update[] - Set a message's modseq. */
	MAIL_TRANSACTION_MODSEQ_UPDATE = 0x00008000,
	/* struct mail_transaction_ext_hdr_update32[] - Update the last intro
	   extension's header. Used for >=64kB headers. See also
	   MAIL_TRANSACTION_EXT_HDR_UPDATE. This was added in Dovecot v2.0. */
	MAIL_TRANSACTION_EXT_HDR_UPDATE32 = 0x00010000,
	/* Index was marked as deleted using mail_index_set_deleted().
	   There is no record content for this. */
	MAIL_TRANSACTION_INDEX_DELETED = 0x00020000,
	/* Index was marked as undeleted using mail_index_set_undeleted().
	   There is no record content for this. */
	MAIL_TRANSACTION_INDEX_UNDELETED = 0x00040000,
	/* struct mail_transaction_boundary - Specifies a size of the following
	   records that must be treated as a single transaction. This works
	   so that the transaction log reading code stops if it finds that
	   there is a transaction whose size points outside the currently
	   existing file. An unfinished transaction is truncated away after the
	   next write to the log. FIXME: it would be better to rotate the
	   log instead of truncating it. */
	MAIL_TRANSACTION_BOUNDARY = 0x00080000,
	/* Mailbox attribute update. This is a bit complicated format:
	   - [+-][p-s]<name><NUL>
	     - "+" means attribute is set, "-" means unset
	     - "p" means private attribute, "s" means shared
	     - <name> is the attribute name
	     - This can repeat multiple times
	   - <NUL>
	   - 0..3 bytes padding for 32bit alignment
	   - For each attribute update an array of uint32_t integers:
	     - Update timestamp
	     - For each "+" only: Length of the attribute value.
	*/
	MAIL_TRANSACTION_ATTRIBUTE_UPDATE = 0x00100000,

	/* Mask to get the transaction record type only (excluding the
	   modifier flag bits such as MAIL_TRANSACTION_EXTERNAL). */
	MAIL_TRANSACTION_TYPE_MASK = 0x0fffffff,

#define MAIL_TRANSACTION_EXT_MASK \
	(MAIL_TRANSACTION_EXT_INTRO | MAIL_TRANSACTION_EXT_RESET | \
	MAIL_TRANSACTION_EXT_HDR_UPDATE | MAIL_TRANSACTION_EXT_HDR_UPDATE32 | \
	MAIL_TRANSACTION_EXT_REC_UPDATE | MAIL_TRANSACTION_EXT_ATOMIC_INC)

	/* Since we'll expunge mails based on data read from transaction log,
	   try to avoid the possibility of corrupted transaction log expunging
	   messages. This value is ORed to the actual MAIL_TRANSACTION_EXPUNGE*
	   flag. If it's not present, assume corrupted log. */
	MAIL_TRANSACTION_EXPUNGE_PROT = 0x0000cd90,

	/* External transactions have a bit different meanings depending on the
	   transaction type. Generally they mean to indicate changes that have
	   already occurred, instead of changes that are only being requested
	   to happen on next sync. For example expunges are first requested
	   to be done with internal transactions, and then there's a separate
	   external transaction to indicate that they were actually done. */
	MAIL_TRANSACTION_EXTERNAL = 0x10000000,
	/* This change syncs the state with another mailbox (dsync),
	   i.e. the change isn't something that a user requested locally. */
	MAIL_TRANSACTION_SYNC = 0x20000000
};
+
/* Header preceding every transaction in the log file. */
struct mail_transaction_header {
	/* Size of this header and the following records. This size can be
	   used to calculate how many records there are. The size is written
	   via mail_index_uint32_to_offset(). */
	uint32_t size;
	uint32_t type; /* enum mail_transaction_type */
	/* Header is followed by the type-specific records. */
};
+
/* See MAIL_TRANSACTION_MODSEQ_UPDATE. */
struct mail_transaction_modseq_update {
	uint32_t uid;
	/* The 64-bit modseq split into two 32-bit halves:
	   don't use uint64_t here. it adds extra 32 bits of padding and also
	   causes problems with CPUs that require alignment */
	uint32_t modseq_low32;
	uint32_t modseq_high32;
};

/* See MAIL_TRANSACTION_EXPUNGE. */
struct mail_transaction_expunge {
	/* Expunge all mails between uid1..uid2. */
	uint32_t uid1, uid2;
};
/* See MAIL_TRANSACTION_EXPUNGE_GUID. */
struct mail_transaction_expunge_guid {
	/* Expunge uid, but only if it matches guid_128. */
	uint32_t uid;
	/* GUID of the mail. If it's not 128 bit GUID, first pass it through
	   mail_generate_guid_128_hash() to get 128 bit SHA1 of it. */
	guid_128_t guid_128;
};
+
/* See MAIL_TRANSACTION_FLAG_UPDATE. */
struct mail_transaction_flag_update {
	/* Change the flags for all mails between uid1..uid2. */
	uint32_t uid1, uid2;
	/* Add these flags to the mails. */
	uint8_t add_flags;
	/* Remove these flags from the mails. To replace all existing flags,
	   just set this to 0xff and specify the wanted flags in add_flags. */
	uint8_t remove_flags;
	/* If non-0, MAIL_INDEX_MAIL_FLAG_UPDATE_MODSEQ was used to force
	   increasing modseq update to the mails even though no flags were
	   actually changed. This differs from MAIL_TRANSACTION_MODSEQ_UPDATE
	   in that the modseq is just wanted to be increased, doesn't matter
	   to which value specifically. */
	uint8_t modseq_inc_flag;
	/* Unused padding */
	uint8_t padding;
};
+
/* See MAIL_TRANSACTION_KEYWORD_UPDATE. */
struct mail_transaction_keyword_update {
	/* enum modify_type : MODIFY_ADD / MODIFY_REMOVE */
	uint8_t modify_type;
	uint8_t padding;
	/* Size of name[] */
	uint16_t name_size;
	/* unsigned char name[name_size]; */
	/* Update keywords for the given UIDs. The array's size is calculated
	   from mail_transaction_header.size. */
	/* array of { uint32_t uid1, uid2; } */
};

/* See MAIL_TRANSACTION_KEYWORD_RESET. */
struct mail_transaction_keyword_reset {
	/* Clear out all keywords for uid1..uid2. */
	uint32_t uid1, uid2;
};

/* See MAIL_TRANSACTION_HEADER_UPDATE. */
struct mail_transaction_header_update {
	/* Update start offset within the base index header. */
	uint16_t offset;
	/* Size of the following data[] to update. */
	uint16_t size;
	/* unsigned char data[size]; */
	/* 0..3 bytes of padding to get to 32bit alignment. */
	/* unsigned char padding[]; */
};
+
/* Flags for mail_transaction_ext_intro.flags. */
enum {
	/* Don't shrink hdr_size, record_size or record_align but grow them
	   if necessary. */
	MAIL_TRANSACTION_EXT_INTRO_FLAG_NO_SHRINK = 0x01
};

/* See MAIL_TRANSACTION_EXT_INTRO. Also see struct mail_index_ext_header for
   more explanations of these fields. */
struct mail_transaction_ext_intro {
	/* If extension is already known to exist in the index file,
	   set ext_id, but use empty name. If this is a new extension, set
	   name, but use ext_id=(uint32_t)-1. */
	uint32_t ext_id;
	uint32_t reset_id;
	/* Size of the extension header. When growing the header size, it's
	   initially filled with zeros. The header can be written to with
	   ext-hdr-update records. */
	uint32_t hdr_size;
	uint16_t record_size;
	uint16_t record_align;
	uint16_t flags;
	uint16_t name_size;
	/* unsigned char name[]; */
};

/* See MAIL_TRANSACTION_EXT_RESET. */
struct mail_transaction_ext_reset {
	/* New value for extension's reset_id */
	uint32_t new_reset_id;
	/* Non-0 if the old extension header and record data should be
	   preserved. Normally all of it is zeroed out. */
	uint8_t preserve_data;
	uint8_t unused_padding[3];
};
+
+/* See MAIL_TRANSACTION_EXT_HDR_UPDATE. */
+struct mail_transaction_ext_hdr_update {
+ /* Update start offset. */
+ uint16_t offset;
+ /* Size of the following data[] to update. */
+ uint16_t size;
+ /* unsigned char data[size]; */
+ /* 0..3 bytes of padding to get to 32bit alignment. */
+ /* unsigned char padding[]; */
+};
+/* See MAIL_TRANSACTION_EXT_HDR_UPDATE32. */
+struct mail_transaction_ext_hdr_update32 {
+ /* Update start offset. */
+ uint32_t offset;
+ /* Size of the following data[] to update. */
+ uint32_t size;
+ /* unsigned char data[size]; */
+ /* 0..3 bytes of padding to get to 32bit alignment. */
+ /* unsigned char padding[]; */
+};
+
+/* See MAIL_TRANSACTION_EXT_REC_UPDATE. */
+struct mail_transaction_ext_rec_update {
+ uint32_t uid;
+ /* unsigned char data[mail_transaction_ext_intro.record_size]; */
+ /* 0..3 bytes of padding to get to 32bit alignment. */
+ /* unsigned char padding[]; */
+};
+
+/* See MAIL_TRANSACTION_EXT_ATOMIC_INC. */
+struct mail_transaction_ext_atomic_inc {
+ uint32_t uid;
+ /* Add this value to the extension record data. Can be negative. */
+ int32_t diff;
+};
+
+/* See MAIL_TRANSACTION_BOUNDARY. */
+struct mail_transaction_boundary {
+ /* Size of the whole transaction, including this record and header. */
+ uint32_t size;
+};
+
+/* State for appending a set of changes to the transaction log. Created by
+   mail_transaction_log_append_begin(), filled with
+   mail_transaction_log_append_add() and written out by
+   mail_transaction_log_append_commit(). */
+struct mail_transaction_log_append_ctx {
+	struct mail_transaction_log *log;
+	/* All the changes that will be written to the transaction log. */
+	buffer_t *output;
+
+	/* Transaction flags as given to mail_transaction_log_append_begin(). */
+	enum mail_transaction_type trans_flags;
+
+	/* Tracking the current highest_modseq after the changes. This will
+	   be used to update mail_transaction_log_file.sync_highest_modseq. */
+	uint64_t new_highest_modseq;
+	/* Number of transaction records added so far. */
+	unsigned int transaction_count;
+
+	/* Copied from mail_index_transaction.sync_transaction */
+	bool index_sync_transaction:1;
+	/* Copied from mail_index_transaction.tail_offset_changed */
+	bool tail_offset_changed:1;
+	/* TRUE if the mail_transaction_log_file has been synced up to the
+	   current write offset, and we're writing a syncing transaction
+	   (index_sync_transaction=TRUE). This means that the just written
+	   transaction can be assumed to be synced already. */
+	bool sync_includes_this:1;
+	/* fdatasync() after writing the transaction. */
+	bool want_fsync:1;
+};
+
+/* TRUE if log position (seq1, offset1) is strictly before (seq2, offset2).
+   NOTE: function-like macro - each argument may be evaluated more than once,
+   so don't pass expressions with side effects. */
+#define LOG_IS_BEFORE(seq1, offset1, seq2, offset2) \
+	(((offset1) < (offset2) && (seq1) == (seq2)) || (seq1) < (seq2))
+
+struct mail_transaction_log *
+mail_transaction_log_alloc(struct mail_index *index);
+void mail_transaction_log_free(struct mail_transaction_log **log);
+
+/* Open the transaction log. Returns 1 if ok, 0 if the file doesn't exist or
+   is corrupted, -1 if there was some I/O error. */
+int mail_transaction_log_open(struct mail_transaction_log *log);
+/* Create, or recreate, the transaction log. Returns 0 if ok, -1 if error. */
+int mail_transaction_log_create(struct mail_transaction_log *log, bool reset);
+/* Close all the open transactions log files. */
+void mail_transaction_log_close(struct mail_transaction_log *log);
+
+/* Notify of indexid change */
+void mail_transaction_log_indexid_changed(struct mail_transaction_log *log);
+
+/* Returns the file seq/offset where the mailbox is currently synced at.
+ Since the log is rotated only when mailbox is fully synced, the sequence
+ points always to the latest file. This function doesn't actually find the
+ latest sync position, so you'll need to use eg. log_view_set() before
+ calling this. */
+void mail_transaction_log_get_mailbox_sync_pos(struct mail_transaction_log *log,
+ uint32_t *file_seq_r,
+ uoff_t *file_offset_r);
+/* Set the current mailbox sync position. file_seq must always be the latest
+   log file's sequence. The offset is written automatically to the log when
+   other transactions are being written. */
+void mail_transaction_log_set_mailbox_sync_pos(struct mail_transaction_log *log,
+ uint32_t file_seq,
+ uoff_t file_offset);
+
+struct mail_transaction_log_view *
+mail_transaction_log_view_open(struct mail_transaction_log *log);
+void mail_transaction_log_view_close(struct mail_transaction_log_view **view);
+
+/* Set view boundaries. Returns 1 if ok, 0 if files are lost, corrupted or the
+ offsets are broken, -1 if I/O error. reset_r=TRUE if the whole index should
+ be reset before applying any changes. */
+int mail_transaction_log_view_set(struct mail_transaction_log_view *view,
+ uint32_t min_file_seq, uoff_t min_file_offset,
+ uint32_t max_file_seq, uoff_t max_file_offset,
+ bool *reset_r, const char **reason_r);
+/* Scan through all of the log files that we can find.
+ Returns -1 if error, 0 if ok. */
+int mail_transaction_log_view_set_all(struct mail_transaction_log_view *view);
+/* Clear the view. If oldest_file_seq > 0, keep it and newer log files
+ referenced so we don't get desynced. */
+void mail_transaction_log_view_clear(struct mail_transaction_log_view *view,
+ uint32_t oldest_file_seq);
+
+/* Read next transaction record from current position. The position is updated.
+ Returns -1 if error, 0 if we're at end of the view, 1 if ok. */
+int mail_transaction_log_view_next(struct mail_transaction_log_view *view,
+ const struct mail_transaction_header **hdr_r,
+ const void **data_r);
+/* Mark the current view's position to the record returned previously with
+ _log_view_next(). */
+void mail_transaction_log_view_mark(struct mail_transaction_log_view *view);
+/* Seek to previously marked position. */
+void mail_transaction_log_view_rewind(struct mail_transaction_log_view *view);
+
+/* Returns the position of the record returned previously with
+ mail_transaction_log_view_next() */
+void
+mail_transaction_log_view_get_prev_pos(struct mail_transaction_log_view *view,
+ uint32_t *file_seq_r,
+ uoff_t *file_offset_r);
+/* Return the modseq of the change returned previously with _view_next(). */
+uint64_t
+mail_transaction_log_view_get_prev_modseq(struct mail_transaction_log_view *view);
+/* Returns TRUE if we're at the end of the view window. */
+bool mail_transaction_log_view_is_last(struct mail_transaction_log_view *view);
+
+/* Marks the log file in current position to be corrupted. */
+void
+mail_transaction_log_view_set_corrupted(struct mail_transaction_log_view *view,
+ const char *fmt, ...)
+ ATTR_FORMAT(2, 3) ATTR_COLD;
+bool
+mail_transaction_log_view_is_corrupted(struct mail_transaction_log_view *view);
+
+int mail_transaction_log_append_begin(struct mail_index *index,
+ enum mail_transaction_type flags,
+ struct mail_transaction_log_append_ctx **ctx_r);
+void mail_transaction_log_append_add(struct mail_transaction_log_append_ctx *ctx,
+ enum mail_transaction_type type,
+ const void *data, size_t size);
+int mail_transaction_log_append_commit(struct mail_transaction_log_append_ctx **ctx);
+
+/* Lock transaction log for index synchronization. This is used as the main
+ exclusive lock for index changes. The index/log can still be read since they
+ don't use locking, but the log can't be written to while it's locked.
+ Returns 0 on success, -1 if locking failed for any reason.
+
+ After successfully locking the transaction log, the log file is also fully
+ mapped into memory and its sync_offset updated. The locked file's sequence
+ and sync_offset are returned. */
+int mail_transaction_log_sync_lock(struct mail_transaction_log *log,
+ const char *lock_reason,
+ uint32_t *file_seq_r, uoff_t *file_offset_r);
+void mail_transaction_log_sync_unlock(struct mail_transaction_log *log,
+ const char *lock_reason);
+/* Returns the current head. Works only when log is locked. */
+void mail_transaction_log_get_head(struct mail_transaction_log *log,
+ uint32_t *file_seq_r, uoff_t *file_offset_r);
+/* Returns the current tail from which all files are open to head. */
+void mail_transaction_log_get_tail(struct mail_transaction_log *log,
+ uint32_t *file_seq_r);
+/* Returns TRUE if given seq/offset is current head log's rotate point. */
+bool mail_transaction_log_is_head_prev(struct mail_transaction_log *log,
+ uint32_t file_seq, uoff_t file_offset);
+
+/* Move currently opened log head file to memory (called by
+ mail_index_move_to_memory()) */
+int mail_transaction_log_move_to_memory(struct mail_transaction_log *log);
+/* Unlink transaction log files */
+int mail_transaction_log_unlink(struct mail_transaction_log *log);
+
+#endif
diff --git a/src/lib-index/mailbox-log.c b/src/lib-index/mailbox-log.c
new file mode 100644
index 0000000..433ba29
--- /dev/null
+++ b/src/lib-index/mailbox-log.c
@@ -0,0 +1,292 @@
+/* Copyright (c) 2009-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "eacces-error.h"
+#include "mailbox-log.h"
+
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+/* How often to reopen the log file to make sure that the changes are written
+ to the latest file. The main problem here is if the value is too high the
+ changes could be written to a file that was already rotated and deleted.
+ That wouldn't happen in any real world situations though, since the file
+ rotation time is probably measured in months or years. Still, each session
+ rarely writes anything here, so the value can just as well be a pretty small
+ one without any performance problems. */
+#define MAILBOX_LOG_REOPEN_SECS (60)
+#define MAILBOX_LOG_ROTATE_SIZE (1024*4)
+
+struct mailbox_log {
+	/* Main log path, and the older rotated log path (filepath + ".2") */
+	char *filepath, *filepath2;
+	/* Open fd to filepath, or -1 when closed */
+	int fd;
+	struct event *event;
+	/* Time when fd was last (re)opened. Used to periodically reopen the
+	   file so writes don't keep going to an already-rotated log. */
+	time_t open_timestamp;
+
+	/* Permissions used when creating the log file */
+	mode_t mode;
+	gid_t gid;
+	char *gid_origin;
+};
+
+struct mailbox_log_iter {
+	struct mailbox_log *log;
+
+	/* fd of the log file currently being read, or -1 */
+	int fd;
+	/* Path currently being read: NULL initially, then the older rotated
+	   log (filepath2), then the main log (filepath). */
+	const char *filepath;
+
+	/* Read-ahead buffer of whole records */
+	struct mailbox_log_record buf[128];
+	/* Next record index to return, and number of valid records in buf */
+	unsigned int idx, count;
+	/* File offset for the next pread() */
+	uoff_t offset;
+	/* Set on I/O error; reported by mailbox_log_iter_deinit() */
+	bool failed;
+};
+
+static void mailbox_log_close(struct mailbox_log *log);
+
+struct mailbox_log *
+mailbox_log_alloc(struct event *parent_event, const char *path)
+{
+	struct mailbox_log *log = i_new(struct mailbox_log, 1);
+
+	/* The file isn't opened until it's first needed. */
+	log->fd = -1;
+	log->filepath = i_strdup(path);
+	/* The older rotated log lives next to the main one with ".2" suffix */
+	log->filepath2 = i_strconcat(path, ".2", NULL);
+	log->event = event_create(parent_event);
+	/* Defaults; can be changed with mailbox_log_set_permissions() */
+	log->mode = 0644;
+	log->gid = (gid_t)-1;
+	return log;
+}
+
+/* Free the log handle, closing the fd and releasing all owned strings. */
+void mailbox_log_free(struct mailbox_log **_log)
+{
+	struct mailbox_log *log = *_log;
+
+	/* Clear the caller's pointer first to avoid dangling references */
+	*_log = NULL;
+
+	mailbox_log_close(log);
+	event_unref(&log->event);
+	i_free(log->gid_origin);
+	i_free(log->filepath);
+	i_free(log->filepath2);
+	i_free(log);
+}
+
+/* Close the log fd if open. Safe to call when already closed (fd == -1). */
+static void mailbox_log_close(struct mailbox_log *log)
+{
+	i_close_fd_path(&log->fd, log->filepath);
+}
+
+/* Set mode/group used when the log file is created. gid_origin is only used
+   for producing a better error message if changing the group fails. */
+void mailbox_log_set_permissions(struct mailbox_log *log, mode_t mode,
+				 gid_t gid, const char *gid_origin)
+{
+	log->mode = mode;
+	log->gid = gid;
+	i_free(log->gid_origin);
+	log->gid_origin = i_strdup(gid_origin);
+}
+
+/* Open (or create) the log file for appending. Returns 0 if ok, -1 on error.
+   A failure to change the file's group is logged but not treated as fatal. */
+static int mailbox_log_open(struct mailbox_log *log)
+{
+	mode_t old_mode;
+
+	i_assert(log->fd == -1);
+
+	log->open_timestamp = ioloop_time;
+	log->fd = open(log->filepath, O_RDWR | O_APPEND);
+	if (log->fd != -1)
+		return 0;
+
+	/* try to create it */
+	/* 0666 ^ mode clears exactly the permission bits not wanted, so the
+	   file ends up created with log->mode regardless of process umask */
+	old_mode = umask(0666 ^ log->mode);
+	log->fd = open(log->filepath, O_RDWR | O_APPEND | O_CREAT, 0666);
+	umask(old_mode);
+
+	if (log->fd == -1) {
+		if (errno != EACCES)
+			e_error(log->event, "creat(%s) failed: %m",
+				log->filepath);
+		else
+			e_error(log->event, "%s",
+				eacces_error_get("creat", log->filepath));
+		return -1;
+	}
+	/* best-effort group change; EPERM gets a more descriptive message */
+	if (fchown(log->fd, (uid_t)-1, log->gid) < 0) {
+		if (errno != EPERM)
+			e_error(log->event, "fchown(%s) failed: %m",
+				log->filepath);
+		else {
+			e_error(log->event, "%s",
+				eperm_error_get_chgrp("fchown",
+					log->filepath, log->gid,
+					log->gid_origin));
+		}
+	}
+	return 0;
+}
+
+/* Rotate the log by renaming it over the ".2" file once it reaches
+   MAILBOX_LOG_ROTATE_SIZE. Returns 0 if ok (or no rotation needed),
+   -1 on error. */
+static int mailbox_log_rotate_if_needed(struct mailbox_log *log)
+{
+	struct stat st;
+
+	if (fstat(log->fd, &st) < 0) {
+		e_error(log->event, "fstat(%s) failed: %m", log->filepath);
+		return -1;
+	}
+
+	if (st.st_size < MAILBOX_LOG_ROTATE_SIZE)
+		return 0;
+
+	/* ENOENT is ignored - presumably another process could have rotated
+	   the file already. NOTE(review): confirm that's the intent. */
+	if (rename(log->filepath, log->filepath2) < 0 && errno != ENOENT) {
+		e_error(log->event, "rename(%s, %s) failed: %m",
+			log->filepath, log->filepath2);
+		return -1;
+	}
+	return 0;
+}
+
+/* Store the timestamp into the record as an unaligned big-endian 32-bit
+   value. NOTE(review): time_t is truncated to 32 bits here - breaks after
+   year 2106 (unsigned interpretation); confirm this is acceptable. */
+void mailbox_log_record_set_timestamp(struct mailbox_log_record *rec,
+				      time_t stamp)
+{
+	cpu32_to_be_unaligned(stamp, rec->timestamp);
+}
+
+/* Read back the big-endian 32-bit timestamp stored by
+   mailbox_log_record_set_timestamp(). */
+time_t mailbox_log_record_get_timestamp(const struct mailbox_log_record *rec)
+{
+	return (time_t) be32_to_cpu_unaligned(rec->timestamp);
+}
+
+/* Append a record to the log, reopening the file periodically and rotating
+   it when it grows too large. Returns 0 if ok, -1 on error. Relies on
+   O_APPEND write atomicity instead of locking. */
+int mailbox_log_append(struct mailbox_log *log,
+		       const struct mailbox_log_record *rec)
+{
+	struct stat st;
+	ssize_t ret;
+
+	/* we don't have to be too strict about appending to the latest log
+	   file. the records' ordering doesn't matter and iteration goes
+	   through both logs anyway. still, if there's a long running session
+	   it shouldn't keep writing to a rotated log forever. */
+	if (log->open_timestamp/MAILBOX_LOG_REOPEN_SECS !=
+	    ioloop_time/MAILBOX_LOG_REOPEN_SECS)
+		mailbox_log_close(log);
+	if (log->fd == -1) {
+		if (mailbox_log_open(log) < 0)
+			return -1;
+		i_assert(log->fd != -1);
+	}
+
+	/* We don't bother with locking, atomic appends will protect us.
+	   If they don't (NFS), the worst that can happen is that a few
+	   records get overwritten (because they're all the same size).
+	   This whole log isn't supposed to be super-reliable anyway. */
+	ret = write(log->fd, rec, sizeof(*rec));
+	if (ret < 0) {
+		e_error(log->event, "write(%s) failed: %m", log->filepath);
+		return -1;
+	} else if (ret != sizeof(*rec)) {
+		e_error(log->event, "write(%s) wrote %d/%u bytes", log->filepath,
+			(int)ret, (unsigned int)sizeof(*rec));
+		/* undo the partial write so the file stays record-aligned.
+		   NOTE(review): assumes nothing else appended in between -
+		   a concurrent append could get truncated here. */
+		if (fstat(log->fd, &st) == 0) {
+			if (ftruncate(log->fd, st.st_size - ret) < 0) {
+				e_error(log->event, "ftruncate(%s) failed: %m",
+					log->filepath);
+			}
+		}
+		return -1;
+	}
+
+	(void)mailbox_log_rotate_if_needed(log);
+	return 0;
+}
+
+/* Advance the iterator to the next log file: first the older rotated ".2"
+   file, then the main log file. Returns TRUE if a file was opened. A missing
+   ".2" file silently falls through to the main file; a missing main file
+   just ends the iteration without an error. */
+static bool mailbox_log_iter_open_next(struct mailbox_log_iter *iter)
+{
+	i_close_fd_path(&iter->fd, iter->filepath);
+	if (iter->filepath == NULL)
+		iter->filepath = iter->log->filepath2;
+	else if (iter->filepath == iter->log->filepath2)
+		iter->filepath = iter->log->filepath;
+	else
+		return FALSE;
+
+	/* NOTE(review): O_APPEND has no effect with O_RDONLY reads */
+	iter->fd = open(iter->filepath, O_RDONLY | O_APPEND);
+	if (iter->fd != -1)
+		return TRUE;
+	else if (errno == ENOENT) {
+		if (iter->filepath == iter->log->filepath2)
+			return mailbox_log_iter_open_next(iter);
+	} else {
+		e_error(iter->log->event, "open(%s) failed: %m", iter->filepath);
+		iter->failed = TRUE;
+	}
+	return FALSE;
+}
+
+/* Begin iterating all records in the log (rotated file first, then the
+   main file). Open failures are remembered in iter->failed and reported
+   by mailbox_log_iter_deinit(). */
+struct mailbox_log_iter *mailbox_log_iter_init(struct mailbox_log *log)
+{
+	struct mailbox_log_iter *iter;
+
+	iter = i_new(struct mailbox_log_iter, 1);
+	iter->log = log;
+	iter->fd = -1;
+	(void)mailbox_log_iter_open_next(iter);
+	return iter;
+}
+
+const struct mailbox_log_record *
+mailbox_log_iter_next(struct mailbox_log_iter *iter)
+{
+ const struct mailbox_log_record *rec;
+ uoff_t offset;
+ ssize_t ret;
+
+ if (iter->idx == iter->count) {
+ if (iter->fd == -1)
+ return NULL;
+
+ ret = pread(iter->fd, iter->buf, sizeof(iter->buf),
+ iter->offset);
+ if (ret < 0) {
+ e_error(iter->log->event, "pread(%s) failed: %m",
+ iter->filepath);
+ iter->failed = TRUE;
+ return NULL;
+ }
+ if (ret == 0) {
+ if (!mailbox_log_iter_open_next(iter))
+ return NULL;
+ iter->idx = iter->count = 0;
+ iter->offset = 0;
+ return mailbox_log_iter_next(iter);
+ }
+ iter->idx = 0;
+ iter->count = ret / sizeof(iter->buf[0]);
+ iter->offset += iter->count * sizeof(iter->buf[0]);
+ }
+ rec = &iter->buf[iter->idx++];
+ if (rec->type < MAILBOX_LOG_RECORD_DELETE_MAILBOX ||
+ rec->type > MAILBOX_LOG_RECORD_UNSUBSCRIBE) {
+ offset = iter->offset -
+ (iter->count - iter->idx) * sizeof(iter->buf[0]);
+ e_error(iter->log->event,
+ "Corrupted mailbox log %s at offset %"PRIuUOFF_T": "
+ "type=%d", iter->filepath, offset, rec->type);
+ i_unlink(iter->filepath);
+ return NULL;
+ }
+ return rec;
+}
+
+/* Finish the iteration. Returns 0 if ok, -1 if an I/O error occurred at any
+   point during the iteration. */
+int mailbox_log_iter_deinit(struct mailbox_log_iter **_iter)
+{
+	struct mailbox_log_iter *iter = *_iter;
+	int ret = iter->failed ? -1 : 0;
+
+	*_iter = NULL;
+
+	i_close_fd_path(&iter->fd, iter->filepath);
+	i_free(iter);
+	return ret;
+}
diff --git a/src/lib-index/mailbox-log.h b/src/lib-index/mailbox-log.h
new file mode 100644
index 0000000..ef19fc2
--- /dev/null
+++ b/src/lib-index/mailbox-log.h
@@ -0,0 +1,44 @@
+#ifndef MAILBOX_LOG_H
+#define MAILBOX_LOG_H
+
+#include "guid.h"
+
+enum mailbox_log_record_type {
+	MAILBOX_LOG_RECORD_DELETE_MAILBOX = 1,
+	MAILBOX_LOG_RECORD_DELETE_DIR,
+	MAILBOX_LOG_RECORD_RENAME,
+	MAILBOX_LOG_RECORD_SUBSCRIBE,
+	MAILBOX_LOG_RECORD_UNSUBSCRIBE,
+	/* NOTE: values are written to disk in mailbox_log_record.type, so
+	   new types must be appended here without renumbering old ones. */
+	MAILBOX_LOG_RECORD_CREATE_DIR
+};
+
+/* On-disk record format. All records have the same fixed size -
+   mailbox_log_append() relies on this when recovering from overlapping
+   (non-atomic) appends. */
+struct mailbox_log_record {
+	/* enum mailbox_log_record_type */
+	uint8_t type;
+	uint8_t padding[3];
+	guid_128_t mailbox_guid;
+	/* Big-endian 32-bit UNIX timestamp - see
+	   mailbox_log_record_set_timestamp() */
+	uint8_t timestamp[4];
+};
+
+struct mailbox_log *
+mailbox_log_alloc(struct event *parent_event, const char *path);
+void mailbox_log_free(struct mailbox_log **log);
+
+void mailbox_log_set_permissions(struct mailbox_log *log, mode_t mode,
+ gid_t gid, const char *gid_origin);
+
+void mailbox_log_record_set_timestamp(struct mailbox_log_record *rec,
+ time_t stamp);
+time_t mailbox_log_record_get_timestamp(const struct mailbox_log_record *rec);
+
+/* Append a new record to mailbox log. Returns 0 if ok, -1 if error. */
+int mailbox_log_append(struct mailbox_log *log,
+ const struct mailbox_log_record *rec);
+
+/* Iterate through all records in mailbox log. */
+struct mailbox_log_iter *mailbox_log_iter_init(struct mailbox_log *log);
+const struct mailbox_log_record *
+mailbox_log_iter_next(struct mailbox_log_iter *iter);
+/* Returns 0 if ok, -1 if I/O error. */
+int mailbox_log_iter_deinit(struct mailbox_log_iter **iter);
+
+#endif
diff --git a/src/lib-index/test-mail-cache-common.c b/src/lib-index/test-mail-cache-common.c
new file mode 100644
index 0000000..ee34c04
--- /dev/null
+++ b/src/lib-index/test-mail-cache-common.c
@@ -0,0 +1,166 @@
+/* Copyright (c) 2020 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "test-common.h"
+#include "test-mail-cache.h"
+
+static const struct mail_cache_field cache_field_foo = {
+ .name = "foo",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_YES,
+};
+static const struct mail_cache_field cache_field_bar = {
+ .name = "bar",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_YES,
+};
+static const struct mail_cache_field cache_field_baz = {
+ .name = "baz",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_YES,
+};
+
+/* Initialize a test context for the given index: open a view and register
+   the three standard test cache fields (foo/bar/baz). */
+void test_mail_cache_init(struct mail_index *index,
+			  struct test_mail_cache_ctx *ctx_r)
+{
+	i_zero(ctx_r);
+	ctx_r->index = index;
+	ctx_r->cache = index->cache;
+	ctx_r->view = mail_index_view_open(index);
+
+	ctx_r->cache_field = cache_field_foo;
+	ctx_r->cache_field2 = cache_field_bar;
+	ctx_r->cache_field3 = cache_field_baz;
+	/* Try to use different file_field_maps for different index instances
+	   by randomizing the registration order. This only works for the 2nd
+	   index that is opened, because the initial cache is always created
+	   with all cache fields in the same order. */
+	if (i_rand_limit(2) == 0) {
+		mail_cache_register_fields(ctx_r->cache, &ctx_r->cache_field, 1);
+		mail_cache_register_fields(ctx_r->cache, &ctx_r->cache_field2, 1);
+		mail_cache_register_fields(ctx_r->cache, &ctx_r->cache_field3, 1);
+	} else {
+		mail_cache_register_fields(ctx_r->cache, &ctx_r->cache_field3, 1);
+		mail_cache_register_fields(ctx_r->cache, &ctx_r->cache_field2, 1);
+		mail_cache_register_fields(ctx_r->cache, &ctx_r->cache_field, 1);
+	}
+}
+
+/* Tear down a test context: close the view (if still open) and the index. */
+void test_mail_cache_deinit(struct test_mail_cache_ctx *ctx)
+{
+	if (ctx->view != NULL)
+		mail_index_view_close(&ctx->view);
+	test_mail_index_close(&ctx->index);
+}
+
+/* Return how many times the cache file has been purged. Presumably file_seq
+   starts at indexid and is incremented on each purge, so the difference is
+   the purge count - NOTE(review): confirm against mail-cache-purge.c. */
+unsigned int test_mail_cache_get_purge_count(struct test_mail_cache_ctx *ctx)
+{
+	const struct mail_cache_header *hdr = ctx->cache->hdr;
+
+	return hdr->file_seq - hdr->indexid;
+}
+
+/* Run a full index sync, applying any pending expunges the sync stream
+   reports. */
+void test_mail_cache_index_sync(struct test_mail_cache_ctx *ctx)
+{
+	struct mail_index_sync_ctx *sync_ctx;
+	struct mail_index_view *view;
+	struct mail_index_transaction *trans;
+	struct mail_index_sync_rec sync_rec;
+
+	test_assert(mail_index_sync_begin(ctx->index, &sync_ctx,
+					  &view, &trans, 0) == 1);
+	while (mail_index_sync_next(sync_ctx, &sync_rec)) {
+		if (sync_rec.type == MAIL_INDEX_SYNC_TYPE_EXPUNGE) {
+			/* we're a bit kludgily assuming that there's only
+			   one UID and also that uid==seq */
+			mail_index_expunge(trans, sync_rec.uid1);
+		}
+	}
+	test_assert(mail_index_sync_commit(&sync_ctx) == 0);
+}
+
+/* Bring ctx->view up to date with the latest index changes. The individual
+   sync records and the delayed_expunges flag are intentionally ignored. */
+void test_mail_cache_view_sync(struct test_mail_cache_ctx *ctx)
+{
+	struct mail_index_view_sync_ctx *sync_ctx;
+	struct mail_index_view_sync_rec sync_rec;
+	bool delayed_expunges;
+
+	sync_ctx = mail_index_view_sync_begin(ctx->view, MAIL_INDEX_VIEW_SYNC_FLAG_FIX_INCONSISTENT);
+	while (mail_index_view_sync_next(sync_ctx, &sync_rec)) ;
+	test_assert(mail_index_view_sync_commit(&sync_ctx, &delayed_expunges) == 0);
+}
+
+/* Reopen the test index, force a cache purge, then tear everything down. */
+void test_mail_cache_purge(void)
+{
+	struct test_mail_cache_ctx cache_ctx;
+
+	test_mail_cache_init(test_mail_index_open(), &cache_ctx);
+	test_assert(mail_cache_purge(cache_ctx.cache, (uint32_t)-1, "test") == 0);
+	test_mail_cache_deinit(&cache_ctx);
+}
+
+/* Append one mail to the index and (unless cache_field_idx == UINT_MAX)
+   store cache_data for it in the given cache field. Sets uid_validity on
+   the first call. Afterwards ctx->view is synced to the new state. */
+void test_mail_cache_add_mail(struct test_mail_cache_ctx *ctx,
+			      unsigned int cache_field_idx,
+			      const char *cache_data)
+{
+	const struct mail_index_header *hdr = mail_index_get_header(ctx->view);
+	struct mail_index_transaction *trans;
+	struct mail_index_view *updated_view;
+	struct mail_cache_view *cache_view;
+	struct mail_cache_transaction_ctx *cache_trans;
+	uint32_t seq, uid_validity = 12345;
+
+	trans = mail_index_transaction_begin(ctx->view, 0);
+	updated_view = mail_index_transaction_open_updated_view(trans);
+	cache_view = mail_cache_view_open(ctx->cache, updated_view);
+	cache_trans = mail_cache_get_transaction(cache_view, trans);
+
+	/* first mail ever added - initialize uid_validity */
+	if (hdr->uid_validity == 0) {
+		mail_index_update_header(trans,
+			offsetof(struct mail_index_header, uid_validity),
+			&uid_validity, sizeof(uid_validity), TRUE);
+	}
+
+	mail_index_append(trans, hdr->next_uid, &seq);
+	if (cache_field_idx != UINT_MAX) {
+		mail_cache_add(cache_trans, seq, cache_field_idx,
+			       cache_data, strlen(cache_data));
+	}
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+	mail_index_view_close(&updated_view);
+	mail_cache_view_close(&cache_view);
+
+	/* View needs to have the latest changes before purge transaction
+	   is created. */
+	test_mail_cache_view_sync(ctx);
+}
+
+/* Store cache_data into the given cache field for an existing mail (seq),
+   committing the change immediately. */
+void test_mail_cache_add_field(struct test_mail_cache_ctx *ctx, uint32_t seq,
+			       unsigned int cache_field_idx,
+			       const char *cache_data)
+{
+	struct mail_index_transaction *trans;
+	struct mail_cache_view *cache_view;
+	struct mail_cache_transaction_ctx *cache_trans;
+
+	cache_view = mail_cache_view_open(ctx->cache, ctx->view);
+	trans = mail_index_transaction_begin(ctx->view, 0);
+	cache_trans = mail_cache_get_transaction(cache_view, trans);
+	mail_cache_add(cache_trans, seq, cache_field_idx,
+		       cache_data, strlen(cache_data));
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+	mail_cache_view_close(&cache_view);
+}
+
+/* Overwrite the index header's day_first_uid[7] field and sync the view so
+   the change is visible. */
+void test_mail_cache_update_day_first_uid7(struct test_mail_cache_ctx *ctx,
+					   uint32_t first_new_uid)
+{
+	struct mail_index_transaction *trans;
+
+	trans = mail_index_transaction_begin(ctx->view, 0);
+	mail_index_update_header(trans,
+		offsetof(struct mail_index_header, day_first_uid[7]),
+		&first_new_uid, sizeof(first_new_uid), FALSE);
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+	test_mail_cache_view_sync(ctx);
+}
diff --git a/src/lib-index/test-mail-cache-fields.c b/src/lib-index/test-mail-cache-fields.c
new file mode 100644
index 0000000..18b6a33
--- /dev/null
+++ b/src/lib-index/test-mail-cache-fields.c
@@ -0,0 +1,112 @@
+/* Copyright (c) 2021 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "test-common.h"
+#include "test-mail-cache.h"
+
+/* Exercise mail_cache_header_fields_read()/update() round-trips:
+   - reading must overwrite non-dirty in-memory decisions and stale
+     last_used values,
+   - dirty decisions and newer last_used values must survive a read,
+   - update() writes back only fields that exist in the cache file
+     (cache_field2 is registered after the purge, so it stays dirty). */
+static void test_mail_cache_fields_read_write(void)
+{
+	struct mail_cache_field cache_field = {
+		.name = "testfield",
+		.type = MAIL_CACHE_FIELD_STRING,
+		.decision = MAIL_CACHE_DECISION_NO,
+		.last_used = 0x12345678,
+	};
+	struct mail_cache_field cache_field2 = {
+		.name = "testfield2",
+		.type = MAIL_CACHE_FIELD_STRING,
+		.decision = MAIL_CACHE_DECISION_NO,
+		.last_used = 0xaabbccdd,
+	};
+	struct test_mail_cache_ctx ctx;
+
+	test_begin("mail cache fields read-write");
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	mail_cache_register_fields(ctx.cache, &cache_field, 1);
+	test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+	/* after writing the initial cache file, register another cache field
+	   that doesn't exist in it. */
+	mail_cache_register_fields(ctx.cache, &cache_field2, 1);
+
+	struct mail_cache_field_private *priv =
+		&ctx.cache->fields[cache_field.idx];
+	struct mail_cache_field_private *priv2 =
+		&ctx.cache->fields[cache_field2.idx];
+
+	/* No changes */
+	test_assert(mail_cache_header_fields_update(ctx.cache) == 0);
+	test_assert(mail_cache_header_fields_read(ctx.cache) == 0);
+	test_assert(cache_field.last_used == priv->field.last_used &&
+		    cache_field.decision == priv->field.decision);
+	test_assert(cache_field2.last_used == priv2->field.last_used &&
+		    cache_field2.decision == priv2->field.decision);
+
+	/* Replace decision without marking it dirty. Make sure reading
+	   overwrites it. Also make sure an old last_used is overwritten. */
+	priv->field.decision = MAIL_CACHE_DECISION_YES;
+	priv->field.last_used = cache_field.last_used - 1;
+	test_assert(mail_cache_header_fields_read(ctx.cache) == 0);
+	test_assert(cache_field.last_used == priv->field.last_used &&
+		    cache_field.decision == priv->field.decision);
+	test_assert(cache_field2.last_used == priv2->field.last_used &&
+		    cache_field2.decision == priv2->field.decision);
+
+	/* Replace decision and set it dirty. Make sure reading doesn't
+	   overwrite it. Also make sure an old last_used is overwritten. */
+	priv->decision_dirty = TRUE;
+	priv2->decision_dirty = TRUE;
+	priv->field.last_used = cache_field.last_used - 1;
+	priv->field.decision = MAIL_CACHE_DECISION_YES;
+	cache_field.decision = MAIL_CACHE_DECISION_YES;
+	priv2->field.decision = MAIL_CACHE_DECISION_YES;
+	cache_field2.decision = MAIL_CACHE_DECISION_YES;
+	test_assert(mail_cache_header_fields_read(ctx.cache) == 0);
+	test_assert(cache_field.last_used == priv->field.last_used &&
+		    cache_field.decision == priv->field.decision);
+	test_assert(cache_field2.last_used == priv2->field.last_used &&
+		    cache_field2.decision == priv2->field.decision);
+	test_assert(priv->decision_dirty);
+	test_assert(priv2->decision_dirty);
+
+	/* Make sure a new last_used won't get overwritten by read. */
+	priv->field.last_used = ++cache_field.last_used;
+	priv2->field.last_used = ++cache_field2.last_used;
+	test_assert(mail_cache_header_fields_read(ctx.cache) == 0);
+	test_assert(cache_field.last_used == priv->field.last_used &&
+		    cache_field.decision == priv->field.decision);
+	test_assert(cache_field2.last_used == priv2->field.last_used &&
+		    cache_field2.decision == priv2->field.decision);
+
+	/* Write the new decision and last_used. Note that cache_field2
+	   isn't written, because it doesn't exist in the cache file. */
+	test_assert(mail_cache_header_fields_update(ctx.cache) == 0);
+	test_assert(!priv->decision_dirty);
+	test_assert(priv2->decision_dirty);
+	/* make sure reading reads them back, even if they're changed */
+	priv->field.decision = MAIL_CACHE_DECISION_NO;
+	priv->field.last_used = 1;
+	priv2->field.decision = MAIL_CACHE_DECISION_TEMP;
+	priv2->field.last_used = 2;
+	cache_field2.decision = MAIL_CACHE_DECISION_TEMP;
+	cache_field2.last_used = 2;
+	test_assert(mail_cache_header_fields_read(ctx.cache) == 0);
+	test_assert(cache_field.last_used == priv->field.last_used &&
+		    cache_field.decision == priv->field.decision);
+	test_assert(cache_field2.last_used == priv2->field.last_used &&
+		    cache_field2.decision == priv2->field.decision);
+
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+
+	test_end();
+}
+
+/* Run all tests in this file; exit status reflects failures. */
+int main(void)
+{
+	static void (*const test_functions[])(void) = {
+		test_mail_cache_fields_read_write,
+		NULL
+	};
+	return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-cache-purge.c b/src/lib-index/test-mail-cache-purge.c
new file mode 100644
index 0000000..525754b
--- /dev/null
+++ b/src/lib-index/test-mail-cache-purge.c
@@ -0,0 +1,1076 @@
+/* Copyright (c) 2020 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "str.h"
+#include "array.h"
+#include "test-common.h"
+#include "test-mail-cache.h"
+
+#include <stdio.h>
+#include <sys/wait.h>
+
+/* Child-process half of the read-during-purge test: open the same mailbox
+   index via a second handle and verify that the cache field written by the
+   parent is still readable while the parent holds the purge lock. */
+static void test_mail_cache_read_during_purge2(void)
+{
+	struct test_mail_cache_ctx ctx;
+	struct mail_cache_view *cache_view;
+	string_t *str = t_str_new(16);
+
+	/* prefix log output so parent and child messages can be told apart */
+	i_set_failure_prefix("index2: ");
+
+	/* read from cache via 2nd index */
+	test_mail_cache_init(test_mail_index_open(), &ctx);
+
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	test_assert(mail_cache_lookup_field(cache_view, str, 1,
+					    ctx.cache_field.idx) == 1);
+	test_assert(strcmp(str_c(str), "foo1") == 0);
+	mail_cache_view_close(&cache_view);
+
+	test_mail_cache_deinit(&ctx);
+}
+
+/* Verify that a concurrent process can keep reading the cache while this
+   process is in the middle of purging it (purge transaction started but
+   not yet committed). */
+static void test_mail_cache_read_during_purge(void)
+{
+	struct test_mail_cache_ctx ctx;
+	struct mail_index_transaction *trans;
+	int status;
+
+	test_begin("mail cache read during purge");
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+
+	/* lock the index for cache purge */
+	uint32_t log_seq;
+	uoff_t log_offset;
+	test_assert(mail_transaction_log_sync_lock(ctx.index->log, "purge", &log_seq, &log_offset) == 0);
+
+	/* start purging cache using the 1st index, but don't commit yet */
+	trans = mail_index_transaction_begin(ctx.view, 0);
+	test_assert(mail_cache_purge_with_trans(ctx.cache, trans, (uint32_t)-1, "test") == 0);
+
+	switch (fork()) {
+	case (pid_t)-1:
+		i_fatal("fork() failed: %m");
+	case 0:
+		/* child: do the reads, then exit (test_exit() never returns,
+		   so there is no fallthrough into the parent path) */
+		test_mail_cache_read_during_purge2();
+		/* cleanup so valgrind doesn't complain about memory leaks */
+		mail_index_transaction_rollback(&trans);
+		mail_transaction_log_sync_unlock(ctx.index->log, "purge");
+		test_mail_cache_deinit(&ctx);
+		test_exit(test_has_failed() ? 10 : 0);
+	default:
+		break;
+	}
+
+	/* Wait a bit to make sure the child function has had a chance to run.
+	   It's supposed to be waiting on the locked .log file. */
+	usleep(100000);
+	/* finish cache purging */
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+	mail_transaction_log_sync_unlock(ctx.index->log, "purge");
+	mail_index_view_close(&ctx.view);
+
+	/* wait for child to finish execution */
+	if (wait(&status) == -1)
+		i_error("wait() failed: %m");
+	/* child exits 10 on test failure, so status 0 means success */
+	test_assert(status == 0);
+
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+	test_end();
+}
+
+/* Child-process half of the write-during-purge test: append a second cache
+   field to mail 1 through a second index handle while the parent is
+   purging. */
+static void test_mail_cache_write_during_purge2(void)
+{
+	struct test_mail_cache_ctx ctx;
+
+	/* prefix log output so parent and child messages can be told apart */
+	i_set_failure_prefix("index2: ");
+
+	/* add to cache via 2nd index */
+	test_mail_cache_init(test_mail_index_open(), &ctx);
+	test_mail_cache_add_field(&ctx, 1, ctx.cache_field2.idx, "bar2");
+	test_mail_cache_deinit(&ctx);
+}
+
+/* Verify that a write made by another process while this process purges is
+   not lost: after both finish, the pre-purge field and the concurrently
+   written field must both be visible. */
+static void test_mail_cache_write_during_purge(void)
+{
+	struct test_mail_cache_ctx ctx;
+	struct mail_index_view *view;
+	struct mail_cache_view *cache_view;
+	struct mail_index_transaction *trans;
+	string_t *str = t_str_new(16);
+	int status;
+
+	test_begin("mail cache write during purge");
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+
+	/* lock the index for cache purge */
+	uint32_t log_seq;
+	uoff_t log_offset;
+	test_assert(mail_transaction_log_sync_lock(ctx.index->log, "purge", &log_seq, &log_offset) == 0);
+
+	/* start purging cache using the 1st index, but don't commit yet */
+	trans = mail_index_transaction_begin(ctx.view, 0);
+	test_assert(mail_cache_purge_with_trans(ctx.cache, trans, (uint32_t)-1, "test") == 0);
+
+	switch (fork()) {
+	case (pid_t)-1:
+		i_fatal("fork() failed: %m");
+	case 0:
+		/* child: write the 2nd field, then exit (test_exit() never
+		   returns) */
+		test_mail_cache_write_during_purge2();
+		/* cleanup so valgrind doesn't complain about memory leaks */
+		mail_index_transaction_rollback(&trans);
+		mail_transaction_log_sync_unlock(ctx.index->log, "purge");
+		test_mail_cache_deinit(&ctx);
+		test_exit(test_has_failed() ? 10 : 0);
+	default:
+		break;
+	}
+
+	/* Wait a bit to make sure the child function has had a chance to run.
+	   It's supposed to be waiting on the locked .log file. */
+	usleep(100000);
+	/* finish cache purge */
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+	mail_transaction_log_sync_unlock(ctx.index->log, "purge");
+	mail_index_view_close(&ctx.view);
+
+	/* wait for child to finish execution */
+	if (wait(&status) == -1)
+		i_error("wait() failed: %m");
+	test_assert(status == 0);
+
+	/* make sure both cache fields are visible */
+	test_assert(mail_index_refresh(ctx.index) == 0);
+
+	view = mail_index_view_open(ctx.index);
+	cache_view = mail_cache_view_open(ctx.cache, view);
+	test_assert(mail_cache_lookup_field(cache_view, str, 1,
+					    ctx.cache_field.idx) == 1);
+	test_assert(strcmp(str_c(str), "foo1") == 0);
+	str_truncate(str, 0);
+	test_assert(mail_cache_lookup_field(cache_view, str, 1,
+					    ctx.cache_field2.idx) == 1);
+	test_assert(strcmp(str_c(str), "bar2") == 0);
+	mail_cache_view_close(&cache_view);
+	mail_index_view_close(&view);
+
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+	test_end();
+}
+
+/* Verify that a purge in another process blocks on our cache lock and
+   completes successfully once we release it, leaving the cache usable. */
+static void test_mail_cache_purge_while_cache_locked(void)
+{
+	struct test_mail_cache_ctx ctx;
+	struct mail_cache_view *cache_view;
+	string_t *str = t_str_new(16);
+	int status;
+
+	test_begin("mail cache purge while cache locked");
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+
+	/* lock the cache */
+	test_assert(mail_cache_lock(ctx.cache) == 1);
+
+	/* purge the cache in another process */
+	switch (fork()) {
+	case (pid_t)-1:
+		i_fatal("fork() failed: %m");
+	case 0:
+		/* child: purge and exit (test_exit() never returns) */
+		test_mail_cache_purge();
+		test_mail_cache_deinit(&ctx);
+		test_exit(test_has_failed() ? 10 : 0);
+	default:
+		break;
+	}
+
+	/* Wait a bit to make sure the child function has had a chance to run.
+	   It should start purging, which would wait for our cache lock. */
+	usleep(100000);
+
+	mail_cache_unlock(ctx.cache);
+
+	/* wait for child to finish execution */
+	if (wait(&status) == -1)
+		i_error("wait() failed: %m");
+	test_assert(status == 0);
+
+	/* make sure the cache is still usable */
+	test_assert(mail_index_refresh(ctx.index) == 0);
+	test_mail_cache_view_sync(&ctx);
+
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	test_assert(mail_cache_lookup_field(cache_view, str, 1,
+					    ctx.cache_field.idx) == 1);
+	test_assert(strcmp(str_c(str), "foo1") == 0);
+	mail_cache_view_close(&cache_view);
+
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+	test_end();
+}
+
+/* Look up field_idx for message seq and check it against the expected
+   value. A NULL value means the field is expected to be absent from the
+   cache. Records test_assert failures (indexed by seq) and returns TRUE
+   on a match. */
+static bool cache_equals(struct mail_cache_view *cache_view, uint32_t seq,
+			 unsigned int field_idx, const char *value)
+{
+	string_t *str = str_new(default_pool, 128);
+	int ret = mail_cache_lookup_field(cache_view, str, seq, field_idx);
+	bool match;
+
+	if (value == NULL) {
+		/* field must be missing */
+		test_assert_idx(ret == 0, seq);
+		match = (ret == 0);
+	} else {
+		/* field must exist and have the expected contents */
+		test_assert_idx(ret == 1, seq);
+		match = (strcmp(str_c(str), value) == 0);
+		test_assert_idx(match, seq);
+	}
+	str_free(&str);
+	return match;
+}
+
+/* Exercise purging that happens while a cache transaction still has
+   uncommitted writes. A huge field forces the transaction's write buffer
+   to flush mid-transaction; a purge by another index then invalidates the
+   flushed-but-uncommitted records. With commit_saves the appends are
+   committed first, so nothing is lost; without it the first flushed record
+   is lost and an error is logged. */
+static void test_mail_cache_purge_during_write_n(unsigned int num_mails,
+						 bool commit_saves)
+{
+	const struct mail_index_optimization_settings optimization_set = {
+		.cache = {
+			.record_max_size = 1024*1024,
+		},
+	};
+	struct test_mail_cache_ctx ctx;
+	struct mail_index_view *updated_view;
+	struct mail_cache_view *cache_view;
+	struct mail_cache_transaction_ctx *cache_trans;
+	struct mail_index_transaction *trans;
+	uint32_t seq;
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	mail_index_set_optimization_settings(ctx.index, &optimization_set);
+
+	/* Add mails */
+	test_mail_cache_add_mail(&ctx, UINT_MAX, "");
+	trans = mail_index_transaction_begin(ctx.view, 0);
+	for (seq = 2; seq <= num_mails; seq++)
+		mail_index_append(trans, seq, &seq);
+
+	if (commit_saves) {
+		/* commit the appends up front and start a new transaction
+		   for the cache writes */
+		test_assert(mail_index_transaction_commit(&trans) == 0);
+		test_mail_cache_view_sync(&ctx);
+		trans = mail_index_transaction_begin(ctx.view, 0);
+	}
+	/* start adding a small cached field to mail1 */
+	updated_view = mail_index_transaction_open_updated_view(trans);
+	cache_view = mail_cache_view_open(ctx.cache, updated_view);
+	cache_trans = mail_cache_get_transaction(cache_view, trans);
+	mail_cache_add(cache_trans, 1, ctx.cache_field.idx, "foo1", 4);
+
+	/* add a huge field to mail2, which triggers flushing */
+	size_t huge_field_size = MAIL_CACHE_MAX_WRITE_BUFFER + 1024;
+	char *huge_field = i_malloc(huge_field_size + 1);
+	memset(huge_field, 'x', huge_field_size);
+	/* note: i_malloc() zero-fills, so the +1 byte NUL-terminates the
+	   string for the later strcmp()-based cache_equals() checks */
+	mail_cache_add(cache_trans, 2, ctx.cache_field.idx,
+		       huge_field, huge_field_size);
+
+	/* verify that cached fields are still accessible */
+	test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, "foo1"));
+	test_assert(cache_equals(cache_view, 2, ctx.cache_field.idx, huge_field));
+
+	/* purge using a 2nd index */
+	test_mail_cache_purge();
+
+	if (num_mails == 2) {
+		/* the mails are still accessible after purge */
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, "foo1"));
+		test_assert(cache_equals(cache_view, 2, ctx.cache_field.idx, huge_field));
+	} else if (!commit_saves) {
+		/* add 3rd mail, which attempts to flush 2nd mail and finds
+		   that the first mail is already lost */
+		test_expect_error_string("Purging lost 1 written cache records");
+		mail_cache_add(cache_trans, 3, ctx.cache_field.idx, "foo3", 4);
+		test_expect_no_more_errors();
+
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, NULL));
+		test_assert(cache_equals(cache_view, 2, ctx.cache_field.idx, huge_field));
+		test_assert(cache_equals(cache_view, 3, ctx.cache_field.idx, "foo3"));
+	} else {
+		/* add 3rd mail, which commits the first two mails */
+		mail_cache_add(cache_trans, 3, ctx.cache_field.idx, "foo3", 4);
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, "foo1"));
+		test_assert(cache_equals(cache_view, 2, ctx.cache_field.idx, huge_field));
+		test_assert(cache_equals(cache_view, 3, ctx.cache_field.idx, "foo3"));
+	}
+
+	/* finish committing cached fields */
+	if (num_mails == 2 && !commit_saves)
+		test_expect_error_string("Purging lost 1 written cache records");
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+	test_expect_no_more_errors();
+	mail_index_view_close(&updated_view);
+	mail_cache_view_close(&cache_view);
+
+	/* see that we lost the first flush without commit_saves, but not the others */
+	test_mail_cache_view_sync(&ctx);
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	if (commit_saves)
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, "foo1"));
+	else
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, NULL));
+	test_assert(cache_equals(cache_view, 2, ctx.cache_field.idx, huge_field));
+	if (num_mails >= 3)
+		test_assert(cache_equals(cache_view, 3, ctx.cache_field.idx, "foo3"));
+	mail_cache_view_close(&cache_view);
+
+	mail_index_view_close(&ctx.view);
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+	i_free(huge_field);
+}
+
+/* 2 mails, appends not committed: the flushed cache write is lost. */
+static void test_mail_cache_write_lost_during_purge(void)
+{
+	test_begin("mail cache write lost during purge");
+	test_mail_cache_purge_during_write_n(2, FALSE);
+	test_end();
+}
+
+/* 3 mails, appends not committed: loss is detected on the next flush. */
+static void test_mail_cache_write_lost_during_purge2(void)
+{
+	test_begin("mail cache write lost during purge (2)");
+	test_mail_cache_purge_during_write_n(3, FALSE);
+	test_end();
+}
+
+/* 2 mails with appends committed first: nothing is lost by the purge. */
+static void test_mail_cache_write_autocommit(void)
+{
+	test_begin("mail cache write autocommit");
+	test_mail_cache_purge_during_write_n(2, TRUE);
+	test_end();
+}
+
+/* 3 mails with appends committed first: nothing is lost by the purge. */
+static void test_mail_cache_write_autocommit2(void)
+{
+	/* Fix copy-pasted test name: this is the second autocommit variant,
+	   so label it "(2)" like every other paired test in this file. */
+	test_begin("mail cache write autocommit (2)");
+	test_mail_cache_purge_during_write_n(3, TRUE);
+	test_end();
+}
+
+/* Return the largest cache field data size that still fits into a cache
+   file of max_size bytes when current_size bytes are already in use,
+   accounting for the per-record header and the per-field index/size
+   words written alongside the data. */
+static size_t max_field_size(size_t max_size, size_t current_size)
+{
+	const size_t overhead = sizeof(struct mail_cache_record) +
+		sizeof(uint32_t) + /* field_idx */
+		sizeof(uint32_t);  /* data_size */
+
+	return max_size - current_size - overhead;
+}
+
+/* Grow the cache file to exactly max_size (or one byte over, when
+   exceed_on_first_write), and verify that exceeding the limit makes the
+   cache file get deleted. */
+static void test_mail_cache_delete_too_large_int(bool exceed_on_first_write)
+{
+	const struct mail_index_optimization_settings optimization_set = {
+		.cache = {
+			.max_size = 1024,
+		},
+	};
+	struct test_mail_cache_ctx ctx;
+	struct stat st;
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	mail_index_set_optimization_settings(ctx.index, &optimization_set);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo2");
+
+	test_assert(stat(ctx.index->cache->filepath, &st) == 0);
+
+	/* create cache file that is exactly max_size */
+	size_t field_size =
+		max_field_size(optimization_set.cache.max_size, st.st_size);
+	if (exceed_on_first_write) {
+		/* one extra byte pushes the very first write over the
+		   limit, deleting the cache immediately */
+		test_expect_error_string("Cache file too large");
+		field_size++;
+	}
+	char *field = i_malloc(field_size + 1);
+	memset(field, 'x', field_size);
+	test_mail_cache_add_field(&ctx, 1, ctx.cache_field2.idx, field);
+	test_expect_no_more_errors();
+	i_free(field);
+
+	if (!exceed_on_first_write) {
+		test_assert(stat(ctx.index->cache->filepath, &st) == 0);
+		test_assert(st.st_size == 1024);
+
+		/* adding anything more will delete the cache. */
+		test_expect_error_string("Cache file too large");
+		test_mail_cache_add_field(&ctx, 1, ctx.cache_field2.idx, "bar1");
+		test_expect_no_more_errors();
+	}
+	/* in both variants the cache file must be gone by now */
+	test_assert(stat(ctx.index->cache->filepath, &st) < 0 && errno == ENOENT);
+
+	mail_index_view_close(&ctx.view);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+}
+
+/* Cache reaches max_size exactly; the following write deletes it. */
+static void test_mail_cache_delete_too_large(void)
+{
+	test_begin("mail cache delete too large");
+	test_mail_cache_delete_too_large_int(FALSE);
+	test_end();
+}
+
+/* First write already exceeds max_size; the cache is deleted at once. */
+static void test_mail_cache_delete_too_large2(void)
+{
+	test_begin("mail cache delete too large (2)");
+	test_mail_cache_delete_too_large_int(TRUE);
+	test_end();
+}
+
+/* Verify that hitting max_size triggers a purge (instead of deletion)
+   when there is expunged data that purging can reclaim. With exceed_size
+   the limit is crossed and a purge shrinks the file; otherwise the file
+   lands exactly on max_size and no purge happens. */
+static void test_mail_cache_purge_too_large_int(bool exceed_size)
+{
+	const struct mail_index_optimization_settings optimization_set = {
+		.cache = {
+			.max_size = 1024,
+		},
+	};
+	struct mail_index_transaction *trans;
+	struct mail_cache_view *cache_view;
+	struct test_mail_cache_ctx ctx;
+	struct stat st;
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	mail_index_set_optimization_settings(ctx.index, &optimization_set);
+
+	/* add two mails with some cache field and expunge the first mail */
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "bar2");
+	trans = mail_index_transaction_begin(ctx.view, 0);
+	mail_index_expunge(trans, 1);
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+	test_mail_cache_index_sync(&ctx);
+
+	/* Add a second mail whose cache field size is exactly the
+	   max_size [+1 if exceed_size] */
+	test_assert(stat(ctx.index->cache->filepath, &st) == 0);
+	size_t field_size = (exceed_size ? 1 : 0) +
+		max_field_size(optimization_set.cache.max_size, st.st_size);
+	char *field = i_malloc(field_size + 1);
+	memset(field, 'x', field_size);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, field);
+
+	/* a purge drops the expunged mail's data, so the file shrinks below
+	   max_size; without a purge it sits exactly at max_size */
+	test_assert(stat(ctx.index->cache->filepath, &st) == 0);
+	if (exceed_size)
+		test_assert((uoff_t)st.st_size < optimization_set.cache.max_size);
+	else
+		test_assert((uoff_t)st.st_size == optimization_set.cache.max_size);
+
+	/* make sure we still find the cache fields */
+	test_mail_cache_view_sync(&ctx);
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, "bar2"));
+	test_assert(cache_equals(cache_view, 2, ctx.cache_field.idx, field));
+	mail_cache_view_close(&cache_view);
+
+	i_free(field);
+	if (exceed_size)
+		test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+	else
+		test_assert(test_mail_cache_get_purge_count(&ctx) == 0);
+	mail_index_view_close(&ctx.view);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+}
+
+/* File lands exactly on max_size; no purge is expected. */
+static void test_mail_cache_purge_too_large(void)
+{
+	test_begin("mail cache purge too large");
+	test_mail_cache_purge_too_large_int(FALSE);
+	test_end();
+}
+
+/* File exceeds max_size by one byte; a purge must shrink it. */
+static void test_mail_cache_purge_too_large2(void)
+{
+	test_begin("mail cache purge too large (2)");
+	test_mail_cache_purge_too_large_int(TRUE);
+	test_end();
+}
+
+/* Delete the cache file behind the library's back and verify that reads
+   fail gracefully while subsequent writes recreate a working cache. With
+   read_first, do reads against the still-open (now unlinked) file before
+   the failure is noticed. */
+static void test_mail_cache_unexpectedly_lost_int(bool read_first)
+{
+	struct test_mail_cache_ctx ctx;
+	struct mail_cache_view *cache_view;
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+
+	test_mail_cache_purge();
+
+	/* Unexpectedly delete the cache file under us */
+	i_unlink(ctx.cache->filepath);
+
+	if (read_first) {
+		/* the cache file is already open, so initial reading should
+		   work without errors */
+		cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, "foo1"));
+		mail_cache_view_close(&cache_view);
+
+		/* if we refresh the index we get new reset_id, which requires
+		   reopening the cache and that fails */
+		test_assert(mail_index_refresh(ctx.index) == 0);
+		test_mail_cache_view_sync(&ctx);
+		cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+		test_expect_error_string("test.dovecot.index.cache: No such file or directory");
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, NULL));
+		test_expect_no_more_errors();
+		mail_cache_view_close(&cache_view);
+	} else {
+		/* the very next write notices the missing file */
+		test_expect_error_string("test.dovecot.index.cache: No such file or directory");
+	}
+
+	/* writing after losing the cache should still work */
+	test_mail_cache_add_field(&ctx, 1, ctx.cache_field2.idx, "bar1");
+	test_expect_no_more_errors();
+
+	/* verify that the second cache field is found, but first is lost */
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, NULL));
+	test_assert(cache_equals(cache_view, 1, ctx.cache_field2.idx, "bar1"));
+	mail_cache_view_close(&cache_view);
+
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 2);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+}
+
+/* Loss is detected immediately by the next write. */
+static void test_mail_cache_unexpectedly_lost(void)
+{
+	test_begin("mail cache unexpectedly lost");
+	test_mail_cache_unexpectedly_lost_int(FALSE);
+	test_end();
+}
+
+/* Reads via the already-open fd succeed first; loss shows on reopen. */
+static void test_mail_cache_unexpectedly_lost2(void)
+{
+	test_begin("mail cache unexpectedly lost (2)");
+	test_mail_cache_unexpectedly_lost_int(TRUE);
+	test_end();
+}
+
+/* Swap in a stale cache file whose reset_id no longer matches the index,
+   and verify that reads report the mismatch while a subsequent write
+   repairs the cache. */
+static void test_mail_cache_resetid_mismatch_int(bool read_first)
+{
+	struct test_mail_cache_ctx ctx;
+	struct mail_cache_view *cache_view;
+	const char *temp_cache_path;
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+
+	/* make a copy of the first cache file */
+	temp_cache_path = t_strdup_printf("%s.test", ctx.cache->filepath);
+	test_assert(link(ctx.cache->filepath, temp_cache_path) == 0);
+
+	if (read_first) {
+		/* use a secondary index to purge the cache */
+		test_mail_cache_purge();
+
+		/* Replace the new cache file with an old one */
+		test_assert(rename(temp_cache_path, ctx.cache->filepath) == 0);
+
+		/* the cache file is already open, so initial reading should
+		   work without errors */
+		cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, "foo1"));
+		mail_cache_view_close(&cache_view);
+
+		/* if we refresh the index we get new reset_id, which requires
+		   reopening the cache and that fails */
+		test_assert(mail_index_refresh(ctx.index) == 0);
+		test_mail_cache_view_sync(&ctx);
+		cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+
+		test_expect_error_string("reset_id mismatch even after locking");
+		test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, NULL));
+		test_expect_no_more_errors();
+		mail_cache_view_close(&cache_view);
+	} else {
+		/* purge cache to update reset_id in index */
+		test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+
+		/* Replace the new cache file with an old one */
+		test_assert(rename(temp_cache_path, ctx.cache->filepath) == 0);
+
+		/* the mismatch is reported by the following write */
+		test_expect_error_string("reset_id mismatch even after locking");
+	}
+
+	/* writing should automatically fix the reset_id mismatch */
+	test_mail_cache_add_field(&ctx, 1, ctx.cache_field2.idx, "bar1");
+	test_expect_no_more_errors();
+
+	/* verify that the second cache field is found, but first is lost */
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	test_assert(cache_equals(cache_view, 1, ctx.cache_field.idx, NULL));
+	test_assert(cache_equals(cache_view, 1, ctx.cache_field2.idx, "bar1"));
+	mail_cache_view_close(&cache_view);
+
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 2);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+}
+
+/* Mismatch noticed on the first write after the swap. */
+static void test_mail_cache_resetid_mismatch(void)
+{
+	test_begin("mail cache resetid mismatch");
+	test_mail_cache_resetid_mismatch_int(FALSE);
+	test_end();
+}
+
+/* Reads via the open fd succeed first; mismatch shows after refresh. */
+static void test_mail_cache_resetid_mismatch2(void)
+{
+	test_begin("mail cache resetid mismatch (2)");
+	test_mail_cache_resetid_mismatch_int(TRUE);
+	test_end();
+}
+
+/* How far in the past the fields' last_used stamps are pushed before
+   purging, relative to unaccessed_field_drop_secs (see the secs
+   computation in test_mail_cache_purge_field_changes_int()). */
+enum test_drop {
+	TEST_DROP_NOTHING,		/* within drop_secs: keep all decisions */
+	TEST_DROP_YES_TO_TEMP_FIRST,	/* drop_secs+1: YES drops to TEMP */
+	TEST_DROP_YES_TO_TEMP_LAST,	/* 2*drop_secs: YES still only TEMP */
+	TEST_DROP_TEMP_TO_NO,		/* 2*drop_secs+1: TEMP drops to NO */
+};
+
+/* Exercise how purging ages caching decisions: non-FORCED YES/TEMP fields
+   are downgraded (YES -> TEMP -> NO) based on how long ago they were last
+   used, while FORCED decisions never change. Also checks which cached
+   values survive the purge for "old" vs "new" mails. */
+static void test_mail_cache_purge_field_changes_int(enum test_drop drop)
+{
+	/* indexes into cache_fields[] below */
+	enum {
+		TEST_FIELD_NO,
+		TEST_FIELD_NO_FORCED,
+		TEST_FIELD_TEMP,
+		TEST_FIELD_TEMP_FORCED,
+		TEST_FIELD_YES,
+		TEST_FIELD_YES_FORCED,
+	};
+	struct mail_cache_field cache_fields[] = {
+		{
+			.name = "no",
+			.type = MAIL_CACHE_FIELD_STRING,
+			.decision = MAIL_CACHE_DECISION_NO,
+		},
+		{
+			.name = "no-forced",
+			.type = MAIL_CACHE_FIELD_STRING,
+			.decision = MAIL_CACHE_DECISION_NO | MAIL_CACHE_DECISION_FORCED,
+		},
+		{
+			.name = "temp",
+			.type = MAIL_CACHE_FIELD_STRING,
+			.decision = MAIL_CACHE_DECISION_TEMP,
+		},
+		{
+			.name = "temp-forced",
+			.type = MAIL_CACHE_FIELD_STRING,
+			.decision = MAIL_CACHE_DECISION_TEMP | MAIL_CACHE_DECISION_FORCED,
+		},
+		{
+			.name = "yes",
+			.type = MAIL_CACHE_FIELD_STRING,
+			.decision = MAIL_CACHE_DECISION_YES,
+		},
+		{
+			.name = "yes-forced",
+			.type = MAIL_CACHE_FIELD_STRING,
+			.decision = MAIL_CACHE_DECISION_YES | MAIL_CACHE_DECISION_FORCED,
+		},
+	};
+	const struct mail_index_optimization_settings optimization_set = {
+		.cache = {
+			.unaccessed_field_drop_secs = 61,
+		},
+	};
+	struct test_mail_cache_ctx ctx;
+	struct mail_cache_view *cache_view;
+	struct mail_cache_transaction_ctx *cache_trans;
+	struct mail_index_transaction *trans;
+	unsigned int i;
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	mail_index_set_optimization_settings(ctx.index, &optimization_set);
+
+	/* add two mails with all of the cache fields */
+	test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+	test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+
+	/* Create the cache file before registering any of the cache_fields
+	   that we're testing. Otherwise our caching decisions are messed up
+	   by purging (which is called to auto-create the cache). */
+	test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+	mail_cache_register_fields(ctx.cache, cache_fields,
+				   N_ELEMENTS(cache_fields));
+
+	trans = mail_index_transaction_begin(ctx.view, 0);
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	cache_trans = mail_cache_get_transaction(cache_view, trans);
+	for (i = 0; i < N_ELEMENTS(cache_fields); i++) {
+		const char *value = t_strdup_printf("%s-value",
+						    cache_fields[i].name);
+		/* add values for every field except the NO-decision ones
+		   (the FORCED bit is masked away for this comparison) */
+		if ((cache_fields[i].decision & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED)) !=
+		    MAIL_CACHE_DECISION_NO) {
+			mail_cache_add(cache_trans, 1, cache_fields[i].idx,
+				       value, strlen(value));
+			mail_cache_add(cache_trans, 2, cache_fields[i].idx,
+				       value, strlen(value));
+		}
+	}
+
+	/* day_stamp in index is used for deciding when a cache field needs to
+	   be dropped. */
+	uint32_t day_stamp = 123456789;
+	mail_index_update_header(trans,
+		offsetof(struct mail_index_header, day_stamp),
+		&day_stamp, sizeof(day_stamp), FALSE);
+	/* day_first_uid[7] is used to determine which mails are "old" and
+	   which mails are "new". [7] is the first "new" mail. */
+	uint32_t first_new_uid = 2;
+	mail_index_update_header(trans,
+		offsetof(struct mail_index_header, day_first_uid[7]),
+		&first_new_uid, sizeof(first_new_uid), FALSE);
+	test_assert(mail_index_transaction_commit(&trans) == 0);
+
+	/* set the last_used time just at the boundary of being dropped or
+	   being kept */
+	for (i = 0; i < ctx.cache->fields_count; i++) {
+		unsigned int secs = optimization_set.cache.unaccessed_field_drop_secs;
+		switch (drop) {
+		case TEST_DROP_NOTHING:
+			break;
+		case TEST_DROP_YES_TO_TEMP_FIRST:
+			secs++;
+			break;
+		case TEST_DROP_YES_TO_TEMP_LAST:
+			secs *= 2;
+			break;
+		case TEST_DROP_TEMP_TO_NO:
+			secs *= 2;
+			secs++;
+			break;
+		}
+		ctx.cache->fields[i].field.last_used = day_stamp - secs;
+	}
+	test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+	test_mail_cache_view_sync(&ctx);
+
+	/* verify that caching decisions are as expected after purging */
+	test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_NO].idx].field.decision ==
+		    MAIL_CACHE_DECISION_NO);
+	test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_NO_FORCED].idx].field.decision ==
+		    (MAIL_CACHE_DECISION_NO | MAIL_CACHE_DECISION_FORCED));
+	test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_TEMP_FORCED].idx].field.decision ==
+		    (MAIL_CACHE_DECISION_TEMP | MAIL_CACHE_DECISION_FORCED));
+	test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_YES_FORCED].idx].field.decision ==
+		    (MAIL_CACHE_DECISION_YES | MAIL_CACHE_DECISION_FORCED));
+
+	switch (drop) {
+	case TEST_DROP_NOTHING:
+		test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_TEMP].idx].field.decision ==
+			    MAIL_CACHE_DECISION_TEMP);
+		test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_YES].idx].field.decision ==
+			    MAIL_CACHE_DECISION_YES);
+		break;
+	case TEST_DROP_YES_TO_TEMP_FIRST:
+	case TEST_DROP_YES_TO_TEMP_LAST:
+		test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_TEMP].idx].field.decision ==
+			    MAIL_CACHE_DECISION_TEMP);
+		test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_YES].idx].field.decision ==
+			    MAIL_CACHE_DECISION_TEMP);
+		break;
+	case TEST_DROP_TEMP_TO_NO:
+		test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_TEMP].idx].field.decision ==
+			    MAIL_CACHE_DECISION_NO);
+		test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_YES].idx].field.decision ==
+			    MAIL_CACHE_DECISION_NO);
+		break;
+	}
+
+	/* verify that cache fields exist as expected after purging */
+	test_assert(cache_equals(cache_view, 1, cache_fields[TEST_FIELD_NO].idx, NULL));
+	test_assert(cache_equals(cache_view, 2, cache_fields[TEST_FIELD_NO].idx, NULL));
+	test_assert(cache_equals(cache_view, 1, cache_fields[TEST_FIELD_NO_FORCED].idx, NULL));
+	test_assert(cache_equals(cache_view, 2, cache_fields[TEST_FIELD_NO_FORCED].idx, NULL));
+	test_assert(cache_equals(cache_view, 1, cache_fields[TEST_FIELD_TEMP].idx, NULL));
+	if (drop == TEST_DROP_TEMP_TO_NO)
+		test_assert(cache_equals(cache_view, 2, cache_fields[TEST_FIELD_TEMP].idx, NULL));
+	else
+		test_assert(cache_equals(cache_view, 2, cache_fields[TEST_FIELD_TEMP].idx, "temp-value"));
+	test_assert(cache_equals(cache_view, 1, cache_fields[TEST_FIELD_TEMP_FORCED].idx, NULL));
+	test_assert(cache_equals(cache_view, 2, cache_fields[TEST_FIELD_TEMP_FORCED].idx, "temp-forced-value"));
+	if (drop != TEST_DROP_NOTHING)
+		test_assert(cache_equals(cache_view, 1, cache_fields[TEST_FIELD_YES].idx, NULL));
+	else
+		test_assert(cache_equals(cache_view, 1, cache_fields[TEST_FIELD_YES].idx, "yes-value"));
+	test_assert(cache_equals(cache_view, 2, cache_fields[TEST_FIELD_YES_FORCED].idx, "yes-forced-value"));
+
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+	mail_cache_view_close(&cache_view);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+}
+
+/* Fields are recent enough that no decision is downgraded. */
+static void test_mail_cache_purge_field_changes(void)
+{
+	test_begin("mail cache purge field changes (nothing)");
+	test_mail_cache_purge_field_changes_int(TEST_DROP_NOTHING);
+	test_end();
+}
+
+/* Just past drop_secs: YES decisions drop to TEMP. */
+static void test_mail_cache_purge_field_changes2(void)
+{
+	test_begin("mail cache purge field changes (yes -> temp, first)");
+	test_mail_cache_purge_field_changes_int(TEST_DROP_YES_TO_TEMP_FIRST);
+	test_end();
+}
+
+/* At 2*drop_secs: YES decisions still only drop to TEMP. */
+static void test_mail_cache_purge_field_changes3(void)
+{
+	test_begin("mail cache purge field changes (yes -> temp, last)");
+	test_mail_cache_purge_field_changes_int(TEST_DROP_YES_TO_TEMP_LAST);
+	test_end();
+}
+
+/* Past 2*drop_secs: TEMP (and aged YES) decisions drop to NO. */
+static void test_mail_cache_purge_field_changes4(void)
+{
+	test_begin("mail cache purge field changes (temp -> no)");
+	test_mail_cache_purge_field_changes_int(TEST_DROP_TEMP_TO_NO);
+	test_end();
+}
+
+/* Purging with a file_seq belonging to an already-purged generation must
+   be a no-op, while a newer file_seq triggers exactly one new purge. */
+static void test_mail_cache_purge_already_done(void)
+{
+	static const struct {
+		uint32_t file_seq;
+		unsigned int expected_purges;
+	} steps[] = {
+		{ 1, 1 },	/* already handled by test_mail_cache_purge() */
+		{ 2, 2 },	/* newer file_seq -> purges again */
+		{ 2, 2 },	/* same file_seq repeated -> no-op */
+	};
+	struct test_mail_cache_ctx ctx;
+	unsigned int i;
+
+	test_begin("mail cache purge already done");
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "foo1");
+
+	test_mail_cache_purge();
+	for (i = 0; i < N_ELEMENTS(steps); i++) {
+		test_assert(mail_cache_purge(ctx.cache, steps[i].file_seq,
+					     "test") == 0);
+		test_assert(test_mail_cache_get_purge_count(&ctx) ==
+			    steps[i].expected_purges);
+	}
+
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+	test_end();
+}
+
+/* Verify that a bitmask field with only a TEMP decision is dropped for all
+   mails by a purge, even when its last_used is current. */
+static void test_mail_cache_purge_bitmask(void)
+{
+	struct mail_index_optimization_settings optimization_set = {
+		.cache = {
+			.unaccessed_field_drop_secs = 60,
+		},
+	};
+	struct mail_cache_field bitmask_field = {
+		.name = "bitmask",
+		.type = MAIL_CACHE_FIELD_BITMASK,
+		.field_size = 1,
+		.decision = MAIL_CACHE_DECISION_TEMP,
+	};
+	struct test_mail_cache_ctx ctx;
+	struct mail_cache_view *cache_view;
+
+	test_begin("mail cache purge bitmask");
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	mail_index_set_optimization_settings(ctx.index, &optimization_set);
+	/* pin the clock so last_used math below is deterministic */
+	ioloop_time = 1000000;
+	test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+	test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+	/* create the cache file before registering the test field */
+	test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+	mail_cache_register_fields(ctx.cache, &bitmask_field, 1);
+
+	test_mail_cache_update_day_first_uid7(&ctx, 3);
+
+	/* OR bits into the bitmask of both mails */
+	test_mail_cache_add_field(&ctx, 1, bitmask_field.idx, "\x01");
+	test_mail_cache_add_field(&ctx, 1, bitmask_field.idx, "\x02");
+	test_mail_cache_add_field(&ctx, 1, bitmask_field.idx, "\x04");
+	test_mail_cache_add_field(&ctx, 2, bitmask_field.idx, "\x01");
+	test_mail_cache_add_field(&ctx, 2, bitmask_field.idx, "\x02");
+	test_mail_cache_add_field(&ctx, 2, bitmask_field.idx, "\x04");
+
+	/* avoid dropping the field */
+	ctx.cache->fields[bitmask_field.idx].field.last_used = ioloop_time;
+
+	/* purge with TEMP decision, which causes the bitmask to be dropped */
+	test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+
+	cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+	test_assert(cache_equals(cache_view, 1, bitmask_field.idx, NULL));
+	test_assert(cache_equals(cache_view, 2, bitmask_field.idx, NULL));
+	mail_cache_view_close(&cache_view);
+
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+	test_end();
+}
+
+
+/* Verify that need_purge_file_seq is set once the share of continued
+   records reaches purge_continued_percentage (30%), but only when the
+   file is at least purge_min_size. */
+static void
+test_mail_cache_update_need_purge_continued_records_int(bool big_min_size)
+{
+	struct mail_index_optimization_settings optimization_set = {
+		.cache = {
+			.purge_min_size = big_min_size ? 1024*1024 : 1,
+			.purge_continued_percentage = 30,
+		},
+	};
+	char value[30];
+	struct test_mail_cache_ctx ctx;
+	uint32_t seq;
+
+	test_mail_cache_init(test_mail_index_init(), &ctx);
+	mail_index_set_optimization_settings(ctx.index, &optimization_set);
+
+	/* 100 mails, one record each */
+	for (seq = 1; seq <= 100; seq++) {
+		i_snprintf(value, sizeof(value), "foo%d", seq);
+		test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, value);
+	}
+
+	/* up to 29% no need to purge */
+	for (seq = 1; seq <= 29; seq++) {
+		i_snprintf(value, sizeof(value), "bar%d", seq);
+		test_mail_cache_add_field(&ctx, seq, ctx.cache_field2.idx, value);
+	}
+	test_assert(ctx.cache->need_purge_file_seq == 0);
+
+	/* at 30% need to purge */
+	test_mail_cache_add_field(&ctx, 30, ctx.cache_field2.idx, "bar30");
+	if (big_min_size)
+		test_assert(ctx.cache->need_purge_file_seq == 0);
+	else
+		test_assert(ctx.cache->need_purge_file_seq == ctx.cache->hdr->file_seq);
+
+	/* only the need-flag is set; no purge actually ran */
+	test_assert(test_mail_cache_get_purge_count(&ctx) == 0);
+	test_mail_cache_deinit(&ctx);
+	test_mail_index_delete();
+}
+
+static void test_mail_cache_update_need_purge_continued_records(void)
+{
+ test_begin("mail cache update need purge continued records");
+ test_mail_cache_update_need_purge_continued_records_int(FALSE);
+ test_end();
+}
+
+static void test_mail_cache_update_need_purge_continued_records2(void)
+{
+ test_begin("mail cache update need purge continued records (2)");
+ test_mail_cache_update_need_purge_continued_records_int(TRUE);
+ test_end();
+}
+
+static void
+test_mail_cache_update_need_purge_deleted_records_int(bool big_min_size)
+{
+ struct mail_index_optimization_settings optimization_set = {
+ .cache = {
+ .purge_min_size = big_min_size ? 1024*1024 : 1,
+ .purge_delete_percentage = 30,
+ },
+ };
+ char value[30];
+ struct mail_index_transaction *trans;
+ struct test_mail_cache_ctx ctx;
+ uint32_t seq;
+
+ test_mail_cache_init(test_mail_index_init(), &ctx);
+ mail_index_set_optimization_settings(ctx.index, &optimization_set);
+
+ for (seq = 1; seq <= 100; seq++) {
+ i_snprintf(value, sizeof(value), "foo%d", seq);
+ test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, value);
+ }
+
+ /* up to 29% no need to purge */
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ for (seq = 1; seq <= 29; seq++) {
+ i_snprintf(value, sizeof(value), "bar%d", seq);
+ mail_index_expunge(trans, seq);
+ }
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ test_mail_cache_index_sync(&ctx);
+
+ test_assert(ctx.cache->need_purge_file_seq == 0);
+ test_assert(mail_cache_reopen(ctx.cache) == 1);
+ test_assert(ctx.cache->need_purge_file_seq == 0);
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 0);
+
+ /* at 30% need to purge */
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ mail_index_expunge(trans, 1);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ /* syncing will internally purge if !big_min_size */
+ test_mail_cache_index_sync(&ctx);
+
+ test_assert(ctx.cache->need_purge_file_seq == 0);
+ test_assert(mail_cache_reopen(ctx.cache) == 1);
+ test_assert(ctx.cache->need_purge_file_seq == 0);
+ if (big_min_size)
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 0);
+ else
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+}
+
+static void test_mail_cache_update_need_purge_deleted_records(void)
+{
+ test_begin("mail cache update need purge deleted records");
+ test_mail_cache_update_need_purge_deleted_records_int(FALSE);
+ test_end();
+}
+
+static void test_mail_cache_update_need_purge_deleted_records2(void)
+{
+ test_begin("mail cache update need purge deleted records (2)");
+ test_mail_cache_update_need_purge_deleted_records_int(TRUE);
+ test_end();
+}
+
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_cache_read_during_purge,
+ test_mail_cache_write_during_purge,
+ test_mail_cache_purge_while_cache_locked,
+ test_mail_cache_write_lost_during_purge,
+ test_mail_cache_write_lost_during_purge2,
+ test_mail_cache_write_autocommit,
+ test_mail_cache_write_autocommit2,
+ test_mail_cache_delete_too_large,
+ test_mail_cache_delete_too_large2,
+ test_mail_cache_purge_too_large,
+ test_mail_cache_purge_too_large2,
+ test_mail_cache_unexpectedly_lost,
+ test_mail_cache_unexpectedly_lost2,
+ test_mail_cache_resetid_mismatch,
+ test_mail_cache_resetid_mismatch2,
+ test_mail_cache_purge_field_changes,
+ test_mail_cache_purge_field_changes2,
+ test_mail_cache_purge_field_changes3,
+ test_mail_cache_purge_field_changes4,
+ test_mail_cache_purge_already_done,
+ test_mail_cache_purge_bitmask,
+ test_mail_cache_update_need_purge_continued_records,
+ test_mail_cache_update_need_purge_continued_records2,
+ test_mail_cache_update_need_purge_deleted_records,
+ test_mail_cache_update_need_purge_deleted_records2,
+ NULL
+ };
+ return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-cache.c b/src/lib-index/test-mail-cache.c
new file mode 100644
index 0000000..14b3fb6
--- /dev/null
+++ b/src/lib-index/test-mail-cache.c
@@ -0,0 +1,764 @@
+/* Copyright (c) 2020 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "str.h"
+#include "write-full.h"
+#include "test-common.h"
+#include "test-mail-cache.h"
+
+struct test_header_data {
+ uint32_t line1, line2;
+ uint32_t end_of_lines;
+ char headers[8];
+};
+
+enum {
+ TEST_FIELD_NO,
+ TEST_FIELD_NO_FORCED,
+ TEST_FIELD_TEMP,
+ TEST_FIELD_TEMP_FORCED,
+ TEST_FIELD_YES,
+ TEST_FIELD_YES_FORCED,
+ TEST_FIELD_COUNT,
+};
+static const struct mail_cache_field decision_cache_fields[TEST_FIELD_COUNT] = {
+ {
+ .name = "no",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_NO,
+ },
+ {
+ .name = "no-forced",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_NO | MAIL_CACHE_DECISION_FORCED,
+ },
+ {
+ .name = "temp",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_TEMP,
+ },
+ {
+ .name = "temp-forced",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_TEMP | MAIL_CACHE_DECISION_FORCED,
+ },
+ {
+ .name = "yes",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_YES,
+ },
+ {
+ .name = "yes-forced",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_YES | MAIL_CACHE_DECISION_FORCED,
+ },
+};
+
+static void test_mail_cache_fields(void)
+{
+ enum {
+ TEST_FIELD_FIXED,
+ TEST_FIELD_VARIABLE,
+ TEST_FIELD_STRING,
+ TEST_FIELD_BITMASK,
+ TEST_FIELD_HEADER1,
+ TEST_FIELD_HEADER2,
+ };
+ struct mail_cache_field cache_fields[] = {
+ {
+ .name = "fixed",
+ .type = MAIL_CACHE_FIELD_FIXED_SIZE,
+ .field_size = 4,
+ .decision = MAIL_CACHE_DECISION_YES,
+ },
+ {
+ .name = "variable",
+ .type = MAIL_CACHE_FIELD_VARIABLE_SIZE,
+ .decision = MAIL_CACHE_DECISION_YES,
+ },
+ {
+ .name = "string",
+ .type = MAIL_CACHE_FIELD_STRING,
+ .decision = MAIL_CACHE_DECISION_YES,
+ },
+ {
+ .name = "bitmask",
+ .type = MAIL_CACHE_FIELD_BITMASK,
+ .field_size = 4,
+ .decision = MAIL_CACHE_DECISION_YES,
+ },
+ {
+ .name = "header1",
+ .type = MAIL_CACHE_FIELD_HEADER,
+ .decision = MAIL_CACHE_DECISION_YES,
+ },
+ {
+ .name = "header2",
+ .type = MAIL_CACHE_FIELD_HEADER,
+ .decision = MAIL_CACHE_DECISION_YES,
+ },
+ };
+ struct test_mail_cache_ctx ctx;
+ struct mail_index_transaction *trans;
+ struct mail_cache_view *cache_view;
+ struct mail_cache_transaction_ctx *cache_trans;
+ string_t *str = t_str_new(16);
+
+ test_begin("mail cache uncommitted lookups");
+ test_mail_cache_init(test_mail_index_init(), &ctx);
+ mail_cache_register_fields(ctx.cache, cache_fields,
+ N_ELEMENTS(cache_fields));
+
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+
+ /* add the cache fields */
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ cache_trans = mail_cache_get_transaction(cache_view, trans);
+
+ const uint8_t fixed_data[] = { 0x12, 0x34, 0x56, 0x78 };
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_FIXED].idx,
+ fixed_data, sizeof(fixed_data));
+ const uint8_t variable_data[] = { 0xab, 0xcd, 0xef };
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_VARIABLE].idx,
+ variable_data, sizeof(variable_data));
+ const char string_data[] = { 's', 't', 'r' };
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_STRING].idx,
+ string_data, sizeof(string_data));
+ uint8_t bitmask_data[] = { 0x00, 0x01, 0x10, 0x11 };
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_BITMASK].idx,
+ bitmask_data, sizeof(bitmask_data));
+ struct test_header_data header_data1 = {
+ .line1 = 15,
+ .line2 = 30,
+ .headers = "foo\nbar\n",
+ };
+ struct test_header_data header_data2 = {
+ .line1 = 10,
+ .line2 = 20,
+ .headers = "123\n456\n",
+ };
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_HEADER1].idx,
+ &header_data1, sizeof(header_data1));
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_HEADER2].idx,
+ &header_data2, sizeof(header_data2));
+
+ /* make sure the fields can be looked up even though they're
+ not committed */
+ for (int i = 0;; i++) {
+ str_truncate(str, 0);
+ test_assert_idx(mail_cache_lookup_field(cache_view, str, 1,
+ cache_fields[TEST_FIELD_FIXED].idx) == 1, i);
+ test_assert_idx(str_len(str) == sizeof(fixed_data) &&
+ memcmp(str_data(str), fixed_data, str_len(str)) == 0, i);
+ str_truncate(str, 0);
+ test_assert_idx(mail_cache_lookup_field(cache_view, str, 1,
+ cache_fields[TEST_FIELD_VARIABLE].idx) == 1, i);
+ test_assert_idx(str_len(str) == sizeof(variable_data) &&
+ memcmp(str_data(str), variable_data, str_len(str)) == 0, i);
+ str_truncate(str, 0);
+ test_assert_idx(mail_cache_lookup_field(cache_view, str, 1,
+ cache_fields[TEST_FIELD_STRING].idx) == 1, i);
+ test_assert_idx(str_len(str) == sizeof(string_data) &&
+ memcmp(str_data(str), string_data, str_len(str)) == 0, i);
+ str_truncate(str, 0);
+ test_assert_idx(mail_cache_lookup_field(cache_view, str, 1,
+ cache_fields[TEST_FIELD_BITMASK].idx) == 1, i);
+ test_assert_idx(str_len(str) == sizeof(bitmask_data) &&
+ memcmp(str_data(str), bitmask_data, str_len(str)) == 0, i);
+ const unsigned int lookup_header_fields[] = {
+ cache_fields[TEST_FIELD_HEADER2].idx,
+ cache_fields[TEST_FIELD_HEADER1].idx,
+ };
+ str_truncate(str, 0);
+ test_assert_idx(mail_cache_lookup_headers(cache_view, str, 1,
+ lookup_header_fields,
+ N_ELEMENTS(lookup_header_fields)) == 1, i);
+ test_assert_strcmp(str_c(str), "123\nfoo\n456\nbar\n");
+
+ if (trans == NULL)
+ break;
+
+ /* add more bitmask data within the same transaction */
+ uint8_t bitmask_add[4] = { 0x20, 0x20, 0x20, 0x20 };
+ for (unsigned int j = 0; j < sizeof(bitmask_data); j++)
+ bitmask_data[j] |= bitmask_add[j];
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_BITMASK].idx,
+ bitmask_add, sizeof(bitmask_add));
+ /* check that we can still read it */
+ str_truncate(str, 0);
+ test_assert_idx(mail_cache_lookup_field(cache_view, str, 1,
+ cache_fields[TEST_FIELD_BITMASK].idx) == 1, i);
+ test_assert_idx(str_len(str) == sizeof(bitmask_data) &&
+ memcmp(str_data(str), bitmask_data, str_len(str)) == 0, i);
+
+ /* commit the transaction and lookup the fields again */
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ }
+
+ /* add more bitmask data in separate transactions */
+ for (unsigned int i = 0; i < 4; i++) {
+ uint8_t bitmask_add[4] = { 0, 0, 0, 0 };
+ bitmask_add[i] = 0x40;
+ bitmask_data[i] |= 0x40;
+
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ cache_trans = mail_cache_get_transaction(cache_view, trans);
+ mail_cache_add(cache_trans, 1, cache_fields[TEST_FIELD_BITMASK].idx,
+ bitmask_add, sizeof(bitmask_add));
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ }
+
+ /* verify that bitmask is still as expected */
+ str_truncate(str, 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ cache_fields[TEST_FIELD_BITMASK].idx) == 1);
+ test_assert(str_len(str) == sizeof(bitmask_data) &&
+ memcmp(str_data(str), bitmask_data, str_len(str)) == 0);
+
+ /* verify that bitmask is still as expected after purging */
+ test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+ test_mail_cache_view_sync(&ctx);
+
+ str_truncate(str, 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ cache_fields[TEST_FIELD_BITMASK].idx) == 1);
+ test_assert(str_len(str) == sizeof(bitmask_data) &&
+ memcmp(str_data(str), bitmask_data, str_len(str)) == 0);
+
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+ mail_cache_view_close(&cache_view);
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+ test_end();
+}
+
+static void test_mail_cache_record_max_size_int(unsigned int field3_size)
+{
+ const struct mail_index_optimization_settings optimization_set = {
+ .cache = {
+			/* let's assume we can write 2 cache fields,
+ each containing 8 bytes */
+ .record_max_size = sizeof(struct mail_cache_record) +
+ 2 * (sizeof(uint32_t) + /* field_idx */
+ sizeof(uint32_t) + /* data_size */
+ 8), /* content max length */
+ },
+ };
+ struct test_mail_cache_ctx ctx;
+ struct mail_index_transaction *trans;
+ struct mail_cache_view *cache_view;
+ struct mail_cache_transaction_ctx *cache_trans;
+ string_t *str = t_str_new(16);
+
+ test_mail_cache_init(test_mail_index_init(), &ctx);
+ mail_index_set_optimization_settings(ctx.index, &optimization_set);
+
+ /* Add the first cache field. In a chain of cache records each one
+ has independent max size. Although this isn't really ideal, because
+ purging merges them and drops the records entirely if the combined
+	   length is too large. But for now at least test what is
+ implemented. */
+ test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "12345678");
+
+ /* add the other field(s) */
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ cache_trans = mail_cache_get_transaction(cache_view, trans);
+ mail_cache_add(cache_trans, 1, ctx.cache_field2.idx, "abcdefgh", 8);
+ if (field3_size > 0)
+ mail_cache_add(cache_trans, 1, ctx.cache_field3.idx, "ijklmnopq", field3_size);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ mail_cache_view_close(&cache_view);
+
+ /* make sure all the fields are visible */
+ test_mail_cache_view_sync(&ctx);
+
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field.idx) == 1);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field2.idx) == 1);
+ if (field3_size == 8) {
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field3.idx) == 1);
+ test_assert_strcmp(str_c(str), "12345678abcdefghijklmnop");
+ } else {
+ test_assert_strcmp(str_c(str), "12345678abcdefgh");
+ }
+ mail_cache_view_close(&cache_view);
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 0);
+
+ /* if there are 3 fields, purging realizes that the record is too
+ large and drops it */
+ test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+ test_assert(mail_cache_reopen(ctx.cache) == 1);
+
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+ if (field3_size == 8) {
+ /* test that none of the fields are in cache */
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field.idx) == 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field2.idx) == 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field3.idx) == 0);
+ } else {
+ str_truncate(str, 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field.idx) == 1);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field2.idx) == 1);
+ test_assert_strcmp(str_c(str), "12345678abcdefgh");
+ }
+ mail_cache_view_close(&cache_view);
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 1);
+
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+}
+
+static void test_mail_cache_record_max_size(void)
+{
+ test_begin("mail cache record max size");
+ test_mail_cache_record_max_size_int(0);
+ test_end();
+}
+
+static void test_mail_cache_record_max_size2(void)
+{
+ test_begin("mail cache record max size (2)");
+ test_mail_cache_record_max_size_int(8);
+ test_end();
+}
+
+static void test_mail_cache_record_max_size3(void)
+{
+ test_begin("mail cache record max size (3)");
+ test_mail_cache_record_max_size_int(9);
+ test_end();
+}
+
+static void test_mail_cache_record_max_size4(void)
+{
+ const struct mail_index_optimization_settings optimization_set = {
+ .cache = {
+ .record_max_size = sizeof(struct mail_cache_record) +
+ sizeof(uint32_t) + /* field_idx */
+ sizeof(uint32_t) + /* data_size */
+ 8, /* content max length */
+ },
+ };
+ struct test_mail_cache_ctx ctx;
+ struct mail_index_transaction *trans;
+ struct mail_cache_view *cache_view;
+ struct mail_cache_transaction_ctx *cache_trans;
+ string_t *str = t_str_new(16);
+
+ test_begin("mail cache record max size (4)");
+ test_mail_cache_init(test_mail_index_init(), &ctx);
+ mail_index_set_optimization_settings(ctx.index, &optimization_set);
+
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ cache_trans = mail_cache_get_transaction(cache_view, trans);
+ mail_cache_add(cache_trans, 1, ctx.cache_field.idx, "123456789", 9);
+ mail_cache_add(cache_trans, 2, ctx.cache_field.idx, "123456789", 9);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ mail_cache_view_close(&cache_view);
+
+ /* make sure none of the fields are visible */
+ test_mail_cache_view_sync(&ctx);
+
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field.idx) == 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 2,
+ ctx.cache_field.idx) == 0);
+ mail_cache_view_close(&cache_view);
+ test_assert(ctx.cache->hdr == NULL); /* never created */
+
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+ test_end();
+}
+
+static void test_mail_cache_add_decisions(void)
+{
+ struct mail_cache_field cache_fields[TEST_FIELD_COUNT];
+ enum mail_cache_decision_type expected_decisions[TEST_FIELD_COUNT];
+ struct test_mail_cache_ctx ctx;
+ struct mail_index_transaction *trans;
+ struct mail_cache_view *cache_view;
+ struct mail_cache_transaction_ctx *cache_trans;
+ unsigned int i;
+
+ test_begin("mail cache add decisions");
+
+ test_mail_cache_init(test_mail_index_init(), &ctx);
+ memcpy(cache_fields, decision_cache_fields, sizeof(cache_fields));
+ mail_cache_register_fields(ctx.cache, cache_fields, TEST_FIELD_COUNT);
+ for (i = 0; i < TEST_FIELD_COUNT; i++)
+ expected_decisions[i] = cache_fields[i].decision;
+
+ /* create the initial cache file */
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+ test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+
+ /* check that decisions haven't changed */
+ for (i = 0; i < TEST_FIELD_COUNT; i++)
+ test_assert_idx(ctx.cache->fields[cache_fields[i].idx].field.decision == expected_decisions[i], i);
+
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ cache_trans = mail_cache_get_transaction(cache_view, trans);
+
+ /* test that when cache decisions are disabled, it doesn't affect the
+ NO state change */
+ mail_cache_view_update_cache_decisions(cache_view, FALSE);
+ mail_cache_add(cache_trans, 2, cache_fields[TEST_FIELD_NO].idx, "bar", 3);
+ mail_cache_view_update_cache_decisions(cache_view, TRUE);
+ test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_NO].idx].field.decision == MAIL_CACHE_DECISION_NO);
+
+ /* add a cache field of each type */
+ for (i = 0; i < TEST_FIELD_COUNT; i++)
+ mail_cache_add(cache_trans, 1, cache_fields[i].idx, "foo", 3);
+ /* quick check before commit that the state is as expected */
+ test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_NO].idx].field.decision == MAIL_CACHE_DECISION_TEMP);
+ test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_NO].idx].decision_dirty);
+ test_assert(ctx.cache->fields[cache_fields[TEST_FIELD_NO].idx].uid_highwater == 1);
+
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ mail_cache_view_close(&cache_view);
+
+ /* verify the state: NO state becomes TEMP, others are unchanged */
+ expected_decisions[TEST_FIELD_NO] = MAIL_CACHE_DECISION_TEMP;
+ test_assert(!ctx.cache->field_header_write_pending);
+ for (i = 0; i < TEST_FIELD_COUNT; i++) {
+ const struct mail_cache_field_private *priv =
+ &ctx.cache->fields[cache_fields[i].idx];
+ test_assert_idx(priv->field.decision == expected_decisions[i], i);
+ test_assert_idx(!priv->decision_dirty, i);
+ uint32_t uid_highwater = priv->uid_highwater;
+ if (i != TEST_FIELD_NO_FORCED)
+ test_assert_idx(uid_highwater == 1, i);
+ else
+ test_assert_idx(uid_highwater == 0, i);
+ }
+
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 0);
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+ test_end();
+}
+
+static void test_mail_cache_lookup_decisions_int(bool header_lookups)
+{
+ struct mail_cache_field cache_fields[TEST_FIELD_COUNT];
+ enum mail_cache_decision_type expected_decisions[TEST_FIELD_COUNT];
+ uint32_t expected_uid_highwater[TEST_FIELD_COUNT];
+ time_t expected_last_used[TEST_FIELD_COUNT];
+ struct test_mail_cache_ctx ctx;
+ struct mail_index_transaction *trans;
+ struct mail_cache_view *cache_view;
+ unsigned int i;
+ string_t *str = t_str_new(16);
+
+ test_mail_cache_init(test_mail_index_init(), &ctx);
+ /* create the initial mails and the cache file */
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+ test_assert(mail_cache_purge(ctx.cache, (uint32_t)-1, "test") == 0);
+
+	/* register fields after the initial purge created the cache */
+ memcpy(cache_fields, decision_cache_fields, sizeof(cache_fields));
+ mail_cache_register_fields(ctx.cache, cache_fields, TEST_FIELD_COUNT);
+ for (i = 0; i < TEST_FIELD_COUNT; i++) {
+ expected_decisions[i] = cache_fields[i].decision;
+ expected_uid_highwater[i] = 0;
+ }
+
+ /* day_first_uid[7] is used to determine which mails are "old" and
+ which mails are "new". [7] is the first "new" mail. */
+ test_mail_cache_update_day_first_uid7(&ctx, 2);
+
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+
+ /* test that nothing changes when cache decision updates are disabled */
+ mail_cache_view_update_cache_decisions(cache_view, FALSE);
+ for (i = 0; i < TEST_FIELD_COUNT; i++) T_BEGIN {
+ const struct mail_cache_field_private *priv =
+ &ctx.cache->fields[cache_fields[i].idx];
+ if (!header_lookups) {
+ test_assert_idx(mail_cache_lookup_field(cache_view,
+ str, 1, cache_fields[i].idx) == 0, i);
+ } else {
+ /* it's a bit wrong to lookup headers using a STRING
+ type cache field, but this is simpler and at least
+ currently there's no assert for it.. */
+ test_assert_idx(mail_cache_lookup_headers(cache_view,
+ str, 2, &cache_fields[i].idx, 1) == 0, i);
+ }
+ test_assert_idx(priv->field.decision == expected_decisions[i], i);
+ test_assert_idx(!priv->decision_dirty, i);
+ test_assert_idx(priv->uid_highwater == 0, i);
+ test_assert_idx(priv->field.last_used == 0, i);
+ } T_END;
+ test_assert(!ctx.cache->field_header_write_pending);
+ mail_cache_view_update_cache_decisions(cache_view, TRUE);
+
+ /* set cache fields for the first "new" mail (seq/UID 2) */
+ ioloop_time = 123456789;
+ for (i = 0; i < TEST_FIELD_COUNT; i++) T_BEGIN {
+ const struct mail_cache_field_private *priv =
+ &ctx.cache->fields[cache_fields[i].idx];
+
+ time_t prev_last_used = priv->field.last_used;
+ ioloop_time++;
+ if (!header_lookups) {
+ test_assert_idx(mail_cache_lookup_field(cache_view,
+ str, 2, cache_fields[i].idx) == 0, i);
+ } else {
+ test_assert_idx(mail_cache_lookup_headers(cache_view,
+ str, 2, &cache_fields[i].idx, 1) == 0, i);
+ }
+ expected_last_used[i] = ioloop_time;
+ switch (i) {
+ case TEST_FIELD_NO_FORCED:
+ expected_last_used[i] = 0;
+ /* fall through */
+ case TEST_FIELD_NO:
+ /* Note that just doing a cache lookup won't change
+ caching decision. Higher level code needs to figure
+ out itself if it wants the field to become cached.
+ This happens only by calling mail_cache_add(). */
+ break;
+ case TEST_FIELD_TEMP:
+ /* Note that uid_highwater isn't permanently saved to
+ the cache file. It's used only within a single
+ session. */
+ expected_uid_highwater[i] = 2;
+ break;
+ case TEST_FIELD_YES:
+ /* YES decision doesn't change last_used until the
+ cache decision has been confirmed again. */
+ expected_last_used[i] = prev_last_used;
+ expected_uid_highwater[i] = 2;
+ break;
+ }
+ test_assert_idx(priv->field.decision == expected_decisions[i], i);
+ test_assert_idx(priv->uid_highwater == expected_uid_highwater[i], i);
+ test_assert_idx(priv->field.last_used == expected_last_used[i], i);
+ test_assert_idx(!priv->decision_dirty, i);
+ } T_END;
+ test_assert(!ctx.cache->field_header_write_pending);
+
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+
+ /* test that after commit and reopening the decisions are still the
+ same. */
+ test_assert(mail_cache_reopen(ctx.cache) == 1);
+ for (i = 0; i < TEST_FIELD_COUNT; i++) {
+ const struct mail_cache_field_private *priv =
+ &ctx.cache->fields[cache_fields[i].idx];
+ test_assert_idx(priv->field.decision == expected_decisions[i], i);
+ test_assert_idx(priv->uid_highwater == expected_uid_highwater[i], i);
+ test_assert_idx(priv->field.last_used == expected_last_used[i], i);
+ test_assert_idx(!priv->decision_dirty, i);
+ }
+
+ /* update the day_first_uid so all mails are now "old" */
+ test_mail_cache_update_day_first_uid7(&ctx, 4);
+
+ for (uint32_t seq = 2; seq >= 1; seq--) {
+ /* Reading a 3rd mail, which is also now "old". It causes
+ TEMP -> YES cache decision (don't read backwards yet,
+ that's a separate test). */
+ expected_decisions[TEST_FIELD_TEMP] = MAIL_CACHE_DECISION_YES;
+ for (i = 0; i < TEST_FIELD_COUNT; i++) T_BEGIN {
+ const struct mail_cache_field_private *priv =
+ &ctx.cache->fields[cache_fields[i].idx];
+
+ /* Keep increasing ioloop_time just to make sure that
+ last_used doesn't change. (It changes only once per
+ 24h) */
+ ioloop_time++;
+ if (!header_lookups) {
+ test_assert_idx(mail_cache_lookup_field(
+ cache_view, str, seq,
+ cache_fields[i].idx) == 0, i);
+ } else {
+ test_assert_idx(mail_cache_lookup_headers(
+ cache_view, str, seq,
+ &cache_fields[i].idx, 1) == 0, i);
+ }
+ if (i == TEST_FIELD_YES && seq == 2) {
+ /* YES decision is confirmed now. The last_used
+ timestamp was updated for the first old
+ mail. */
+ expected_last_used[i] = ioloop_time;
+ }
+ test_assert_idx(priv->field.decision == expected_decisions[i], i);
+ test_assert_idx(priv->uid_highwater == expected_uid_highwater[i], i);
+ test_assert_idx(priv->field.last_used == expected_last_used[i], i);
+ test_assert_idx(priv->decision_dirty == (i == TEST_FIELD_TEMP), i);
+ } T_END;
+ /* restore caching decision */
+ ctx.cache->fields[cache_fields[TEST_FIELD_TEMP].idx].field.decision =
+ MAIL_CACHE_DECISION_TEMP;
+ /* reading mails backwards also causes TEMP -> YES cache
+ decision, even if all mails are "new" */
+ test_mail_cache_update_day_first_uid7(&ctx, 1);
+ }
+
+ test_assert(test_mail_cache_get_purge_count(&ctx) == 0);
+ mail_cache_view_close(&cache_view);
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+}
+
+static void test_mail_cache_lookup_decisions(void)
+{
+ test_begin("mail cache lookup decisions");
+ test_mail_cache_lookup_decisions_int(FALSE);
+ test_end();
+}
+
+static void test_mail_cache_lookup_decisions2(void)
+{
+ test_begin("mail cache lookup decisions (2)");
+ test_mail_cache_lookup_decisions_int(TRUE);
+ test_end();
+}
+
+static void test_mail_cache_in_memory(void)
+{
+ const struct mail_index_optimization_settings optimization_set = {
+ .cache = {
+ .record_max_size = MAIL_CACHE_MAX_WRITE_BUFFER*2,
+ },
+ };
+ struct test_mail_cache_ctx ctx;
+ struct mail_index *index;
+ struct mail_index_transaction *trans;
+ struct mail_cache_view *cache_view;
+ struct mail_cache_transaction_ctx *cache_trans;
+
+ test_begin("mail cache add in-memory");
+
+ index = mail_index_alloc(NULL, NULL, "(in-memory)");
+ test_assert(mail_index_open_or_create(index, MAIL_INDEX_OPEN_FLAG_CREATE) == 0);
+ test_mail_cache_init(index, &ctx);
+ mail_index_set_optimization_settings(ctx.index, &optimization_set);
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+ test_mail_cache_add_mail(&ctx, UINT_MAX, NULL);
+
+ trans = mail_index_transaction_begin(ctx.view, 0);
+ cache_trans = mail_cache_get_transaction(cache_view, trans);
+
+ size_t blob_size = 1024*130;
+ char *blob = i_malloc(blob_size);
+ memset(blob, 'x', blob_size);
+ mail_cache_add(cache_trans, 1, ctx.cache_field.idx, blob, blob_size);
+ mail_cache_add(cache_trans, 1, ctx.cache_field2.idx, "foo1", 4);
+ mail_cache_add(cache_trans, 2, ctx.cache_field2.idx, "foo2", 4);
+
+ /* all fields are still available */
+ string_t *str = str_new(default_pool, blob_size + 1024);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field.idx) == 1);
+ test_assert(str_len(str) == blob_size);
+ str_truncate(str, 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field2.idx) == 1);
+ test_assert_strcmp(str_c(str), "foo1");
+ str_truncate(str, 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 2,
+ ctx.cache_field2.idx) == 1);
+ test_assert_strcmp(str_c(str), "foo2");
+
+ /* adding a second blob grows memory usage beyond
+ MAIL_CACHE_MAX_WRITE_BUFFER and frees the first cached mail
+ entirely (although in theory it could drop just the big blob) */
+ mail_cache_add(cache_trans, 2, ctx.cache_field.idx, blob, blob_size);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field.idx) == 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 1,
+ ctx.cache_field2.idx) == 0);
+ str_truncate(str, 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 2,
+ ctx.cache_field.idx) == 1);
+ test_assert(str_len(str) == blob_size);
+ str_truncate(str, 0);
+ test_assert(mail_cache_lookup_field(cache_view, str, 2,
+ ctx.cache_field2.idx) == 1);
+ test_assert_strcmp(str_c(str), "foo2");
+
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+
+ str_free(&str);
+ i_free(blob);
+
+ mail_cache_view_close(&cache_view);
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+ test_end();
+}
+
+static void test_mail_cache_size_corruption(void)
+{
+ struct test_mail_cache_ctx ctx;
+ struct mail_cache_view *cache_view;
+ struct mail_cache_lookup_iterate_ctx iter;
+ struct mail_cache_iterate_field field;
+
+ test_begin("mail cache size corruption");
+
+ test_mail_cache_init(test_mail_index_init(), &ctx);
+ test_mail_cache_add_mail(&ctx, ctx.cache_field.idx, "12345678");
+ cache_view = mail_cache_view_open(ctx.cache, ctx.view);
+
+ /* lookup the added cache field */
+ mail_cache_lookup_iter_init(cache_view, 1, &iter);
+ test_assert(iter.offset > 0);
+
+ uoff_t size_offset = iter.offset +
+ offsetof(struct mail_cache_record, size);
+ uint32_t new_size = 0x10000000;
+ test_assert(pwrite_full(ctx.cache->fd, &new_size, sizeof(new_size),
+ size_offset) == 0);
+ test_expect_error_string("record points outside file");
+ test_assert(mail_cache_lookup_iter_next(&iter, &field) == -1);
+ test_expect_no_more_errors();
+
+ mail_cache_view_close(&cache_view);
+ test_mail_cache_deinit(&ctx);
+ test_mail_index_delete();
+ test_end();
+}
+
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_cache_fields,
+ test_mail_cache_record_max_size,
+ test_mail_cache_record_max_size2,
+ test_mail_cache_record_max_size3,
+ test_mail_cache_record_max_size4,
+ test_mail_cache_add_decisions,
+ test_mail_cache_lookup_decisions,
+ test_mail_cache_lookup_decisions2,
+ test_mail_cache_in_memory,
+ test_mail_cache_size_corruption,
+ NULL
+ };
+ return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-cache.h b/src/lib-index/test-mail-cache.h
new file mode 100644
index 0000000..2a274b5
--- /dev/null
+++ b/src/lib-index/test-mail-cache.h
@@ -0,0 +1,32 @@
+#ifndef TEST_MAIL_CACHE_H
+#define TEST_MAIL_CACHE_H
+
+#include "test-mail-index.h"
+#include "mail-cache-private.h"
+
+struct test_mail_cache_ctx {
+ struct mail_index *index;
+ struct mail_cache *cache;
+ struct mail_index_view *view;
+
+ struct mail_cache_field cache_field, cache_field2, cache_field3;
+};
+
+void test_mail_cache_init(struct mail_index *index,
+ struct test_mail_cache_ctx *ctx_r);
+void test_mail_cache_deinit(struct test_mail_cache_ctx *ctx);
+
+unsigned int test_mail_cache_get_purge_count(struct test_mail_cache_ctx *ctx);
+void test_mail_cache_index_sync(struct test_mail_cache_ctx *ctx);
+void test_mail_cache_view_sync(struct test_mail_cache_ctx *ctx);
+void test_mail_cache_purge(void);
+void test_mail_cache_add_mail(struct test_mail_cache_ctx *ctx,
+ unsigned int cache_field_idx,
+ const char *cache_data);
+void test_mail_cache_add_field(struct test_mail_cache_ctx *ctx, uint32_t seq,
+ unsigned int cache_field_idx,
+ const char *cache_data);
+void test_mail_cache_update_day_first_uid7(struct test_mail_cache_ctx *ctx,
+ uint32_t first_new_uid);
+
+#endif
diff --git a/src/lib-index/test-mail-index-map.c b/src/lib-index/test-mail-index-map.c
new file mode 100644
index 0000000..0b0b3ad
--- /dev/null
+++ b/src/lib-index/test-mail-index-map.c
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "test-common.h"
+#include "mail-index-private.h"
+#include "mail-index-modseq.h"
+#include "mail-index-transaction-private.h"
+
+static void test_mail_index_map_lookup_seq_range_count(unsigned int messages_count)
+{
+ struct mail_index_record_map rec_map;
+ struct mail_index_map map;
+ uint32_t seq, first_uid, last_uid, first_seq, last_seq, max_uid;
+
+ i_zero(&map);
+ i_zero(&rec_map);
+ map.rec_map = &rec_map;
+ map.hdr.messages_count = messages_count;
+ map.hdr.record_size = sizeof(struct mail_index_record);
+ rec_map.records_count = map.hdr.messages_count;
+ rec_map.records = i_new(struct mail_index_record, map.hdr.messages_count);
+
+ for (seq = 1; seq <= map.hdr.messages_count; seq++)
+ MAIL_INDEX_REC_AT_SEQ(&map, seq)->uid = seq*2;
+ max_uid = (seq-1)*2;
+ map.hdr.next_uid = max_uid + 1;
+
+ for (first_uid = 2; first_uid <= max_uid; first_uid++) {
+ for (last_uid = first_uid; last_uid <= max_uid; last_uid++) {
+ if (first_uid == last_uid && first_uid%2 != 0)
+ continue;
+ mail_index_map_lookup_seq_range(&map, first_uid, last_uid, &first_seq, &last_seq);
+ test_assert((first_uid+1)/2 == first_seq && last_uid/2 == last_seq);
+ }
+ }
+ i_free(rec_map.records);
+}
+
+static void test_mail_index_map_lookup_seq_range(void)
+{
+ unsigned int i;
+
+ test_begin("mail index map lookup seq range");
+ for (i = 1; i < 20; i++)
+ test_mail_index_map_lookup_seq_range_count(i);
+ test_end();
+}
+
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_index_map_lookup_seq_range,
+ NULL
+ };
+ return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-index-modseq.c b/src/lib-index/test-mail-index-modseq.c
new file mode 100644
index 0000000..4a2d524
--- /dev/null
+++ b/src/lib-index/test-mail-index-modseq.c
@@ -0,0 +1,77 @@
+/* Copyright (c) 2016-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "test-common.h"
+#include "test-mail-index.h"
+#include "mail-index-modseq.h"
+#include "mail-transaction-log-private.h"
+
+static void test_mail_index_modseq_get_next_log_offset(void)
+{
+ static const struct {
+ uint32_t log_seq;
+ uoff_t log_offset;
+ } tests[] = {
+ { 0, 0 },
+ { 2, 40 },
+ { 2, 148 },
+ { 2, 164 },
+ { 3, 40 },
+ { 3, 56 },
+ { 3, 72 },
+ { 3, 88 },
+ };
+ struct mail_index *index;
+ struct mail_index_view *view, *view2;
+ struct mail_index_transaction *trans;
+ uint32_t seq, uid;
+
+ test_begin("mail_transaction_log_file_get_modseq_next_offset()");
+ index = test_mail_index_init();
+ view = mail_index_view_open(index);
+ mail_index_modseq_enable(index);
+
+ trans = mail_index_transaction_begin(view, 0);
+ uid = 1234;
+ mail_index_update_header(trans,
+ offsetof(struct mail_index_header, uid_validity),
+ &uid, sizeof(uid), TRUE);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+
+ for (uid = 1; uid <= 3; uid++) {
+ trans = mail_index_transaction_begin(view, 0);
+ mail_index_append(trans, uid, &seq);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ }
+ test_assert(mail_transaction_log_file_lock(index->log->head) == 0);
+ test_assert(mail_transaction_log_rotate(index->log, FALSE) == 0);
+ mail_transaction_log_file_unlock(index->log->head, "rotating");
+ for (uid = 4; uid <= 6; uid++) {
+ trans = mail_index_transaction_begin(view, 0);
+ mail_index_append(trans, uid, &seq);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ }
+
+ view2 = mail_index_view_open(index);
+ for (uint64_t modseq = 1; modseq <= 7; modseq++) {
+ uint32_t log_seq = 0;
+ uoff_t log_offset;
+
+ test_assert_idx(mail_index_modseq_get_next_log_offset(view2, modseq, &log_seq, &log_offset) == (tests[modseq].log_seq != 0), modseq);
+ test_assert_idx(tests[modseq].log_seq == log_seq && tests[modseq].log_offset == log_offset, modseq);
+ }
+
+ mail_index_view_close(&view);
+ mail_index_view_close(&view2);
+ test_mail_index_deinit(&index);
+ test_end();
+}
+
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_index_modseq_get_next_log_offset,
+ NULL
+ };
+ return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-index-sync-ext.c b/src/lib-index/test-mail-index-sync-ext.c
new file mode 100644
index 0000000..156d50f
--- /dev/null
+++ b/src/lib-index/test-mail-index-sync-ext.c
@@ -0,0 +1,86 @@
+/* Copyright (c) 2010-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "test-common.h"
+#include "mail-transaction-log-view-private.h"
+#include "mail-index-sync-private.h"
+#include "mail-index-modseq.h"
+
+static void test_lookup_seq_range(struct mail_index_view *view ATTR_UNUSED,
+ uint32_t first_uid, uint32_t last_uid,
+ uint32_t *first_seq_r, uint32_t *last_seq_r)
+{
+ *first_seq_r = first_uid;
+ *last_seq_r = last_uid;
+}
+
+static void test_mail_index_sync_ext_atomic_inc(void)
+{
+ struct mail_index_sync_map_ctx ctx;
+ struct mail_transaction_ext_atomic_inc u;
+ struct mail_index_ext *ext;
+ void *ptr;
+
+ test_begin("mail index sync ext atomic inc");
+
+ i_zero(&ctx);
+ ctx.view = t_new(struct mail_index_view, 1);
+ ctx.view->log_view = t_new(struct mail_transaction_log_view, 1);
+ ctx.view->index = t_new(struct mail_index, 1);
+ ctx.view->index->fsck_log_head_file_seq = 10; /* silence errors */
+ ctx.view->v.lookup_seq_range = test_lookup_seq_range;
+ ctx.view->map = t_new(struct mail_index_map, 1);
+ ctx.view->map->hdr.next_uid = 2;
+ ctx.view->map->hdr.record_size = sizeof(struct mail_index_record) + 16;
+ ctx.view->map->rec_map = t_new(struct mail_index_record_map, 1);
+ ctx.view->map->rec_map->records =
+ t_malloc0(ctx.view->map->hdr.record_size);
+ t_array_init(&ctx.view->map->extensions, 4);
+ ext = array_append_space(&ctx.view->map->extensions);
+ ext->record_offset = sizeof(struct mail_index_record);
+ ptr = PTR_OFFSET(ctx.view->map->rec_map->records, ext->record_offset);
+
+ i_zero(&u);
+ test_assert(mail_index_sync_ext_atomic_inc(&ctx, &u) == -1);
+
+ u.uid = 2;
+ test_assert(mail_index_sync_ext_atomic_inc(&ctx, &u) == -1);
+
+ u.uid = 1;
+#define TEST_ATOMIC(_type, _value, _diff, _ret) \
+ { _type *n = ptr; *n = _value; } \
+ ctx.cur_ext_record_size = sizeof(_type); \
+ u.diff = _diff; \
+ test_assert(mail_index_sync_ext_atomic_inc(&ctx, &u) == _ret);
+
+#define TEST_ATOMIC_BLOCK(_type, _max) \
+ TEST_ATOMIC(_type, 1, -1, 1); \
+ TEST_ATOMIC(_type, 1, -2, -1); \
+ TEST_ATOMIC(_type, 0, -1, -1); \
+ TEST_ATOMIC(_type, 0, _max, 1); \
+ TEST_ATOMIC(_type, 1, _max, -1); \
+ TEST_ATOMIC(_type, 0, (_max+1), -1); \
+ TEST_ATOMIC(_type, _max, 1, -1); \
+ TEST_ATOMIC(_type, _max, -_max, 1); \
+ TEST_ATOMIC(_type, _max, -(_max+1), -1);
+
+ TEST_ATOMIC_BLOCK(uint8_t, 255);
+ TEST_ATOMIC_BLOCK(uint16_t, 65535);
+
+ ctx.cur_ext_record_size = 5;
+ u.diff = 0;
+ test_assert(mail_index_sync_ext_atomic_inc(&ctx, &u) == -1);
+
+ i_free(ctx.view->index->need_recreate);
+ test_end();
+}
+
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_index_sync_ext_atomic_inc,
+ NULL
+ };
+ return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-index-transaction-finish.c b/src/lib-index/test-mail-index-transaction-finish.c
new file mode 100644
index 0000000..e32467e
--- /dev/null
+++ b/src/lib-index/test-mail-index-transaction-finish.c
@@ -0,0 +1,297 @@
+/* Copyright (c) 2009-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "test-common.h"
+#include "mail-index-private.h"
+#include "mail-index-modseq.h"
+#include "mail-index-transaction-private.h"
+
+
+static struct mail_index_record recs[20];
+static uint64_t modseqs[N_ELEMENTS(recs)];
+
+bool mail_index_map_get_ext_idx(struct mail_index_map *map ATTR_UNUSED,
+ uint32_t ext_id ATTR_UNUSED,
+ uint32_t *idx_r ATTR_UNUSED) { return FALSE; }
+void mail_index_ext_set_reset_id(struct mail_index_transaction *t ATTR_UNUSED,
+ uint32_t ext_id ATTR_UNUSED,
+ uint32_t reset_id ATTR_UNUSED) { }
+void mail_index_transaction_set_log_updates(struct mail_index_transaction *t ATTR_UNUSED) { }
+void mail_index_update_day_headers(struct mail_index_transaction *t ATTR_UNUSED, time_t day_stamp ATTR_UNUSED) {}
+bool mail_index_cancel_flag_updates(struct mail_index_transaction *t ATTR_UNUSED,
+ uint32_t seq ATTR_UNUSED) { return TRUE; }
+bool mail_index_cancel_keyword_updates(struct mail_index_transaction *t ATTR_UNUSED,
+ uint32_t seq ATTR_UNUSED) { return TRUE; }
+void mail_index_transaction_sort_appends(struct mail_index_transaction *t ATTR_UNUSED) {}
+int mail_index_map(struct mail_index *index ATTR_UNUSED,
+ enum mail_index_sync_handler_type type ATTR_UNUSED) { return 1; }
+void mail_index_update_modseq(struct mail_index_transaction *t ATTR_UNUSED, uint32_t seq ATTR_UNUSED,
+ uint64_t min_modseq ATTR_UNUSED) {}
+
+const struct mail_index_record *
+mail_index_lookup(struct mail_index_view *view ATTR_UNUSED, uint32_t seq)
+{
+ i_assert(seq < N_ELEMENTS(recs));
+ return &recs[seq];
+}
+
+struct mail_index_record *
+mail_index_transaction_lookup(struct mail_index_transaction *t ATTR_UNUSED,
+ uint32_t seq)
+{
+ i_assert(seq < N_ELEMENTS(recs));
+ return &recs[seq];
+}
+
+uint64_t mail_index_modseq_lookup(struct mail_index_view *view ATTR_UNUSED,
+ uint32_t seq)
+{
+ i_assert(seq < N_ELEMENTS(modseqs));
+ return modseqs[seq];
+}
+
+uint64_t mail_index_modseq_get_highest(struct mail_index_view *view ATTR_UNUSED)
+{
+ return modseqs[0];
+}
+
+#define MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far) \
+ for (unsigned int sofar = 0; sofar < n_so_far; sofar++) \
+ mail_index_transaction_finish_so_far(t); \
+ mail_index_transaction_finish(t);
+
+static void
+test_mail_index_transaction_finish_flag_updates(unsigned int n_so_far)
+{
+ struct mail_index_transaction *t;
+ const struct mail_index_flag_update *updates;
+ struct mail_index_flag_update u;
+ unsigned int count;
+
+ t = t_new(struct mail_index_transaction, 1);
+ t->drop_unnecessary_flag_updates = TRUE;
+
+ i_zero(&u);
+ u.add_flags = MAIL_SEEN; u.remove_flags = MAIL_DRAFT;
+
+ test_begin(t_strdup_printf("mail index transaction finish flag updates n_so_far=%u", n_so_far));
+
+ /* test fast path: all changed */
+ t_array_init(&t->updates, 10);
+ u.uid1 = 1; u.uid2 = 2;
+ array_push_back(&t->updates, &u);
+ u.uid1 = 4; u.uid2 = 5;
+ array_push_back(&t->updates, &u);
+ MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far);
+
+ updates = array_get(&t->updates, &count);
+ test_assert(count == 4);
+ test_assert(updates[0].uid1 == 1*2 && updates[0].uid2 == 1*2);
+ test_assert(updates[1].uid1 == 2*2 && updates[1].uid2 == 2*2);
+ test_assert(updates[2].uid1 == 4*2 && updates[2].uid2 == 4*2);
+ test_assert(updates[3].uid1 == 5*2 && updates[3].uid2 == 5*2);
+
+ /* nothing changed */
+ t_array_init(&t->updates, 10);
+ u.uid1 = 1; u.uid2 = 2;
+ array_push_back(&t->updates, &u);
+ u.uid1 = 4; u.uid2 = 5;
+ array_push_back(&t->updates, &u);
+ recs[1].flags = MAIL_SEEN;
+ recs[2].flags = MAIL_SEEN;
+ recs[4].flags = MAIL_SEEN;
+ recs[5].flags = MAIL_SEEN;
+ MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far);
+ test_assert(!array_is_created(&t->updates));
+
+ /* some changes */
+ t_array_init(&t->updates, 10);
+ u.uid1 = 2; u.uid2 = 3;
+ array_push_back(&t->updates, &u);
+ u.uid1 = 5; u.uid2 = 6;
+ array_push_back(&t->updates, &u);
+ MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far);
+
+ updates = array_get(&t->updates, &count);
+ test_assert(count == 2);
+ test_assert(updates[0].uid1 == 3*2 && updates[0].uid2 == 3*2);
+ test_assert(updates[1].uid1 == 6*2 && updates[1].uid2 == 6*2);
+
+ test_end();
+}
+
+static void
+test_mail_index_transaction_finish_check_conflicts(unsigned int n_so_far)
+{
+ struct mail_index_transaction *t;
+ const struct seq_range *conflicts;
+ ARRAY_TYPE(seq_range) conflict_seqs = ARRAY_INIT;
+ unsigned int count;
+
+ t = t_new(struct mail_index_transaction, 1);
+ t->view = t_new(struct mail_index_view, 1);
+ t->min_flagupdate_seq = 5;
+ t->max_flagupdate_seq = 8;
+ t->conflict_seqs = &conflict_seqs;
+
+ modseqs[0] = 1234;
+ modseqs[5] = 5;
+ modseqs[6] = 8;
+ modseqs[7] = 6;
+ modseqs[8] = 7;
+
+ test_begin(t_strdup_printf("mail index transaction finish check conflicts n_so_far=%u", n_so_far));
+
+ /* fast path: no conflicts */
+ t->max_modseq = 1234;
+ MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far);
+ test_assert(!array_is_created(&conflict_seqs));
+
+ /* try some conflicts */
+ t->max_modseq = 6;
+ MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far);
+
+ i_assert(array_is_created(&conflict_seqs));
+
+ conflicts = array_get(&conflict_seqs, &count);
+ test_assert(count == 2);
+ test_assert(conflicts[0].seq1 == 6 && conflicts[0].seq2 == 6);
+ test_assert(conflicts[1].seq1 == 8 && conflicts[1].seq2 == 8);
+
+ test_end();
+ array_free(t->conflict_seqs);
+}
+
+static void
+test_mail_index_transaction_finish_modseq_updates(unsigned int n_so_far)
+{
+ struct mail_index_transaction *t;
+ const struct mail_transaction_modseq_update *ups;
+ struct mail_transaction_modseq_update u;
+ unsigned int count;
+
+ t = t_new(struct mail_index_transaction, 1);
+
+ test_begin(t_strdup_printf("mail index transaction finish modseq updates n_so_far=%u", n_so_far));
+
+ t_array_init(&t->modseq_updates, 10);
+ u.modseq_low32 = 1234567890;
+ u.modseq_high32 = 987654321;
+ u.uid = 1; array_push_back(&t->modseq_updates, &u);
+ u.modseq_low32++;
+ u.modseq_high32++;
+ u.uid = 2; array_push_back(&t->modseq_updates, &u);
+ u.modseq_low32++;
+ u.modseq_high32++;
+ u.uid = 5; array_push_back(&t->modseq_updates, &u);
+ u.modseq_low32 = 1234;
+ u.modseq_high32 = 0;
+ u.uid = 2; array_push_back(&t->modseq_updates, &u);
+
+ MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far);
+
+ ups = array_get(&t->modseq_updates, &count);
+ test_assert(count == 4);
+
+ test_assert(ups[0].uid == 1*2);
+ test_assert(ups[0].modseq_low32 == 1234567890 &&
+ ups[0].modseq_high32 == 987654321);
+ test_assert(ups[1].uid == 2*2);
+ test_assert(ups[1].modseq_low32 == 1234567891 &&
+ ups[1].modseq_high32 == 987654322);
+ test_assert(ups[2].uid == 5*2);
+ test_assert(ups[2].modseq_low32 == 1234567892 &&
+ ups[2].modseq_high32 == 987654323);
+ test_assert(ups[3].uid == 2*2);
+ test_assert(ups[3].modseq_low32 == 1234 &&
+ ups[3].modseq_high32 == 0);
+ test_end();
+}
+
+static void
+test_mail_index_transaction_finish_expunges(unsigned int n_so_far)
+{
+ struct mail_index_transaction *t;
+ guid_128_t guid1, guid2, guid3;
+ const struct mail_transaction_expunge_guid *expunges;
+ struct mail_transaction_expunge_guid expunge;
+ unsigned int i, count;
+
+ for (i = 0; i < sizeof(guid2); i++) {
+ guid1[i] = i + 1;
+ guid2[i] = i ^ 0xff;
+ guid3[i] = i + 0x80;
+ }
+
+ recs[1].uid = 12;
+ recs[2].uid = 15;
+ recs[3].uid = 18;
+
+ t = t_new(struct mail_index_transaction, 1);
+ t->expunges_nonsorted = TRUE;
+
+ test_begin(t_strdup_printf("mail index transaction finish expunges n_so_far=%u", n_so_far));
+
+ t_array_init(&t->expunges, 3);
+ expunge.uid = 2;
+ memcpy(expunge.guid_128, guid2, sizeof(expunge.guid_128));
+ array_push_back(&t->expunges, &expunge);
+ array_push_back(&t->expunges, &expunge);
+ expunge.uid = 1;
+ memcpy(expunge.guid_128, guid1, sizeof(expunge.guid_128));
+ array_push_back(&t->expunges, &expunge);
+ array_push_back(&t->expunges, &expunge);
+ expunge.uid = 3;
+ memcpy(expunge.guid_128, guid3, sizeof(expunge.guid_128));
+ array_push_back(&t->expunges, &expunge);
+ array_push_back(&t->expunges, &expunge);
+
+ MAIL_INDEX_TRANSACTION_FINISH(t, n_so_far);
+
+ expunges = array_get(&t->expunges, &count);
+ test_assert(count == 3);
+ test_assert(expunges[0].uid == 12);
+ test_assert(memcmp(expunges[0].guid_128, guid1, sizeof(guid1)) == 0);
+ test_assert(expunges[1].uid == 15);
+ test_assert(memcmp(expunges[1].guid_128, guid2, sizeof(guid2)) == 0);
+ test_assert(expunges[2].uid == 18);
+ test_assert(memcmp(expunges[2].guid_128, guid3, sizeof(guid3)) == 0);
+ test_end();
+}
+
+static void test_state_reset(void)
+{
+ memset(recs, 0, sizeof(recs));
+ memset(modseqs, 0, sizeof(modseqs));
+ for (unsigned int n = 1; n < N_ELEMENTS(recs); n++)
+ recs[n].uid = n*2;
+}
+
+static void test_mail_index_transaction_finish(void)
+{
+ void (*const test_finish_functions[])(unsigned int) = {
+ test_mail_index_transaction_finish_flag_updates,
+ test_mail_index_transaction_finish_check_conflicts,
+ test_mail_index_transaction_finish_modseq_updates,
+ test_mail_index_transaction_finish_expunges,
+ };
+ unsigned int i, j;
+
+ for (i = 0; i < N_ELEMENTS(test_finish_functions); i++) {
+ for (j = 0; j < 3; j++) {
+ test_state_reset();
+ test_finish_functions[i](j);
+ }
+ }
+}
+
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_index_transaction_finish,
+ NULL
+ };
+
+ return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-index-transaction-update.c b/src/lib-index/test-mail-index-transaction-update.c
new file mode 100644
index 0000000..cdfa951
--- /dev/null
+++ b/src/lib-index/test-mail-index-transaction-update.c
@@ -0,0 +1,683 @@
+/* Copyright (c) 2009-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "env-util.h"
+#include "test-common.h"
+#include "mail-index-private.h"
+#include "mail-index-transaction-private.h"
+
+#include <time.h>
+
+static struct mail_index_header hdr;
+static struct mail_index_record rec;
+
+const struct mail_index_header *
+mail_index_get_header(struct mail_index_view *view ATTR_UNUSED)
+{
+ return &hdr;
+}
+
+const struct mail_index_record *
+mail_index_lookup(struct mail_index_view *view ATTR_UNUSED,
+ uint32_t seq ATTR_UNUSED)
+{
+ return &rec;
+}
+
+void mail_index_lookup_keywords(struct mail_index_view *view ATTR_UNUSED,
+ uint32_t seq ATTR_UNUSED,
+ ARRAY_TYPE(keyword_indexes) *keyword_idx ATTR_UNUSED)
+{
+ array_clear(keyword_idx);
+}
+
+bool mail_index_map_get_ext_idx(struct mail_index_map *map ATTR_UNUSED,
+ uint32_t ext_id ATTR_UNUSED,
+ uint32_t *idx_r ATTR_UNUSED)
+{
+ return FALSE;
+}
+
+uint32_t mail_index_view_get_messages_count(struct mail_index_view *view ATTR_UNUSED)
+{
+ return hdr.messages_count;
+}
+
+void mail_index_transaction_lookup_latest_keywords(struct mail_index_transaction *t ATTR_UNUSED,
+ uint32_t seq ATTR_UNUSED,
+ ARRAY_TYPE(keyword_indexes) *keywords ATTR_UNUSED)
+{
+}
+
+struct mail_keywords *
+mail_index_keywords_create_from_indexes(struct mail_index *index ATTR_UNUSED,
+ const ARRAY_TYPE(keyword_indexes)
+ *keyword_indexes ATTR_UNUSED)
+{
+ return NULL;
+}
+
+void mail_index_keywords_unref(struct mail_keywords **keywords ATTR_UNUSED)
+{
+}
+
+static struct mail_index_transaction *
+mail_index_transaction_new(void)
+{
+ struct mail_index_transaction *t;
+
+ t = t_new(struct mail_index_transaction, 1);
+ t->first_new_seq = hdr.messages_count + 1;
+ return t;
+}
+static void mail_index_transaction_cleanup(struct mail_index_transaction *t)
+{
+ if (array_is_created(&t->appends))
+ array_free(&t->appends);
+ if (array_is_created(&t->updates))
+ array_free(&t->updates);
+ if (array_is_created(&t->modseq_updates))
+ array_free(&t->modseq_updates);
+ if (array_is_created(&t->expunges))
+ array_free(&t->expunges);
+}
+
+static void test_mail_index_append(void)
+{
+ struct mail_index_transaction *t;
+ const struct mail_index_record *appends;
+ ARRAY_TYPE(seq_range) saved_uids_arr;
+ const struct seq_range *saved_uids;
+ unsigned int count;
+ uint32_t seq;
+
+ hdr.messages_count = 4;
+ t = mail_index_transaction_new();
+
+ test_begin("mail index append");
+ mail_index_append(t, 0, &seq);
+ test_assert(t->log_updates);
+ test_assert(seq == 5);
+ mail_index_append(t, 0, &seq);
+ test_assert(seq == 6);
+ test_assert(!t->appends_nonsorted);
+
+ t_array_init(&saved_uids_arr, 128);
+ mail_index_append_finish_uids(t, 123, &saved_uids_arr);
+ saved_uids = array_get(&saved_uids_arr, &count);
+ test_assert(count == 1);
+ test_assert(saved_uids[0].seq1 == 123 && saved_uids[0].seq2 == 124);
+
+ appends = array_get(&t->appends, &count);
+ test_assert(appends[0].uid == 123);
+ test_assert(appends[0].flags == 0);
+ test_assert(appends[1].uid == 124);
+ test_assert(appends[1].flags == 0);
+ test_end();
+ mail_index_transaction_cleanup(t);
+
+ /* test with some uids */
+ t = mail_index_transaction_new();
+
+ test_begin("mail index append with uids");
+ mail_index_append(t, 0, &seq);
+ test_assert(seq == 5);
+ mail_index_append(t, 126, &seq);
+ test_assert(seq == 6);
+ test_assert(!t->appends_nonsorted);
+ mail_index_append(t, 124, &seq);
+ test_assert(seq == 7);
+ test_assert(t->appends_nonsorted);
+ mail_index_append(t, 0, &seq);
+ test_assert(seq == 8);
+ mail_index_append(t, 128, &seq);
+ test_assert(seq == 9);
+ test_assert(t->highest_append_uid == 128);
+
+ mail_index_append_finish_uids(t, 125, &saved_uids_arr);
+ saved_uids = array_get(&saved_uids_arr, &count);
+ test_assert(count == 4);
+ test_assert(saved_uids[0].seq1 == 129 && saved_uids[0].seq2 == 129);
+ test_assert(saved_uids[1].seq1 == 126 && saved_uids[1].seq2 == 126);
+ test_assert(saved_uids[2].seq1 == 130 && saved_uids[2].seq2 == 131);
+ test_assert(saved_uids[3].seq1 == 128 && saved_uids[3].seq2 == 128);
+
+ appends = array_get(&t->appends, &count);
+ test_assert(count == 5);
+ test_assert(appends[0].uid == 129);
+ test_assert(appends[1].uid == 126);
+ test_assert(appends[2].uid == 130);
+ test_assert(appends[3].uid == 131);
+ test_assert(appends[4].uid == 128);
+ test_end();
+
+ mail_index_transaction_cleanup(t);
+}
+
+static void test_mail_index_flag_update_fastpath(void)
+{
+ struct mail_index_transaction *t;
+ const struct mail_index_flag_update *updates;
+ unsigned int count;
+
+ hdr.messages_count = 20;
+ t = mail_index_transaction_new();
+
+ test_begin("mail index flag update fast paths");
+
+ mail_index_update_flags_range(t, 13, 14, MODIFY_REPLACE,
+ MAIL_DELETED);
+ test_assert(t->last_update_idx == 0);
+ test_assert(array_count(&t->updates) == 1);
+
+ mail_index_update_flags_range(t, 15, 15, MODIFY_REPLACE,
+ MAIL_DELETED);
+ test_assert(t->last_update_idx == 0);
+ test_assert(array_count(&t->updates) == 1);
+
+ mail_index_update_flags_range(t, 16, 16, MODIFY_ADD,
+ MAIL_DELETED);
+ test_assert(t->last_update_idx == 1);
+ test_assert(array_count(&t->updates) == 2);
+
+ updates = array_get(&t->updates, &count);
+ test_assert(updates[0].uid1 == 13);
+ test_assert(updates[0].uid2 == 15);
+ test_assert(updates[0].add_flags == MAIL_DELETED);
+ test_assert(updates[0].remove_flags ==
+ (MAIL_ANSWERED | MAIL_FLAGGED | MAIL_SEEN | MAIL_DRAFT));
+ test_assert(updates[1].uid1 == 16);
+ test_assert(updates[1].uid2 == 16);
+ test_assert(updates[1].add_flags == MAIL_DELETED);
+ test_assert(updates[1].remove_flags == 0);
+ test_assert(!t->log_updates);
+ test_end();
+
+ mail_index_transaction_cleanup(t);
+}
+
+static void test_mail_index_flag_update_simple_merges(void)
+{
+ struct mail_index_transaction *t;
+ const struct mail_index_flag_update *updates;
+ unsigned int count;
+
+ hdr.messages_count = 20;
+ t = mail_index_transaction_new();
+
+ test_begin("mail index flag update simple merges");
+
+ mail_index_update_flags_range(t, 6, 8, MODIFY_ADD,
+ MAIL_FLAGGED);
+ test_assert(t->last_update_idx == 0);
+ mail_index_update_flags_range(t, 5, 6, MODIFY_ADD,
+ MAIL_FLAGGED);
+ test_assert(t->last_update_idx == 0);
+ mail_index_update_flags_range(t, 4, 4, MODIFY_ADD,
+ MAIL_FLAGGED);
+ test_assert(t->last_update_idx == 0);
+ mail_index_update_flags_range(t, 7, 9, MODIFY_ADD,
+ MAIL_FLAGGED);
+ test_assert(t->last_update_idx == 0);
+ mail_index_update_flags_range(t, 10, 10, MODIFY_ADD,
+ MAIL_FLAGGED);
+ updates = array_get(&t->updates, &count);
+ test_assert(count == 1);
+ test_assert(updates[0].uid1 == 4);
+ test_assert(updates[0].uid2 == 10);
+ test_assert(updates[0].add_flags == MAIL_FLAGGED);
+ test_assert(updates[0].remove_flags == 0);
+
+ mail_index_update_flags_range(t, 12, 12, MODIFY_ADD,
+ MAIL_FLAGGED);
+ mail_index_update_flags_range(t, 11, 11, MODIFY_ADD,
+ MAIL_FLAGGED);
+ updates = array_get(&t->updates, &count);
+ test_assert(count == 1);
+ test_assert(updates[0].uid1 == 4);
+ test_assert(updates[0].uid2 == 12);
+ test_end();
+
+ mail_index_transaction_cleanup(t);
+}
+
+static void test_mail_index_flag_update_complex_merges(void)
+{
+ struct mail_index_transaction *t;
+ const struct mail_index_flag_update *updates;
+ unsigned int count;
+
+ hdr.messages_count = 20;
+ t = mail_index_transaction_new();
+
+ test_begin("mail index flag update complex merges");
+
+ mail_index_update_flags_range(t, 6, 8, MODIFY_REPLACE,
+ MAIL_SEEN);
+ mail_index_update_flags_range(t, 3, 6, MODIFY_ADD,
+ MAIL_FLAGGED);
+ mail_index_update_flags_range(t, 5, 7, MODIFY_ADD,
+ MAIL_DRAFT);
+ mail_index_update_flags_range(t, 6, 6, MODIFY_REPLACE,
+ MAIL_SEEN | MAIL_ANSWERED);
+ mail_index_update_flags_range(t, 5, 10, MODIFY_REMOVE,
+ MAIL_ANSWERED);
+ mail_index_update_flags_range(t, 7, 12, MODIFY_ADD,
+ MAIL_DELETED);
+
+ updates = array_get(&t->updates, &count);
+ test_assert(count == 7);
+ test_assert(updates[0].uid1 == 3);
+ test_assert(updates[0].uid2 == 4);
+ test_assert(updates[0].add_flags == MAIL_FLAGGED);
+ test_assert(updates[0].remove_flags == 0);
+ test_assert(updates[1].uid1 == 5);
+ test_assert(updates[1].uid2 == 5);
+ test_assert(updates[1].add_flags == (MAIL_DRAFT | MAIL_FLAGGED));
+ test_assert(updates[1].remove_flags == MAIL_ANSWERED);
+ test_assert(updates[2].uid1 == 6);
+ test_assert(updates[2].uid2 == 6);
+ test_assert(updates[2].add_flags == MAIL_SEEN);
+ test_assert(updates[2].remove_flags == (MAIL_ANSWERED | MAIL_FLAGGED | MAIL_DELETED | MAIL_DRAFT));
+ test_assert(updates[3].uid1 == 7);
+ test_assert(updates[3].uid2 == 7);
+ test_assert(updates[3].add_flags == (MAIL_SEEN | MAIL_DRAFT | MAIL_DELETED));
+ test_assert(updates[3].remove_flags == (MAIL_ANSWERED | MAIL_FLAGGED));
+ test_assert(updates[4].uid1 == 8);
+ test_assert(updates[4].uid2 == 8);
+ test_assert(updates[4].add_flags == (MAIL_SEEN | MAIL_DELETED));
+ test_assert(updates[4].remove_flags == (MAIL_ANSWERED | MAIL_FLAGGED | MAIL_DRAFT));
+ test_assert(updates[5].uid1 == 9);
+ test_assert(updates[5].uid2 == 10);
+ test_assert(updates[5].add_flags == MAIL_DELETED);
+ test_assert(updates[5].remove_flags == MAIL_ANSWERED);
+ test_assert(updates[6].uid1 == 11);
+ test_assert(updates[6].uid2 == 12);
+ test_assert(updates[6].add_flags == MAIL_DELETED);
+ test_assert(updates[6].remove_flags == 0);
+
+ test_end();
+
+ mail_index_transaction_cleanup(t);
+}
+
+static void
+flags_array_check(struct mail_index_transaction *t,
+ const enum mail_flags *flags, unsigned int msg_count)
+{
+ const struct mail_index_flag_update *updates;
+ unsigned int i, count, seq;
+
+ if (array_is_created(&t->updates))
+ updates = array_get(&t->updates, &count);
+ else {
+ updates = NULL;
+ count = 0;
+ }
+ for (seq = 1, i = 0; i < count; i++) {
+ if (i > 0) {
+ test_assert(updates[i-1].uid2 < updates[i].uid1);
+ test_assert(updates[i-1].uid2 + 1 != updates[i].uid1 ||
+ updates[i-1].add_flags != updates[i].add_flags ||
+ updates[i-1].remove_flags != updates[i].remove_flags);
+ }
+ for (; seq != updates[i].uid1; seq++)
+ test_assert(flags[seq] == 0);
+ for (; seq <= updates[i].uid2; seq++)
+ test_assert(flags[seq] == updates[i].add_flags);
+ }
+ for (; seq <= msg_count; seq++)
+ test_assert(flags[seq] == 0);
+}
+
+static void test_mail_index_flag_update_random(void)
+{
+ struct mail_index_transaction *t;
+ unsigned int r, seq1, seq2, seq;
+ enum mail_flags *flags, change;
+ enum modify_type modify_type;
+
+ hdr.messages_count = 20;
+ t = mail_index_transaction_new();
+
+ test_begin("mail index flag update random");
+
+ flags = t_new(enum mail_flags, hdr.messages_count + 1);
+ for (r = 0; r < 1000; r++) {
+ change = i_rand_limit(MAIL_FLAGS_NONRECENT + 1);
+ seq1 = i_rand_minmax(1, hdr.messages_count);
+ seq2 = seq1 == hdr.messages_count ? seq1 :
+ i_rand_minmax(seq1, hdr.messages_count);
+
+ switch (i_rand_limit(3)) {
+ case 0:
+ modify_type = MODIFY_ADD;
+ for (seq = seq1; seq <= seq2; seq++)
+ flags[seq] |= change;
+ break;
+ case 1:
+ modify_type = MODIFY_REMOVE;
+ for (seq = seq1; seq <= seq2; seq++)
+ flags[seq] &= ENUM_NEGATE(change);
+ break;
+ case 2:
+ modify_type = MODIFY_REPLACE;
+ for (seq = seq1; seq <= seq2; seq++)
+ flags[seq] = change;
+ break;
+ default:
+ i_unreached();
+ }
+ mail_index_update_flags_range(t, seq1, seq2, modify_type,
+ change);
+ flags_array_check(t, flags, hdr.messages_count);
+ }
+ test_end();
+
+ mail_index_transaction_cleanup(t);
+}
+
+static void test_mail_index_cancel_flag_updates(void)
+{
+ struct mail_index_transaction *t;
+ const struct mail_index_flag_update *updates;
+ unsigned int count;
+
+ hdr.messages_count = 20;
+ t = mail_index_transaction_new();
+
+ test_begin("mail index cancel flag updates");
+
+ mail_index_update_flags_range(t, 5, 7, MODIFY_REPLACE, 0);
+ updates = array_get(&t->updates, &count);
+ test_assert(count == 1);
+ test_assert(updates[0].uid1 == 5 && updates[0].uid2 == 7);
+ test_assert(mail_index_cancel_flag_updates(t, 5));
+ test_assert(updates[0].uid1 == 6 && updates[0].uid2 == 7);
+ test_assert(mail_index_cancel_flag_updates(t, 7));
+ test_assert(updates[0].uid1 == 6 && updates[0].uid2 == 6);
+ test_assert(mail_index_cancel_flag_updates(t, 6));
+ test_assert(!array_is_created(&t->updates));
+
+ mail_index_update_flags_range(t, 5, 7, MODIFY_REPLACE, 0);
+ test_assert(mail_index_cancel_flag_updates(t, 6));
+ updates = array_get(&t->updates, &count);
+ test_assert(count == 2);
+ test_assert(updates[0].uid1 == 5 && updates[0].uid2 == 5);
+ test_assert(updates[1].uid1 == 7 && updates[1].uid2 == 7);
+
+ test_end();
+
+ mail_index_transaction_cleanup(t);
+}
+
/* Flag updates targeting messages appended within the same transaction
   must be merged directly into the pending append records (t->appends)
   instead of being queued in t->updates. A range spanning both existing
   and appended mails is split so only the existing part reaches
   t->updates. */
static void test_mail_index_flag_update_appends(void)
{
	struct mail_index_transaction *t;
	const struct mail_index_record *appends;
	const struct mail_index_flag_update *updates;
	unsigned int count;
	uint32_t seq;

	/* 4 existing messages -> appended mails get seqs 5..7 */
	hdr.messages_count = 4;
	t = mail_index_transaction_new();

	test_begin("mail index flag update appends");
	mail_index_append(t, 0, &seq);
	test_assert(seq == 5);
	mail_index_append(t, 0, &seq);
	test_assert(seq == 6);
	mail_index_append(t, 0, &seq);
	test_assert(seq == 7);

	/* overlapping REPLACE/ADD/REMOVE ranges over the appends */
	mail_index_update_flags_range(t, 5, 6, MODIFY_REPLACE,
				      MAIL_SEEN | MAIL_FLAGGED);
	mail_index_update_flags_range(t, 6, 7, MODIFY_ADD,
				      MAIL_DRAFT | MAIL_FLAGGED);
	mail_index_update_flags_range(t, 5, 7, MODIFY_REMOVE,
				      MAIL_FLAGGED);

	/* the updates were applied straight to the append records */
	appends = array_get(&t->appends, &count);
	test_assert(count == 3);
	test_assert(appends[0].flags == MAIL_SEEN);
	test_assert(appends[1].flags == (MAIL_SEEN | MAIL_DRAFT));
	test_assert(appends[2].flags == MAIL_DRAFT);

	/* mixed existing/appends */
	mail_index_update_flags_range(t, 4, 5, MODIFY_ADD,
				      MAIL_ANSWERED);
	test_assert(appends[0].flags == (MAIL_SEEN | MAIL_ANSWERED));

	/* only the existing message (seq 4) was recorded in t->updates */
	updates = array_get(&t->updates, &count);
	test_assert(count == 1);
	test_assert(updates[0].uid1 == 4);
	test_assert(updates[0].uid2 == 4);
	test_assert(updates[0].add_flags == MAIL_ANSWERED);
	test_end();

	mail_index_transaction_cleanup(t);
}
+
+static bool test_flag_update_pos(struct mail_index_transaction *t,
+ uint32_t seq, unsigned int idx)
+{
+ unsigned int i, j, count;
+
+ count = array_count(&t->updates);
+ for (i = 0; i < idx; i++) {
+ for (j = idx + 1; j <= count; j++) {
+ if (mail_index_transaction_get_flag_update_pos(t, i, j, seq) != idx) {
+ test_assert(FALSE);
+ return FALSE;
+ }
+ }
+ }
+ return TRUE;
+}
+
+static void test_mail_index_transaction_get_flag_update_pos(void)
+{
+ struct mail_index_transaction *t;
+
+ test_begin("mail index transaction get flag update pos");
+
+ hdr.messages_count = 10;
+ t = mail_index_transaction_new();
+ mail_index_update_flags_range(t, 1, 1, MODIFY_REPLACE, 0);
+ mail_index_update_flags_range(t, 3, 4, MODIFY_REPLACE, 0);
+ mail_index_update_flags_range(t, 6, 7, MODIFY_REPLACE, 0);
+ mail_index_update_flags_range(t, 9, 10, MODIFY_REPLACE, 0);
+
+ test_assert(test_flag_update_pos(t, 1, 0));
+ test_assert(test_flag_update_pos(t, 2, 1));
+ test_assert(test_flag_update_pos(t, 3, 1));
+ test_assert(test_flag_update_pos(t, 4, 1));
+ test_assert(test_flag_update_pos(t, 5, 2));
+ test_assert(test_flag_update_pos(t, 6, 2));
+ test_assert(test_flag_update_pos(t, 7, 2));
+ test_assert(test_flag_update_pos(t, 8, 3));
+ test_assert(test_flag_update_pos(t, 9, 3));
+ test_assert(test_flag_update_pos(t, 10, 3));
+ test_assert(test_flag_update_pos(t, 11, 4));
+ test_assert(test_flag_update_pos(t, 12, 4));
+ test_end();
+
+ mail_index_transaction_cleanup(t);
+}
+
/* mail_index_update_modseq() appends 64-bit modseqs split into
   high/low 32-bit halves. Updating the same UID again adds a new entry
   (no merging), and modseq=1 updates are dropped entirely. */
static void test_mail_index_modseq_update(void)
{
	struct mail_index_transaction *t;
	const struct mail_transaction_modseq_update *ups;
	unsigned int count;

	test_begin("mail index modseq update");

	hdr.messages_count = 10;
	t = mail_index_transaction_new();

	mail_index_update_modseq(t, 4, 0x8234fefa02747429ULL);
	mail_index_update_modseq(t, 6, 0x1234567890abcdefULL);
	mail_index_update_modseq(t, 2, 0xfeed);
	/* same UID as the first update -> separate 4th entry below */
	mail_index_update_modseq(t, 4, 2);
	/* modseq=1 updates are ignored: */
	mail_index_update_modseq(t, 5, 1);
	mail_index_update_modseq(t, 6, 1);

	ups = array_get(&t->modseq_updates, &count);
	test_assert(count == 4);
	test_assert(ups[0].uid == 4 &&
		    ups[0].modseq_high32 == 0x8234fefa &&
		    ups[0].modseq_low32 == 0x02747429);
	test_assert(ups[1].uid == 6 &&
		    ups[1].modseq_high32 == 0x12345678 &&
		    ups[1].modseq_low32 == 0x90abcdef);
	test_assert(ups[2].uid == 2 &&
		    ups[2].modseq_high32 == 0 &&
		    ups[2].modseq_low32 == 0xfeed);
	test_assert(ups[3].uid == 4 &&
		    ups[3].modseq_high32 == 0 &&
		    ups[3].modseq_low32 == 2);
	test_end();

	mail_index_transaction_cleanup(t);
}
+
/* Expunges are recorded in call order with their GUIDs;
   mail_index_expunge() (no GUID) stores an all-zero GUID. The
   expunges_nonsorted flag is set as soon as a UID arrives out of
   ascending order. */
static void test_mail_index_expunge(void)
{
	static guid_128_t empty_guid = { 0, };
	struct mail_index_transaction *t;
	const struct mail_transaction_expunge_guid *expunges;
	guid_128_t guid2, guid3, guid4;
	unsigned int i, count;

	test_begin("mail index expunge");

	hdr.messages_count = 10;
	t = mail_index_transaction_new();
	/* three distinct, easily recognizable GUID byte patterns */
	for (i = 0; i < sizeof(guid2); i++) {
		guid2[i] = i + 1;
		guid3[i] = i ^ 0xff;
		guid4[i] = i + 0x80;
	}

	mail_index_expunge_guid(t, 4, guid4);
	test_assert(!t->expunges_nonsorted);
	/* uid 2 < uid 4: the array is no longer sorted */
	mail_index_expunge_guid(t, 2, guid2);
	test_assert(t->expunges_nonsorted);
	mail_index_expunge_guid(t, 3, guid3);
	mail_index_expunge(t, 1);
	mail_index_expunge(t, 5);

	/* entries are kept in call order, GUID-less ones zero-filled */
	expunges = array_get(&t->expunges, &count);
	test_assert(count == 5);
	test_assert(expunges[0].uid == 4);
	test_assert(memcmp(expunges[0].guid_128, guid4, sizeof(guid4)) == 0);
	test_assert(expunges[1].uid == 2);
	test_assert(memcmp(expunges[1].guid_128, guid2, sizeof(guid2)) == 0);
	test_assert(expunges[2].uid == 3);
	test_assert(memcmp(expunges[2].guid_128, guid3, sizeof(guid3)) == 0);
	test_assert(expunges[3].uid == 1);
	test_assert(memcmp(expunges[3].guid_128, empty_guid, sizeof(empty_guid)) == 0);
	test_assert(expunges[4].uid == 5);
	test_assert(memcmp(expunges[4].guid_128, empty_guid, sizeof(empty_guid)) == 0);

	test_end();

	mail_index_transaction_cleanup(t);
}
+
/* Exercise mail_index_update_day_headers(): when "now" crosses into a
   new day, day_stamp advances and day_first_uid[] shifts right by the
   number of elapsed days, inserting the first pending append UID (9)
   at slot [0]. If "now" is on the same or an earlier day, nothing
   changes (the stamp never moves backwards). */
static void test_mail_index_update_day_first_uid(void)
{
	struct {
		uint32_t now;
		uint32_t old_day_stamp;
		uint32_t new_day_stamp;
		uint32_t new_day_first_uid[8];
	} tests[] = {
		/* 1487116800 = 2017-02-15 00:00:00 UTC */
		{ 1487116800, 1487116800, 1487116800, { 8, 7, 6, 5, 4, 3, 2, 1 } },
		/* still same day */
		{ 1487116800+3600*24-1, 1487116800, 1487116800, { 8, 7, 6, 5, 4, 3, 2, 1 } },
		/* one day earlier */
		{ 1487116800-1, 1487116800, 1487116800, { 8, 7, 6, 5, 4, 3, 2, 1 } },
		/* next day */
		{ 1487116800+3600*24, 1487116800, 1487116800+3600*24, { 9, 8, 7, 6, 5, 4, 3, 2 } },
		{ 1487116800+3600*24*2-1, 1487116800, 1487116800+3600*24, { 9, 8, 7, 6, 5, 4, 3, 2 } },
		/* 2 days */
		{ 1487116800+3600*24*2, 1487116800, 1487116800+3600*24*2, { 9, 8, 8, 7, 6, 5, 4, 3 } },
		/* 3 days */
		{ 1487116800+3600*24*3, 1487116800, 1487116800+3600*24*3, { 9, 8, 8, 8, 7, 6, 5, 4 } },
		/* 4 days */
		{ 1487116800+3600*24*4, 1487116800, 1487116800+3600*24*4, { 9, 8, 8, 8, 8, 7, 6, 5 } },
		/* 5 days */
		{ 1487116800+3600*24*5, 1487116800, 1487116800+3600*24*5, { 9, 8, 8, 8, 8, 8, 7, 6 } },
		/* 6 days */
		{ 1487116800+3600*24*6, 1487116800, 1487116800+3600*24*6, { 9, 8, 8, 8, 8, 8, 8, 7 } },
		/* 7 days */
		{ 1487116800+3600*24*7, 1487116800, 1487116800+3600*24*7, { 9, 8, 8, 8, 8, 8, 8, 8 } },
		/* 8 days */
		{ 1487116800+3600*24*8, 1487116800, 1487116800+3600*24*8, { 9, 8, 8, 8, 8, 8, 8, 8 } },
		/* 366 days */
		{ 1487116800+3600*24*366, 1487116800, 1487116800+3600*24*366, { 9, 8, 8, 8, 8, 8, 8, 8 } },
	};
	struct mail_index_transaction *t;
	struct mail_index_record *rec;
	unsigned int i, j;

	test_begin("mail index update day first uid");

	/* daylight savings times were confusing these tests, so we'll now
	   just assume that TZ=UTC */
	test_assert(timezone == 0);

	hdr.messages_count = 10;
	t = mail_index_transaction_new();
	t->view = t_new(struct mail_index_view, 1);
	t->view->map = t_new(struct mail_index_map, 1);

	/* one pending append with uid 9 -> becomes day_first_uid[0] */
	t_array_init(&t->appends, 1);
	rec = array_append_space(&t->appends);
	rec->uid = 9;

	for (i = 0; i < N_ELEMENTS(tests); i++) {
		/* start each case from day_first_uid = { 8..1 } */
		i_zero(&hdr);
		for (j = 0; j < N_ELEMENTS(hdr.day_first_uid); j++)
			hdr.day_first_uid[j] = 8-j;
		hdr.day_stamp = tests[i].old_day_stamp + timezone;
		memcpy(t->post_hdr_change, &hdr, sizeof(hdr));
		mail_index_update_day_headers(t, tests[i].now + timezone);

		struct mail_index_header new_hdr;
		memcpy(&new_hdr, t->post_hdr_change, sizeof(new_hdr));
		test_assert_idx(new_hdr.day_stamp == tests[i].new_day_stamp + timezone, i);
		test_assert_idx(memcmp(new_hdr.day_first_uid,
				       tests[i].new_day_first_uid,
				       sizeof(uint32_t) * 8) == 0, i);
	}

	test_end();
}
+
/* Run all transaction-update unit tests. The test list is
   NULL-terminated as required by test_run(). */
int main(void)
{
	static void (*const test_functions[])(void) = {
		test_mail_index_append,
		test_mail_index_flag_update_fastpath,
		test_mail_index_flag_update_simple_merges,
		test_mail_index_flag_update_complex_merges,
		test_mail_index_flag_update_random,
		test_mail_index_flag_update_appends,
		test_mail_index_cancel_flag_updates,
		test_mail_index_transaction_get_flag_update_pos,
		test_mail_index_modseq_update,
		test_mail_index_expunge,
		test_mail_index_update_day_first_uid,
		NULL
	};
	/* daylight saving time confuses things */
	env_put("TZ", "UTC");
	tzset();
	return test_run(test_functions);
}
diff --git a/src/lib-index/test-mail-index-write.c b/src/lib-index/test-mail-index-write.c
new file mode 100644
index 0000000..88eaa4a
--- /dev/null
+++ b/src/lib-index/test-mail-index-write.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2020 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "test-common.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+
+#define TEST_INDEX_FNAME ".test.index.write"
+#define TEST_INDEXID 123456
+#define LOG_FILE1_HEAD_OFFSET 200
+
/* Per-test knobs controlling the stubbed functions below: */
static bool expect_index_rewrite; /* mail_index_create_tmp_file() allowed */
static bool rotate_fail; /* make mail_transaction_log_rotate() fail */

/* Two fake transaction log files: log_file is the initial head;
   a successful rotation switches the head to log_file2, which chains
   back to log_file via prev_file_seq/prev_file_offset. */
static struct mail_transaction_log_file log_file = {
	.hdr = {
		.indexid = TEST_INDEXID,
		.file_seq = 1,
	},
};
static struct mail_transaction_log_file log_file2 = {
	.hdr = {
		.indexid = TEST_INDEXID,
		.file_seq = 2,
		.prev_file_seq = 1,
		.prev_file_offset = LOG_FILE1_HEAD_OFFSET,
	},
};
+
/* Stub: swallow index error messages; this test triggers failures on
   purpose and doesn't want them logged. */
void mail_index_set_error(struct mail_index *index ATTR_UNUSED,
			  const char *fmt ATTR_UNUSED, ...)
{
}
+
/* Stub: log unexpected syscall failures so they show up in test output. */
void mail_index_set_syscall_error(struct mail_index *index ATTR_UNUSED,
				  const char *function)
{
	i_error("%s() failed: %m", function);
}
+
/* Stub: log unexpected per-file syscall failures with the file path. */
void mail_index_file_set_syscall_error(struct mail_index *index ATTR_UNUSED,
				       const char *filepath,
				       const char *function)
{
	i_error("%s(%s) failed: %m", function, filepath);
}
+
+int mail_index_create_tmp_file(struct mail_index *index ATTR_UNUSED,
+ const char *path_prefix, const char **path_r)
+{
+ const char *path;
+ int fd;
+
+ test_assert(expect_index_rewrite);
+
+ path = *path_r = t_strconcat(path_prefix, ".tmp", NULL);
+ fd = open(path, O_RDWR|O_CREAT, 0600);
+ if (fd == -1) {
+ i_error("creat() failed: %m");
+ return -1;
+ }
+ return fd;
+}
+
/* Stub: refuse to fall back to an in-memory index, so the on-disk
   error paths of mail_index_write() are exercised instead. */
int mail_index_move_to_memory(struct mail_index *index ATTR_UNUSED)
{
	return -1;
}
+
+int mail_transaction_log_rotate(struct mail_transaction_log *log, bool reset)
+{
+ i_assert(!reset);
+
+ if (rotate_fail)
+ return -1;
+
+ log_file.next = &log_file2;
+ log->head = &log_file2;
+ return 0;
+}
+
/* Drive mail_index_write() against fully faked index/log structures,
   covering three scenarios: failed rotation without an index rewrite,
   failed rotation with an index rewrite, and a successful rotation. */
static void test_mail_index_write(void)
{
	struct mail_transaction_log log = {
		.head = &log_file,
		.files = &log_file,
	};
	struct mail_index_record_map rec_map = {
		.records_count = 0,
	};
	buffer_t hdr_copy;
	/* map header claims its data is synced up to log file 1 */
	struct mail_index_map map = {
		.hdr = {
			.indexid = TEST_INDEXID,
			.log_file_seq = 1,
			.log_file_tail_offset = 100,
			.log_file_head_offset = LOG_FILE1_HEAD_OFFSET,
		},
		.hdr_copy_buf = &hdr_copy,
		.rec_map = &rec_map,
	};
	buffer_create_from_const_data(&hdr_copy, &map.hdr, sizeof(map.hdr));
	struct mail_index index = {
		.event = event_create(NULL),
		.log = &log,
		.map = &map,
		.dir = ".",
		.fd = -1,
		.indexid = TEST_INDEXID,
		.filepath = TEST_INDEX_FNAME,
		.log_sync_locked = TRUE,
	};

	test_begin("test_mail_index_write()");

	/* test failed rotation, no index rewrite */
	rotate_fail = TRUE;
	expect_index_rewrite = FALSE;
	test_assert(!index.reopen_main_index);
	index.fd = 1; /* anything but -1 */
	mail_index_write(&index, TRUE, "testing");
	/* head unchanged, but a reopen of the main index is requested */
	test_assert(log.head == log.files);
	test_assert(index.reopen_main_index);

	/* test failed rotation, with index rewrite */
	expect_index_rewrite = TRUE;
	index.reopen_main_index = FALSE;
	index.fd = -1; /* no open index file -> rewrite happens */
	mail_index_write(&index, TRUE, "testing");
	test_assert(log.head == log.files);
	test_assert(!index.reopen_main_index);

	/* test successful rotation, with index rewrite */
	rotate_fail = FALSE;
	mail_index_write(&index, TRUE, "testing");
	/* the rotate stub switched the head to log_file2 */
	test_assert(log.head != log.files && log.head == &log_file2);
	test_assert(!index.reopen_main_index);

	event_unref(&index.event);
	i_unlink(TEST_INDEX_FNAME);
	test_end();
}
+
/* Run the mail_index_write() unit test. */
int main(void)
{
	static void (*const test_functions[])(void) = {
		test_mail_index_write,
		NULL
	};
	return test_run(test_functions);
}
diff --git a/src/lib-index/test-mail-index.c b/src/lib-index/test-mail-index.c
new file mode 100644
index 0000000..3e2fd02
--- /dev/null
+++ b/src/lib-index/test-mail-index.c
@@ -0,0 +1,169 @@
+/* Copyright (c) 2019 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "test-common.h"
+#include "test-mail-index.h"
+#include "mail-transaction-log-private.h"
+
/* Rotate the index twice via mail_index_reset() while an open view and
   a second opened index still reference the old log files, then look up
   an old log file from the second index. Verifies nothing crashes when
   the log head has no extra references. */
static void test_mail_index_rotate(void)
{
	struct mail_index *index, *index2;
	struct mail_index_view *view;
	struct mail_index_transaction *trans;
	struct mail_transaction_log_file *file;
	const char *reason;

	test_begin("mail index rotate");
	index = test_mail_index_init();
	index2 = test_mail_index_open();
	view = mail_index_view_open(index);

	/* First rotation of the index. The view will point to the old index. */
	trans = mail_index_transaction_begin(view, 0);
	mail_index_reset(trans);
	test_assert(mail_index_transaction_commit(&trans) == 0);

	/* Second rotation of the index. The log head doesn't have any extra
	   references. */
	trans = mail_index_transaction_begin(view, 0);
	mail_index_reset(trans);
	test_assert(mail_index_transaction_commit(&trans) == 0);

	/* The 2nd index's log head also doesn't have any extra references.
	   Check that it doesn't crash. */
	test_assert(mail_transaction_log_find_file(index2->log, 3, FALSE, &file, &reason) == 0);

	mail_index_view_close(&view);
	test_mail_index_deinit(&index);
	test_mail_index_deinit(&index2);
	test_end();
}
+
/* Rotate index2's transaction log by writing the index while the log is
   sync-locked, then commit a transaction that registers a header
   extension and a record extension, appends one mail with the given
   uid, and sets its record-extension value. */
static void
test_mail_index_new_extension_rotate_write(struct mail_index *index2,
					   uint32_t uid)
{
	struct mail_index_view *view2;
	struct mail_index_transaction *trans;
	uint32_t hdr_ext_id, rec_ext_id, file_seq, seq, rec_ext = 0x12345678;
	uoff_t file_offset;

	/* Rotate the index in the index */
	test_assert(mail_transaction_log_sync_lock(index2->log, "test",
						   &file_seq, &file_offset) == 0);
	mail_index_write(index2, TRUE, "test");
	mail_transaction_log_sync_unlock(index2->log, "test");

	/* Write a new extension header to the 2nd index. */
	hdr_ext_id = mail_index_ext_register(index2, "test",
					     sizeof(hdr_ext_id), 0, 0);
	rec_ext_id = mail_index_ext_register(index2, "test-rec", 0,
					     sizeof(uint32_t), sizeof(uint32_t));
	view2 = mail_index_view_open(index2);
	trans = mail_index_transaction_begin(view2,
		MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
	mail_index_update_header_ext(trans, hdr_ext_id, 0,
				     &hdr_ext_id, sizeof(hdr_ext_id));
	mail_index_append(trans, uid, &seq);
	mail_index_update_ext(trans, seq, rec_ext_id, &rec_ext, NULL);
	test_assert(mail_index_transaction_commit(&trans) == 0);
	mail_index_view_close(&view2);
}
+
+static void test_mail_index_new_extension_sync(struct mail_index_view *view)
+{
+ struct mail_index_view_sync_ctx *sync_ctx;
+ struct mail_index_view_sync_rec sync_rec;
+ bool delayed_expunges;
+
+ test_assert(mail_index_refresh(view->index) == 0);
+ sync_ctx = mail_index_view_sync_begin(view,
+ MAIL_INDEX_VIEW_SYNC_FLAG_NOEXPUNGES);
+ test_assert(!mail_index_view_sync_next(sync_ctx, &sync_rec));
+ test_assert(mail_index_view_sync_commit(&sync_ctx, &delayed_expunges) == 0);
+}
+
/* Register a record extension in one index, write extension data through
   a second index across several log rotations, and verify a stale view
   on the first index can still sync (regenerating missing logs) and
   resolve the extension records by ID afterwards. */
static void test_mail_index_new_extension(void)
{
	struct mail_index *index, *index2;
	struct mail_index_view *view, *view2;
	struct mail_index_transaction *trans;
	uint32_t seq, rec_ext_id, rec_ext = 0x12345678;

	test_begin("mail index new extension");
	index = test_mail_index_init();
	index2 = test_mail_index_open();
	view = mail_index_view_open(index);

	rec_ext_id = mail_index_ext_register(index, "test-rec", 0,
					     sizeof(uint32_t), sizeof(uint32_t));

	/* Save two mails */
	uint32_t uid_validity = 123456;
	trans = mail_index_transaction_begin(view,
		MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
	mail_index_update_header(trans,
		offsetof(struct mail_index_header, uid_validity),
		&uid_validity, sizeof(uid_validity), TRUE);
	mail_index_append(trans, 1, &seq);
	mail_index_update_ext(trans, seq, rec_ext_id, &rec_ext, NULL);
	mail_index_append(trans, 2, &seq);
	mail_index_update_ext(trans, seq, rec_ext_id, &rec_ext, NULL);
	test_assert(mail_index_transaction_commit(&trans) == 0);

	/* refresh indexes and view */
	test_assert(mail_index_refresh(index2) == 0);
	mail_index_view_close(&view);
	view = mail_index_view_open(index);

	/* Expunge the mail in the 2nd index */
	view2 = mail_index_view_open(index2);
	trans = mail_index_transaction_begin(view2,
		MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
	mail_index_expunge(trans, 1);
	test_assert(mail_index_transaction_commit(&trans) == 0);
	mail_index_view_close(&view2);

	/* Sync the first view without expunges */
	test_mail_index_new_extension_sync(view);

	/* rotate three times, appending uids 3..5 with extension data */
	for (unsigned int i = 0; i < 3; i++)
		test_mail_index_new_extension_rotate_write(index2, 3 + i);

	/* Sync the first view. It needs to generate the missing view. */
	test_expect_error_string("generating missing logs");
	test_mail_index_new_extension_sync(view);
	test_expect_no_more_errors();
	test_assert(mail_index_get_header(view)->messages_count == 5);

	/* Make sure the extensions records are still there.
	   Note that this works, because the extensions are looked up from the
	   newly refreshed index, not the old index. */
	for (seq = 1; seq <= 5; seq++) {
		const void *data;
		bool expunged;
		mail_index_lookup_ext(view, seq, rec_ext_id, &data, &expunged);
		test_assert_idx(memcmp(data, &rec_ext, sizeof(rec_ext)) == 0, seq);
	}

	/* Once more rotate and write using the new extension */
	test_mail_index_new_extension_rotate_write(index2, 6);
	/* Make sure the first view understands the new extension by ID */
	test_mail_index_new_extension_sync(view);

	mail_index_view_close(&view);
	test_mail_index_deinit(&index);
	test_mail_index_deinit(&index2);
	test_end();
}
+
/* Run the mail-index rotation/extension integration tests. */
int main(void)
{
	static void (*const test_functions[])(void) = {
		test_mail_index_rotate,
		test_mail_index_new_extension,
		NULL
	};
	return test_run(test_functions);
}
diff --git a/src/lib-index/test-mail-index.h b/src/lib-index/test-mail-index.h
new file mode 100644
index 0000000..75b343f
--- /dev/null
+++ b/src/lib-index/test-mail-index.h
@@ -0,0 +1,51 @@
+#ifndef TEST_MAIL_INDEX_H
+#define TEST_MAIL_INDEX_H
+
+#include "ioloop.h"
+#include "unlink-directory.h"
+#include "mail-index-private.h"
+
+#define TESTDIR_NAME ".dovecot.test"
+
+static inline struct mail_index *test_mail_index_open(void)
+{
+ struct mail_index *index;
+
+ index = mail_index_alloc(NULL, TESTDIR_NAME, "test.dovecot.index");
+ test_assert(mail_index_open_or_create(index, MAIL_INDEX_OPEN_FLAG_CREATE) == 0);
+ return index;
+}
+
/* Create a fresh test directory (best-effort removal of any leftover
   one first) and open a new index in it. ioloop_time is pinned so
   index timestamps are deterministic. */
static inline struct mail_index *test_mail_index_init(void)
{
	const char *error;

	/* ignore failure: the directory may simply not exist yet */
	(void)unlink_directory(TESTDIR_NAME, UNLINK_DIRECTORY_FLAG_RMDIR, &error);
	if (mkdir(TESTDIR_NAME, 0700) < 0)
		i_error("mkdir(%s) failed: %m", TESTDIR_NAME);

	ioloop_time = 1;

	return test_mail_index_open();
}
+
/* Close and free the test index; *index is set to NULL by
   mail_index_free(). */
static inline void test_mail_index_close(struct mail_index **index)
{
	mail_index_close(*index);
	mail_index_free(index);
}
+
/* Remove the test directory and everything in it (best-effort). */
static inline void test_mail_index_delete(void)
{
	const char *error;

	(void)unlink_directory(TESTDIR_NAME, UNLINK_DIRECTORY_FLAG_RMDIR, &error);
}
+
/* Tear down a test index: close/free it and delete the test directory. */
static inline void test_mail_index_deinit(struct mail_index **index)
{
	test_mail_index_close(index);
	test_mail_index_delete();
}
+
+#endif
diff --git a/src/lib-index/test-mail-transaction-log-append.c b/src/lib-index/test-mail-transaction-log-append.c
new file mode 100644
index 0000000..4029432
--- /dev/null
+++ b/src/lib-index/test-mail-transaction-log-append.c
@@ -0,0 +1,176 @@
+/* Copyright (c) 2009-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "buffer.h"
+#include "test-common.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+
+#include <sys/stat.h>
+
/* Controls the mail_transaction_log_lock_head() stub below. */
static bool log_lock_failure = FALSE;

/* Stub: suppress syscall error logging; failures here are intentional. */
void mail_index_file_set_syscall_error(struct mail_index *index ATTR_UNUSED,
				       const char *filepath ATTR_UNUSED,
				       const char *function ATTR_UNUSED)
{
}

/* Stub: locking succeeds unless the test set log_lock_failure. */
int mail_transaction_log_lock_head(struct mail_transaction_log *log ATTR_UNUSED,
				   const char *lock_reason ATTR_UNUSED)
{
	return log_lock_failure ? -1 : 0;
}

/* Stub: nothing to unlock in these tests. */
void mail_transaction_log_file_unlock(struct mail_transaction_log_file *file ATTR_UNUSED,
				      const char *lock_reason ATTR_UNUSED) {}

/* Simplified modseq tracking: only expunge records bump the modseq, by
   exactly one. Keeps expected modseq values in the tests trivial. */
void mail_transaction_update_modseq(const struct mail_transaction_header *hdr,
				    const void *data ATTR_UNUSED,
				    uint64_t *cur_modseq,
				    unsigned int version ATTR_UNUSED)
{
	if ((hdr->type & MAIL_TRANSACTION_EXPUNGE) != 0)
		*cur_modseq += 1;
}

/* Stub: refuse to fall back to an in-memory index. */
int mail_index_move_to_memory(struct mail_index *index ATTR_UNUSED)
{
	return -1;
}
+
/* Append an APPEND and an EXPUNGE record and walk the resulting buffer,
   checking the exact on-disk layout: boundary record first, then each
   transaction header followed by its payload. Only the expunge bumps
   the modseq (per the mail_transaction_update_modseq() stub). The
   caller has already called test_begin(); this function calls
   test_end() and resets the buffer for the next test. */
static void test_append_expunge(struct mail_transaction_log *log)
{
	static unsigned int buf[] = { 0x12345678, 0xabcdef09 };
	struct mail_transaction_log_file *file = log->head;
	struct mail_transaction_log_append_ctx *ctx;
	const struct mail_transaction_header *hdr;
	const unsigned int *bufp;
	const struct mail_transaction_boundary *bound;

	test_assert(mail_transaction_log_append_begin(log->index, MAIL_TRANSACTION_EXTERNAL, &ctx) == 0);
	mail_transaction_log_append_add(ctx, MAIL_TRANSACTION_APPEND,
					&buf[0], sizeof(buf[0]));
	/* append doesn't change the modseq */
	test_assert(ctx->new_highest_modseq == 0);
	mail_transaction_log_append_add(ctx, MAIL_TRANSACTION_EXPUNGE,
					&buf[1], sizeof(buf[1]));
	/* expunge bumps it to 1 */
	test_assert(ctx->new_highest_modseq == 1);

	test_assert(mail_transaction_log_append_commit(&ctx) == 0);
	test_assert(file->sync_highest_modseq == 1);
	test_assert(file->sync_offset == file->buffer_offset + file->buffer->used);

	/* first record: the boundary, covering the whole write */
	hdr = file->buffer->data;
	test_assert(hdr->type == (MAIL_TRANSACTION_BOUNDARY |
				  MAIL_TRANSACTION_EXTERNAL));
	test_assert(mail_index_offset_to_uint32(hdr->size) == sizeof(*hdr) + sizeof(*bound));
	bound = (const void *)(hdr + 1);
	test_assert(bound->size == file->buffer->used);
	hdr = (const void *)(bound + 1);

	/* second record: the append with its payload */
	test_assert(hdr->type == (MAIL_TRANSACTION_APPEND |
				  MAIL_TRANSACTION_EXTERNAL));
	test_assert(mail_index_offset_to_uint32(hdr->size) == sizeof(*hdr) + sizeof(buf[0]));
	bufp = (const void *)(hdr + 1);
	test_assert(*bufp == buf[0]);

	/* third record: the expunge; EXPUNGE_PROT was added to its type */
	hdr = (const void *)(bufp + 1);
	test_assert(hdr->type == (MAIL_TRANSACTION_EXPUNGE |
				  MAIL_TRANSACTION_EXPUNGE_PROT |
				  MAIL_TRANSACTION_EXTERNAL));
	test_assert(mail_index_offset_to_uint32(hdr->size) == sizeof(*hdr) + sizeof(buf[0]));
	bufp = (const void *)(hdr + 1);
	test_assert(*bufp == buf[1]);

	/* nothing after the last record */
	test_assert(file->buffer->used == (size_t)((const char *)(bufp+1) - (const char *)file->buffer->data));

	buffer_set_used_size(file->buffer, 0);
	file->buffer_offset = 0;
	test_end();
}
+
/* An index-sync transaction with no other changes still writes a
   header-update record storing max_tail_offset into the index header's
   log_file_tail_offset field. Verify that record's exact layout. */
static void test_append_sync_offset(struct mail_transaction_log *log)
{
	struct mail_transaction_log_file *file = log->head;
	struct mail_transaction_log_append_ctx *ctx;
	const struct mail_transaction_header *hdr;
	const struct mail_transaction_header_update *u;
	const uint32_t *offsetp;

	test_begin("transaction log append: append_sync_offset only");
	test_assert(mail_transaction_log_append_begin(log->index, 0, &ctx) == 0);
	ctx->index_sync_transaction = TRUE;
	file->max_tail_offset = 123;
	test_assert(mail_transaction_log_append_commit(&ctx) == 0);

	/* exactly one header-update record was written */
	test_assert(file->buffer->used == sizeof(*hdr) + sizeof(*u) + sizeof(*offsetp));
	hdr = file->buffer->data;
	test_assert(hdr->type == MAIL_TRANSACTION_HEADER_UPDATE);
	test_assert(mail_index_offset_to_uint32(hdr->size) == file->buffer->used);
	u = (const void *)(hdr + 1);
	test_assert(u->offset == offsetof(struct mail_index_header, log_file_tail_offset));
	test_assert(u->size == sizeof(*offsetp));
	offsetp = (const void *)(u+1);
	test_assert(*offsetp == 123);

	test_end();
}
+
/* Top-level driver: builds a minimal in-memory log/index pair, then runs
   the append/expunge layout test, a lock-failure test, the sync-offset
   test, and finally a garbage-truncation test using a real temp file. */
static void test_mail_transaction_log_append(void)
{
	struct mail_transaction_log *log;
	struct mail_transaction_log_file *file;
	struct mail_transaction_log_append_ctx *ctx;
	char tmp_path[] = "/tmp/dovecot.test.XXXXXX";
	struct stat st;
	int fd;

	/* temp file is used only by the truncation test below */
	fd = mkstemp(tmp_path);
	if (fd == -1)
		i_fatal("mkstemp(%s) failed: %m", tmp_path);

	test_begin("transaction log append");
	log = i_new(struct mail_transaction_log, 1);
	log->index = i_new(struct mail_index, 1);
	log->index->log = log;
	log->head = file = i_new(struct mail_transaction_log_file, 1);
	file->fd = -1;

	/* calls test_end() for the "transaction log append" test above */
	test_append_expunge(log);

	test_begin("transaction log append: lock failure");
	log_lock_failure = TRUE;
	test_assert(mail_transaction_log_append_begin(log->index, 0, &ctx) < 0);
	log_lock_failure = FALSE;
	test_end();

	test_append_sync_offset(log);

	/* do this after head->buffer has already been initialized */
	test_begin("transaction log append: garbage truncation");
	/* last_size (3) > sync_offset (1): the trailing bytes are garbage
	   and the file should be truncated back to sync_offset */
	file->sync_offset = 1;
	file->buffer_offset = 1;
	file->last_size = 3;
	file->fd = fd;
	test_assert(mail_transaction_log_append_begin(log->index, 0, &ctx) == 0);
	test_assert(mail_transaction_log_append_commit(&ctx) == 0);
	if (fstat(fd, &st) < 0) i_fatal("fstat() failed: %m");
	test_assert(st.st_size == 1);
	file->fd = -1;
	test_end();

	buffer_free(&log->head->buffer);
	i_free(log->head);
	i_free(log->index);
	i_free(log);
	i_unlink(tmp_path);
}
+
/* Run the transaction-log append unit tests. */
int main(void)
{
	static void (*const test_functions[])(void) = {
		test_mail_transaction_log_append,
		NULL
	};
	return test_run(test_functions);
}
diff --git a/src/lib-index/test-mail-transaction-log-file.c b/src/lib-index/test-mail-transaction-log-file.c
new file mode 100644
index 0000000..6f591ce
--- /dev/null
+++ b/src/lib-index/test-mail-transaction-log-file.c
@@ -0,0 +1,418 @@
+/* Copyright (c) 2009-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "ioloop.h"
+#include "test-common.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-private.h"
+
+#define TEST_LOG_VERSION MAIL_TRANSACTION_LOG_VERSION_FULL(1, 3)
+
+#define INITIAL_MODSEQ 100
+
+/* Table-driven cases for mail_transaction_update_modseq(). Each entry
+   describes one transaction: its type flags, the log format version it
+   is parsed as, the modseq expected afterwards (NOUPDATE = unchanged
+   INITIAL_MODSEQ, UPDATE = incremented by one), the record count, and
+   optional record payload for flag/modseq updates. */
+struct update_modseq_test {
+ enum mail_transaction_type type;
+ unsigned int version;
+#define NOUPDATE (INITIAL_MODSEQ)
+#define UPDATE (INITIAL_MODSEQ+1)
+ uint64_t expected_modseq;
+ unsigned int count;
+ union {
+ const struct mail_transaction_flag_update *flag_update;
+ const struct mail_transaction_modseq_update *modseq_update;
+ } v;
+} update_modseq_tests[] = {
+ /* expunges: increase modseq */
+ { MAIL_TRANSACTION_EXPUNGE | MAIL_TRANSACTION_EXPUNGE_PROT | MAIL_TRANSACTION_EXTERNAL, TEST_LOG_VERSION, UPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXPUNGE_GUID | MAIL_TRANSACTION_EXPUNGE_PROT | MAIL_TRANSACTION_EXTERNAL, TEST_LOG_VERSION, UPDATE, 1, { } },
+ /* expunges: don't increase modseq */
+ { MAIL_TRANSACTION_EXPUNGE | MAIL_TRANSACTION_EXPUNGE_PROT, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXPUNGE_GUID | MAIL_TRANSACTION_EXPUNGE_PROT, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXPUNGE | MAIL_TRANSACTION_EXTERNAL, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXPUNGE_GUID | MAIL_TRANSACTION_EXTERNAL, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+
+ /* flag changes: don't increase modseq */
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = 0 }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_INDEX_MAIL_FLAG_BACKEND }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .remove_flags = MAIL_INDEX_MAIL_FLAG_BACKEND }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_INDEX_MAIL_FLAG_DIRTY }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .remove_flags = MAIL_INDEX_MAIL_FLAG_DIRTY }
+ }
+ } },
+ /* flag changes: increase modseq */
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_SEEN }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .remove_flags = MAIL_SEEN }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_SEEN | MAIL_INDEX_MAIL_FLAG_BACKEND }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_SEEN | MAIL_INDEX_MAIL_FLAG_DIRTY }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .remove_flags = MAIL_SEEN | MAIL_INDEX_MAIL_FLAG_BACKEND }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .remove_flags = MAIL_SEEN | MAIL_INDEX_MAIL_FLAG_DIRTY }
+ }
+ } },
+ /* a single modseq-updating record among non-updating ones suffices */
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 2, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_INDEX_MAIL_FLAG_DIRTY },
+ { .uid1 = 3, .uid2 = 4, .add_flags = MAIL_SEEN }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = 0, .modseq_inc_flag = 1 }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, TEST_LOG_VERSION, UPDATE, 2, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_INDEX_MAIL_FLAG_DIRTY },
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_INDEX_MAIL_FLAG_DIRTY, .modseq_inc_flag = 1 }
+ }
+ } },
+ /* flag changes: increase modseq with old version */
+ { MAIL_TRANSACTION_FLAG_UPDATE, MAIL_TRANSACTION_LOG_VERSION_FULL(1, 2), UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_INDEX_MAIL_FLAG_BACKEND }
+ }
+ } },
+ { MAIL_TRANSACTION_FLAG_UPDATE, MAIL_TRANSACTION_LOG_VERSION_FULL(1, 2), UPDATE, 1, {
+ .flag_update = (const struct mail_transaction_flag_update[]) {
+ { .uid1 = 1, .uid2 = 2, .add_flags = MAIL_INDEX_MAIL_FLAG_DIRTY }
+ }
+ } },
+ /* modseq updates: don't increase modseq
+    (target modseq <= current INITIAL_MODSEQ=100) */
+ { MAIL_TRANSACTION_MODSEQ_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, {
+ .modseq_update = (const struct mail_transaction_modseq_update[]) {
+ { .uid = 1, .modseq_low32 = 50, .modseq_high32 = 0 }
+ }
+ } },
+ { MAIL_TRANSACTION_MODSEQ_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, {
+ .modseq_update = (const struct mail_transaction_modseq_update[]) {
+ { .uid = 1, .modseq_low32 = 100, .modseq_high32 = 0 }
+ }
+ } },
+ /* modseq updates: increase modseq (jumps straight to the target) */
+ { MAIL_TRANSACTION_MODSEQ_UPDATE, TEST_LOG_VERSION, 500, 1, {
+ .modseq_update = (const struct mail_transaction_modseq_update[]) {
+ { .uid = 1, .modseq_low32 = 500, .modseq_high32 = 0 }
+ }
+ } },
+ { MAIL_TRANSACTION_MODSEQ_UPDATE, TEST_LOG_VERSION, 500, 2, {
+ .modseq_update = (const struct mail_transaction_modseq_update[]) {
+ { .uid = 1, .modseq_low32 = 50, .modseq_high32 = 0 },
+ { .uid = 1, .modseq_low32 = 500, .modseq_high32 = 0 }
+ }
+ } },
+ { MAIL_TRANSACTION_MODSEQ_UPDATE, TEST_LOG_VERSION, 500, 1, {
+ .modseq_update = (const struct mail_transaction_modseq_update[]) {
+ { .uid = 1, .modseq_low32 = 500, .modseq_high32 = 0 },
+ { .uid = 1, .modseq_low32 = 200, .modseq_high32 = 0 }
+ }
+ } },
+ /* high32=1, low32=50 -> 2^32 + 50 = 4294967346 */
+ { MAIL_TRANSACTION_MODSEQ_UPDATE, TEST_LOG_VERSION, (uint64_t)4294967346, 1, {
+ .modseq_update = (const struct mail_transaction_modseq_update[]) {
+ { .uid = 1, .modseq_low32 = 50, .modseq_high32 = 1 }
+ }
+ } },
+
+ /* appends, keyword changes, attribute changes: increase modseq */
+ { MAIL_TRANSACTION_APPEND, TEST_LOG_VERSION, UPDATE, 1, { } },
+ { MAIL_TRANSACTION_KEYWORD_UPDATE, TEST_LOG_VERSION, UPDATE, 1, { } },
+ { MAIL_TRANSACTION_KEYWORD_RESET, TEST_LOG_VERSION, UPDATE, 1, { } },
+ { MAIL_TRANSACTION_ATTRIBUTE_UPDATE, TEST_LOG_VERSION, UPDATE, 1, { } },
+
+ /* others: don't increase modseq */
+ { MAIL_TRANSACTION_HEADER_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_HEADER_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXT_INTRO, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXT_RESET, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXT_HDR_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXT_REC_UPDATE, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXT_ATOMIC_INC, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_EXT_HDR_UPDATE32, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_INDEX_DELETED, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+ { MAIL_TRANSACTION_INDEX_UNDELETED, TEST_LOG_VERSION, NOUPDATE, 1, { } },
+};
+
+/* Return the per-record payload size for a test entry's transaction type,
+   used to compute hdr.size for mail_transaction_update_modseq(). Types
+   whose record content never affects modseq tracking get a dummy size
+   of 4 bytes. Aborts (i_unreached) for types that are not valid here. */
+static size_t update_modseq_test_get_size(const struct update_modseq_test *test)
+{
+ enum mail_transaction_type type =
+ test->type & MAIL_TRANSACTION_TYPE_MASK;
+
+ /* strip the EXPUNGE_PROT marker so the switch sees the base type */
+ if (type == (MAIL_TRANSACTION_EXPUNGE | MAIL_TRANSACTION_EXPUNGE_PROT))
+ type = MAIL_TRANSACTION_EXPUNGE;
+ if (type == (MAIL_TRANSACTION_EXPUNGE_GUID | MAIL_TRANSACTION_EXPUNGE_PROT))
+ type = MAIL_TRANSACTION_EXPUNGE_GUID;
+
+ switch (type) {
+ case MAIL_TRANSACTION_EXPUNGE:
+ return sizeof(struct mail_transaction_expunge);
+ case MAIL_TRANSACTION_EXPUNGE_GUID:
+ return sizeof(struct mail_transaction_expunge_guid);
+ case MAIL_TRANSACTION_APPEND:
+ return sizeof(struct mail_index_record);
+ case MAIL_TRANSACTION_KEYWORD_UPDATE:
+ return sizeof(struct mail_transaction_keyword_update);
+ case MAIL_TRANSACTION_KEYWORD_RESET:
+ return sizeof(struct mail_transaction_keyword_reset);
+ case MAIL_TRANSACTION_ATTRIBUTE_UPDATE:
+ return 4;
+ case MAIL_TRANSACTION_FLAG_UPDATE:
+ return sizeof(struct mail_transaction_flag_update);
+ case MAIL_TRANSACTION_MODSEQ_UPDATE:
+ return sizeof(struct mail_transaction_modseq_update);
+ case MAIL_TRANSACTION_HEADER_UPDATE:
+ case MAIL_TRANSACTION_EXT_INTRO:
+ case MAIL_TRANSACTION_EXT_RESET:
+ case MAIL_TRANSACTION_EXT_HDR_UPDATE:
+ case MAIL_TRANSACTION_EXT_REC_UPDATE:
+ case MAIL_TRANSACTION_EXT_ATOMIC_INC:
+ case MAIL_TRANSACTION_EXT_HDR_UPDATE32:
+ case MAIL_TRANSACTION_INDEX_DELETED:
+ case MAIL_TRANSACTION_INDEX_UNDELETED:
+ return 4;
+ case MAIL_TRANSACTION_TYPE_MASK:
+ case MAIL_TRANSACTION_BOUNDARY:
+ case MAIL_TRANSACTION_EXPUNGE_PROT:
+ case MAIL_TRANSACTION_EXTERNAL:
+ case MAIL_TRANSACTION_SYNC:
+ break;
+ }
+ i_unreached();
+}
+
+/* Run every update_modseq_tests[] entry through
+   mail_transaction_update_modseq() and verify the resulting modseq.
+   Entries without payload use a zeroed buffer as record data. */
+static void test_mail_transaction_update_modseq(void)
+{
+ struct mail_transaction_header hdr;
+ unsigned char tempbuf[1024] = { 0 };
+
+ test_begin("mail_transaction_update_modseq()");
+ for (unsigned int i = 0; i < N_ELEMENTS(update_modseq_tests); i++) {
+ const struct update_modseq_test *test = &update_modseq_tests[i];
+ const void *data = test->v.flag_update;
+ uint64_t cur_modseq = INITIAL_MODSEQ;
+
+ if (data == NULL)
+ data = tempbuf;
+
+ hdr.type = test->type;
+ /* hdr.size is stored in the log's offset encoding */
+ hdr.size = sizeof(hdr) + update_modseq_test_get_size(test) * test->count;
+ hdr.size = mail_index_uint32_to_offset(hdr.size);
+ mail_transaction_update_modseq(&hdr, data, &cur_modseq, test->version);
+ /* modseq may only grow, and must match the expected value */
+ test_assert_idx(cur_modseq >= INITIAL_MODSEQ, i);
+ test_assert_idx(test->expected_modseq == cur_modseq, i);
+ }
+ test_end();
+}
+
+/* Create/open a real on-disk test index ("test.dovecot.index") and
+   commit one header update so the transaction log has initial content.
+   Caller owns the returned index (mail_index_close + mail_index_free). */
+static struct mail_index *test_mail_index_open(void)
+{
+ struct mail_index *index = mail_index_alloc(NULL, NULL, "test.dovecot.index");
+ test_assert(mail_index_open_or_create(index, MAIL_INDEX_OPEN_FLAG_CREATE) == 0);
+ struct mail_index_view *view = mail_index_view_open(index);
+
+ struct mail_index_transaction *trans =
+ mail_index_transaction_begin(view, 0);
+ uint32_t uid_validity = 1234;
+ mail_index_update_header(trans,
+ offsetof(struct mail_index_header, uid_validity),
+ &uid_validity, sizeof(uid_validity), TRUE);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ mail_index_view_close(&view);
+ return index;
+}
+
+/* Build a log containing max_modseq modseq-updating transactions, each
+   followed by a non-updating (DIRTY flag) transaction, recording both
+   possible "next offset" answers for every modseq. Then exercise
+   mail_transaction_log_file_get_modseq_next_offset() and
+   _get_highest_modseq_at(), including their fast paths and the
+   LOG_FILE_MODSEQ_CACHE_SIZE-bounded cache (hence the random probing). */
+static void test_mail_transaction_log_file_modseq_offsets(void)
+{
+ test_begin("mail_transaction_log_file_get_modseq_next_offset() and _get_highest_modseq_at()");
+
+ struct mail_index *index = test_mail_index_open();
+ struct mail_transaction_log_file *file = index->log->head;
+
+ /* +2 guarantees more entries than the cache can hold */
+ const unsigned int max_modseq = LOG_FILE_MODSEQ_CACHE_SIZE+2;
+ uoff_t modseq_next_offset[max_modseq+1];
+ uoff_t modseq_alt_next_offset[max_modseq+1];
+
+ /* start with modseq=2, because modseq=1 is the initial state */
+ modseq_next_offset[1] = sizeof(struct mail_transaction_log_header);
+ modseq_alt_next_offset[1] = sizeof(struct mail_transaction_log_header);
+ for (uint64_t modseq = 2; modseq <= max_modseq; modseq++) {
+ uint32_t seq;
+
+ struct mail_index_view *view = mail_index_view_open(index);
+ struct mail_index_transaction *trans =
+ mail_index_transaction_begin(view, 0);
+ mail_index_append(trans, modseq, &seq);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ modseq_next_offset[modseq] = file->sync_offset;
+ mail_index_view_close(&view);
+
+ /* add a non-modseq updating change */
+ view = mail_index_view_open(index);
+ trans = mail_index_transaction_begin(view, 0);
+ mail_index_update_flags(trans, seq, MODIFY_ADD,
+ (enum mail_flags)MAIL_INDEX_MAIL_FLAG_DIRTY);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ mail_index_view_close(&view);
+ modseq_alt_next_offset[modseq] = file->sync_offset;
+ }
+
+ /* mail_transaction_log_file_get_highest_modseq_at() is simultaneously
+ tested and it can also add offsets to cache. The difference is that
+ it adds the highest possible offset, while
+ mail_transaction_log_file_get_modseq_next_offset() adds the lowest
+ possible offset. So we'll need to allow both. */
+#define MODSEQ_MATCH(modseq, next_offset) \
+ ((next_offset) == modseq_next_offset[modseq] || \
+ (next_offset) == modseq_alt_next_offset[modseq])
+
+ /* 1) mail_transaction_log_file_get_modseq_next_offset() tests */
+ uint64_t modseq;
+ uoff_t next_offset;
+ /* initial_modseq fast path */
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, 1, &next_offset) == 0);
+ test_assert(next_offset == modseq_next_offset[1]);
+ /* sync_highest_modseq fast path - it skips to sync_offset instead of
+ using exactly the same max_modseq */
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, max_modseq, &next_offset) == 0);
+ test_assert(next_offset == file->sync_offset);
+ test_assert(next_offset != modseq_next_offset[max_modseq]);
+ /* update the offset for the random tests */
+ modseq_next_offset[max_modseq] = file->sync_offset;
+ /* add to cache */
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, 2, &next_offset) == 0);
+ test_assert(MODSEQ_MATCH(2, next_offset));
+ /* get it from cache */
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, 2, &next_offset) == 0);
+ test_assert(MODSEQ_MATCH(2, next_offset));
+ /* get next value from cache */
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, 3, &next_offset) == 0);
+ test_assert(MODSEQ_MATCH(3, next_offset));
+ /* get previous value from cache again */
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, 2, &next_offset) == 0);
+ test_assert(MODSEQ_MATCH(2, next_offset));
+ /* do some random testing with cache */
+ for (unsigned int i = 0; i < LOG_FILE_MODSEQ_CACHE_SIZE*10; i++) {
+ modseq = i_rand_minmax(1, max_modseq);
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, modseq, &next_offset) == 0);
+ test_assert(MODSEQ_MATCH(modseq, next_offset));
+ }
+ /* go through all modseqs - do this after randomness testing or
+ modseq_alt_next_offset[] matching isn't triggered */
+ for (modseq = 1; modseq <= max_modseq; modseq++) {
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, modseq, &next_offset) == 0);
+ test_assert(MODSEQ_MATCH(modseq, next_offset));
+ }
+
+ /* 2) mail_transaction_log_file_get_highest_modseq_at() tests */
+ uint64_t modseq_at;
+ const char *error;
+ /* initial_offset */
+ test_assert(mail_transaction_log_file_get_highest_modseq_at(
+ file, modseq_next_offset[1], &modseq, &error) == 1);
+ test_assert(modseq == 1);
+ /* sync_offset fast path */
+ test_assert(mail_transaction_log_file_get_highest_modseq_at(
+ file, file->sync_offset, &modseq, &error) == 1);
+ test_assert(modseq == max_modseq);
+ /* do some random testing with cache */
+ for (unsigned int i = 0; i < LOG_FILE_MODSEQ_CACHE_SIZE*10; i++) {
+ modseq = i_rand_minmax(1, max_modseq);
+ test_assert(mail_transaction_log_file_get_highest_modseq_at(
+ file, modseq_next_offset[modseq], &modseq_at, &error) == 1);
+ test_assert(modseq_at == modseq);
+ test_assert(mail_transaction_log_file_get_highest_modseq_at(
+ file, modseq_alt_next_offset[modseq], &modseq_at, &error) == 1);
+ test_assert(modseq_at == modseq);
+ }
+ /* go through all modseqs - do this after randomness testing or
+ modseq_alt_next_offset[] matching isn't triggered */
+ for (modseq = 1; modseq <= max_modseq; modseq++) {
+ test_assert(mail_transaction_log_file_get_highest_modseq_at(
+ file, modseq_next_offset[modseq], &modseq_at, &error) == 1);
+ test_assert(modseq_at == modseq);
+ }
+
+ mail_index_close(index);
+ mail_index_free(&index);
+ test_end();
+}
+
+/* Corrupt sync_highest_modseq on purpose (claiming modseq 3 while the
+   log only reaches 2) and verify that
+   mail_transaction_log_file_get_modseq_next_offset() logs the
+   "modseq tracking is corrupted" error and falls back to returning
+   sync_offset instead of failing. */
+static void
+test_mail_transaction_log_file_get_modseq_next_offset_inconsistency(void)
+{
+ test_begin("mail_transaction_log_file_get_modseq_next_offset() inconsistency");
+
+ struct mail_index *index = test_mail_index_open();
+ struct mail_transaction_log_file *file = index->log->head;
+ uint32_t seq;
+
+ /* add modseq=2 */
+ struct mail_index_view *view = mail_index_view_open(index);
+ struct mail_index_transaction *trans =
+ mail_index_transaction_begin(view, 0);
+ mail_index_append(trans, 1, &seq);
+ test_assert(mail_index_transaction_commit(&trans) == 0);
+ mail_index_view_close(&view);
+
+ /* emulate a broken mail_index_modseq_header header */
+ file->sync_highest_modseq = 3;
+
+ uoff_t next_offset;
+ test_expect_error_string("Transaction log modseq tracking is corrupted");
+ test_assert(mail_transaction_log_file_get_modseq_next_offset(file, 2, &next_offset) == 0);
+ test_expect_no_more_errors();
+ /* on corruption the best-effort answer is the end of the synced log */
+ test_assert(next_offset == file->sync_offset);
+
+ mail_index_close(index);
+ mail_index_free(&index);
+ test_end();
+}
+
+/* Test entry point: run all tests in this file via the lib-test runner. */
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_transaction_update_modseq,
+ test_mail_transaction_log_file_modseq_offsets,
+ test_mail_transaction_log_file_get_modseq_next_offset_inconsistency,
+ NULL
+ };
+ /* NOTE(review): ioloop_time is set to a nonzero value before running;
+    presumably the log code stamps headers with it - confirm */
+ ioloop_time = 1;
+ return test_run(test_functions);
+}
diff --git a/src/lib-index/test-mail-transaction-log-view.c b/src/lib-index/test-mail-transaction-log-view.c
new file mode 100644
index 0000000..17a7628
--- /dev/null
+++ b/src/lib-index/test-mail-transaction-log-view.c
@@ -0,0 +1,268 @@
+/* Copyright (c) 2009-2018 Dovecot authors, see the included COPYING file */
+
+#include "lib.h"
+#include "array.h"
+#include "test-common.h"
+#include "mail-index-private.h"
+#include "mail-transaction-log-view-private.h"
+
+static struct mail_transaction_log *log;
+static struct mail_transaction_log_view *view;
+static bool clean_refcount0_files = FALSE;
+
+/* Create a fake in-memory transaction log file with the given file_seq,
+   insert it into log->files keeping the list sorted by file_seq, and
+   make it the log head. The file has no fd and an empty dynamic buffer
+   positioned right after the header. */
+static void
+test_transaction_log_file_add(uint32_t file_seq)
+{
+ struct mail_transaction_log_file **p, *file;
+
+ file = i_new(struct mail_transaction_log_file, 1);
+ file->hdr.file_seq = file_seq;
+ file->hdr.hdr_size = file->sync_offset = sizeof(file->hdr);
+ file->hdr.prev_file_seq = file_seq - 1;
+ file->hdr.prev_file_offset = (uint32_t)-1;
+ file->log = log;
+ file->fd = -1;
+ file->buffer = buffer_create_dynamic(default_pool, 256);
+ file->buffer_offset = file->hdr.hdr_size;
+
+ /* files must be sorted by file_seq */
+ for (p = &log->files; *p != NULL; p = &(*p)->next) {
+ if ((*p)->hdr.file_seq > file->hdr.file_seq) {
+ file->next = *p;
+ break;
+ }
+ }
+ *p = file;
+ log->head = file;
+}
+
+/* Stub: swallow index errors so the linked-in code under test doesn't
+   need a real error handler. */
+void mail_index_set_error(struct mail_index *index ATTR_UNUSED,
+ const char *fmt ATTR_UNUSED, ...)
+{
+}
+
+/* Stub: ignore corruption reports from the code under test. */
+void mail_transaction_log_file_set_corrupted(struct mail_transaction_log_file *file ATTR_UNUSED,
+ const char *fmt ATTR_UNUSED, ...)
+{
+}
+
+/* Stub: no-op; refcount=0 file cleanup is simulated inside
+   mail_transaction_log_find_file() below instead. */
+void mail_transaction_logs_clean(struct mail_transaction_log *log ATTR_UNUSED)
+{
+}
+
+/* Stub replacing the real file lookup: walk log->files for file_seq.
+   When clean_refcount0_files is set it also emulates the real code's
+   side effects: unreferenced files at the head of the list are dropped
+   (not freed - the test restores the pointer later), and a lookup for
+   file_seq 4 auto-creates that file. Returns 1 on success, 0 with
+   *reason_r set when not found. */
+int mail_transaction_log_find_file(struct mail_transaction_log *log,
+ uint32_t file_seq, bool nfs_flush ATTR_UNUSED,
+ struct mail_transaction_log_file **file_r,
+ const char **reason_r)
+{
+ struct mail_transaction_log_file *file, *next;
+
+ for (file = log->files; file != NULL; file = next) {
+ next = file->next;
+ if (file->hdr.file_seq == file_seq) {
+ *file_r = file;
+ return 1;
+ }
+ /* refcount=0 files at the beginning of the list may be freed */
+ if (file->refcount == 0 && file == log->files &&
+ clean_refcount0_files)
+ log->files = next;
+ }
+ if (clean_refcount0_files && file_seq == 4) {
+ /* "clean refcount=0 files" test autocreates this file */
+ test_transaction_log_file_add(4);
+ *file_r = log->head;
+ return 1;
+ }
+ *reason_r = "not found";
+ return 0;
+}
+
+/* Stub: pretend mapping always succeeds; the test files' buffers are
+   already populated in memory. */
+int mail_transaction_log_file_map(struct mail_transaction_log_file *file ATTR_UNUSED,
+ uoff_t start_offset ATTR_UNUSED, uoff_t end_offset ATTR_UNUSED,
+ const char **reason_r ATTR_UNUSED)
+{
+ return 1;
+}
+
+/* Stub: always report modseq 0 successfully; the view tests don't
+   depend on real modseq values. */
+int mail_transaction_log_file_get_highest_modseq_at(
+ struct mail_transaction_log_file *file ATTR_UNUSED,
+ uoff_t offset ATTR_UNUSED, uint64_t *highest_modseq_r,
+ const char **error_r ATTR_UNUSED)
+{
+ *highest_modseq_r = 0;
+ return 1;
+}
+
+/* Stub: treat every transaction as modseq-incrementing, regardless of
+   its type or payload. */
+void mail_transaction_update_modseq(const struct mail_transaction_header *hdr ATTR_UNUSED,
+ const void *data ATTR_UNUSED,
+ uint64_t *cur_modseq,
+ unsigned int version ATTR_UNUSED)
+{
+ *cur_modseq += 1;
+}
+
+/* Return TRUE if the global view currently holds a reference to the log
+   file with the given file_seq. Asserts the file appears at most once
+   in view->file_refs. */
+static bool view_is_file_refed(uint32_t file_seq)
+{
+ struct mail_transaction_log_file *const *files;
+ unsigned int i, count;
+ bool ret = FALSE;
+
+ files = array_get(&view->file_refs, &count);
+ for (i = 0; i < count; i++) {
+ if (files[i]->hdr.file_seq == file_seq) {
+ i_assert(!ret); /* could be a test too.. */
+ ret = TRUE;
+ }
+ }
+ return ret;
+}
+
+/* Append an external APPEND transaction (header + one index record) to
+   the fake log file's buffer, advance its sync_offset accordingly, and
+   return the number of bytes added. */
+static size_t
+add_append_record(struct mail_transaction_log_file *file,
+ const struct mail_index_record *rec)
+{
+ struct mail_transaction_header hdr;
+ size_t size;
+
+ i_zero(&hdr);
+ hdr.type = MAIL_TRANSACTION_APPEND | MAIL_TRANSACTION_EXTERNAL;
+ /* size is stored in the log's offset encoding */
+ hdr.size = mail_index_uint32_to_offset(sizeof(hdr) + sizeof(*rec));
+
+ buffer_append(file->buffer, &hdr, sizeof(hdr));
+ buffer_append(file->buffer, rec, sizeof(*rec));
+
+ size = sizeof(hdr) + sizeof(*rec);
+ file->sync_offset += size;
+ return size;
+}
+
+/* End-to-end test of mail_transaction_log_view: builds a fake log of
+   three files (one APPEND record in file 3), then checks view ranges,
+   iteration, file refcounting, behavior after the oldest file is
+   removed, missing/closed-log handling, and the refcount=0 cleanup
+   path simulated by the stubs above. */
+static void test_mail_transaction_log_view(void)
+{
+ const struct mail_transaction_header *hdr;
+ const struct mail_index_record *rec;
+ struct mail_index_record append_rec;
+ const void *data;
+ void *oldfile;
+ uint32_t seq;
+ uoff_t offset, last_log_size;
+ const char *reason;
+ bool reset;
+
+ test_begin("init");
+ log = i_new(struct mail_transaction_log, 1);
+ log->index = i_new(struct mail_index, 1);
+ log->index->log = log;
+ log->index->log_sync_locked = TRUE;
+ test_transaction_log_file_add(1);
+ test_transaction_log_file_add(2);
+ test_transaction_log_file_add(3);
+
+ /* add an append record to the 3rd log file */
+ i_zero(&append_rec);
+ append_rec.uid = 1;
+
+ last_log_size = sizeof(struct mail_transaction_log_header) +
+ add_append_record(log->head, &append_rec);
+
+ /* a fresh view must reference only the head file */
+ view = mail_transaction_log_view_open(log);
+ i_assert(view != NULL);
+ test_assert(log->views == view &&
+ !view_is_file_refed(1) && !view_is_file_refed(2) &&
+ view_is_file_refed(3));
+ test_end();
+
+ /* we have files 1-3 opened */
+ test_begin("set all");
+ test_assert(mail_transaction_log_view_set(view, 0, 0, (uint32_t)-1, UOFF_T_MAX, &reset, &reason) == 1 &&
+ reset && view_is_file_refed(1) && view_is_file_refed(2) &&
+ view_is_file_refed(3) &&
+ !mail_transaction_log_view_is_corrupted(view));
+ mail_transaction_log_view_get_prev_pos(view, &seq, &offset);
+ test_assert(seq == 1 && offset == sizeof(struct mail_transaction_log_header));
+ /* the only record in the whole range is the APPEND in file 3 */
+ test_assert(mail_transaction_log_view_next(view, &hdr, &data) == 1);
+ test_assert(hdr->type == (MAIL_TRANSACTION_APPEND | MAIL_TRANSACTION_EXTERNAL));
+ rec = data;
+ test_assert(memcmp(rec, &append_rec, sizeof(*rec)) == 0);
+ test_assert(mail_transaction_log_view_next(view, &hdr, &data) == 0);
+ test_assert(mail_transaction_log_view_is_last(view));
+ mail_transaction_log_view_get_prev_pos(view, &seq, &offset);
+ test_assert(seq == 3 && offset == last_log_size);
+ test_end();
+
+ test_begin("set first");
+ /* empty range at the very beginning: no records, position unmoved */
+ test_assert(mail_transaction_log_view_set(view, 0, 0, 0, 0, &reset, &reason) == 1);
+ mail_transaction_log_view_get_prev_pos(view, &seq, &offset);
+ test_assert(seq == 1 && offset == sizeof(struct mail_transaction_log_header));
+ test_assert(mail_transaction_log_view_next(view, &hdr, &data) == 0);
+ mail_transaction_log_view_get_prev_pos(view, &seq, &offset);
+ test_assert(seq == 1 && offset == sizeof(struct mail_transaction_log_header));
+ test_end();
+
+ test_begin("set end");
+ /* range starting at the end of the log: nothing to iterate */
+ test_assert(mail_transaction_log_view_set(view, 3, last_log_size, (uint32_t)-1, UOFF_T_MAX, &reset, &reason) == 1);
+ mail_transaction_log_view_get_prev_pos(view, &seq, &offset);
+ test_assert(seq == 3 && offset == last_log_size);
+ test_assert(mail_transaction_log_view_next(view, &hdr, &data) == 0);
+ mail_transaction_log_view_get_prev_pos(view, &seq, &offset);
+ test_assert(seq == 3 && offset == last_log_size);
+ test_end();
+
+ test_begin("log clear");
+ /* clearing up to file 2 must drop the reference to file 1 only */
+ mail_transaction_log_view_clear(view, 2);
+ test_assert(!view_is_file_refed(1) && view_is_file_refed(2) &&
+ view_is_file_refed(3));
+ oldfile = log->files;
+ buffer_free(&log->files->buffer);
+ log->files = log->files->next;
+ i_free(oldfile);
+ test_assert(log->files->hdr.file_seq == 2);
+ test_end();
+
+ /* --- first file has been removed --- */
+
+ test_begin("set 2-3");
+ test_assert(mail_transaction_log_view_set(view, 2, 0, (uint32_t)-1, UOFF_T_MAX, &reset, &reason) == 1);
+ test_end();
+
+ test_begin("missing log handing");
+ /* file 1 no longer exists, so a range starting there must fail */
+ test_assert(mail_transaction_log_view_set(view, 0, 0, (uint32_t)-1, UOFF_T_MAX, &reset, &reason) == 0);
+ test_end();
+
+ test_begin("closed log handling");
+ view->log = NULL;
+ test_assert(mail_transaction_log_view_set(view, 0, 0, (uint32_t)-1, UOFF_T_MAX, &reset, &reason) == 0);
+ view->log = log;
+ test_end();
+
+ test_begin("clean refcount=0 files");
+ oldfile = log->files;
+ /* clear all references */
+ mail_transaction_log_view_clear(view, 0);
+ clean_refcount0_files = TRUE;
+ /* create a new file during mail_transaction_log_view_set(), which
+ triggers freeing any unreferenced files. */
+ test_assert(mail_transaction_log_view_set(view, 2, 0, 4, UOFF_T_MAX, &reset, &reason) == 1);
+ clean_refcount0_files = FALSE;
+ /* restore the list head dropped (not freed) by the find_file stub */
+ log->files = oldfile;
+ test_end();
+
+ /* free everything that was built by hand */
+ mail_transaction_log_view_close(&view);
+ i_free(log->index);
+ while (log->files != NULL) {
+ oldfile = log->files;
+ buffer_free(&log->files->buffer);
+ log->files = log->files->next;
+ i_free(oldfile);
+ }
+ i_free(log);
+}
+
+/* Test entry point: run all tests in this file via the lib-test runner. */
+int main(void)
+{
+ static void (*const test_functions[])(void) = {
+ test_mail_transaction_log_view,
+ NULL
+ };
+ return test_run(test_functions);
+}