author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 08:06:26 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 08:06:26 +0000
commit     1660d4b7a65d9ad2ce0deaa19d35579ca4084ac5 (patch)
tree       6cf8220b628ebd2ccfc1375dd6516c6996e9abcc /lib
parent     Initial commit. (diff)
Adding upstream version 2:2.6.1. (upstream/2%2.6.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/Makemodule.am | 111
-rw-r--r--  lib/bitlk/bitlk.c | 1460
-rw-r--r--  lib/bitlk/bitlk.h | 148
-rw-r--r--  lib/bitops.h | 123
-rw-r--r--  lib/crypt_plain.c | 117
-rw-r--r--  lib/crypto_backend/Makemodule.am | 41
-rw-r--r--  lib/crypto_backend/argon2/LICENSE | 30
-rw-r--r--  lib/crypto_backend/argon2/Makemodule.am | 30
-rw-r--r--  lib/crypto_backend/argon2/README | 5
-rw-r--r--  lib/crypto_backend/argon2/argon2.c | 458
-rw-r--r--  lib/crypto_backend/argon2/argon2.h | 437
-rw-r--r--  lib/crypto_backend/argon2/blake2/blake2-impl.h | 154
-rw-r--r--  lib/crypto_backend/argon2/blake2/blake2.h | 89
-rw-r--r--  lib/crypto_backend/argon2/blake2/blake2b.c | 392
-rw-r--r--  lib/crypto_backend/argon2/blake2/blamka-round-opt.h | 471
-rw-r--r--  lib/crypto_backend/argon2/blake2/blamka-round-ref.h | 56
-rw-r--r--  lib/crypto_backend/argon2/core.c | 646
-rw-r--r--  lib/crypto_backend/argon2/core.h | 228
-rw-r--r--  lib/crypto_backend/argon2/encoding.c | 462
-rw-r--r--  lib/crypto_backend/argon2/encoding.h | 57
-rw-r--r--  lib/crypto_backend/argon2/opt.c | 283
-rw-r--r--  lib/crypto_backend/argon2/ref.c | 194
-rw-r--r--  lib/crypto_backend/argon2/thread.c | 49
-rw-r--r--  lib/crypto_backend/argon2/thread.h | 62
-rw-r--r--  lib/crypto_backend/argon2_generic.c | 79
-rw-r--r--  lib/crypto_backend/base64.c | 276
-rw-r--r--  lib/crypto_backend/cipher_check.c | 161
-rw-r--r--  lib/crypto_backend/cipher_generic.c | 89
-rw-r--r--  lib/crypto_backend/crc32.c | 183
-rw-r--r--  lib/crypto_backend/crypto_backend.h | 161
-rw-r--r--  lib/crypto_backend/crypto_backend_internal.h | 75
-rw-r--r--  lib/crypto_backend/crypto_cipher_kernel.c | 351
-rw-r--r--  lib/crypto_backend/crypto_gcrypt.c | 573
-rw-r--r--  lib/crypto_backend/crypto_kernel.c | 428
-rw-r--r--  lib/crypto_backend/crypto_nettle.c | 460
-rw-r--r--  lib/crypto_backend/crypto_nss.c | 407
-rw-r--r--  lib/crypto_backend/crypto_openssl.c | 849
-rw-r--r--  lib/crypto_backend/crypto_storage.c | 347
-rw-r--r--  lib/crypto_backend/pbkdf2_generic.c | 232
-rw-r--r--  lib/crypto_backend/pbkdf_check.c | 443
-rw-r--r--  lib/crypto_backend/utf8.c | 288
-rw-r--r--  lib/fvault2/fvault2.c | 1057
-rw-r--r--  lib/fvault2/fvault2.h | 80
-rw-r--r--  lib/integrity/integrity.c | 402
-rw-r--r--  lib/integrity/integrity.h | 101
-rw-r--r--  lib/internal.h | 253
-rw-r--r--  lib/keyslot_context.c | 488
-rw-r--r--  lib/keyslot_context.h | 111
-rw-r--r--  lib/libcryptsetup.h | 2881
-rw-r--r--  lib/libcryptsetup.pc.in | 11
-rw-r--r--  lib/libcryptsetup.sym | 167
-rw-r--r--  lib/libcryptsetup_macros.h | 70
-rw-r--r--  lib/libcryptsetup_symver.h | 103
-rw-r--r--  lib/libdevmapper.c | 3181
-rw-r--r--  lib/loopaes/loopaes.c | 253
-rw-r--r--  lib/loopaes/loopaes.h | 46
-rw-r--r--  lib/luks1/af.c | 170
-rw-r--r--  lib/luks1/af.h | 67
-rw-r--r--  lib/luks1/keyencryption.c | 268
-rw-r--r--  lib/luks1/keymanage.c | 1300
-rw-r--r--  lib/luks1/luks.h | 194
-rw-r--r--  lib/luks2/luks2.h | 497
-rw-r--r--  lib/luks2/luks2_digest.c | 455
-rw-r--r--  lib/luks2/luks2_digest_pbkdf2.c | 210
-rw-r--r--  lib/luks2/luks2_disk_metadata.c | 811
-rw-r--r--  lib/luks2/luks2_internal.h | 388
-rw-r--r--  lib/luks2/luks2_json_format.c | 411
-rw-r--r--  lib/luks2/luks2_json_metadata.c | 2874
-rw-r--r--  lib/luks2/luks2_keyslot.c | 977
-rw-r--r--  lib/luks2/luks2_keyslot_luks2.c | 821
-rw-r--r--  lib/luks2/luks2_keyslot_reenc.c | 752
-rw-r--r--  lib/luks2/luks2_luks1_convert.c | 945
-rw-r--r--  lib/luks2/luks2_reencrypt.c | 4375
-rw-r--r--  lib/luks2/luks2_reencrypt_digest.c | 410
-rw-r--r--  lib/luks2/luks2_segment.c | 426
-rw-r--r--  lib/luks2/luks2_token.c | 1043
-rw-r--r--  lib/luks2/luks2_token_keyring.c | 144
-rw-r--r--  lib/nls.h | 34
-rw-r--r--  lib/random.c | 244
-rw-r--r--  lib/setup.c | 6564
-rw-r--r--  lib/tcrypt/tcrypt.c | 1136
-rw-r--r--  lib/tcrypt/tcrypt.h | 120
-rw-r--r--  lib/utils.c | 334
-rw-r--r--  lib/utils_benchmark.c | 218
-rw-r--r--  lib/utils_blkid.c | 347
-rw-r--r--  lib/utils_blkid.h | 69
-rw-r--r--  lib/utils_crypt.c | 347
-rw-r--r--  lib/utils_crypt.h | 54
-rw-r--r--  lib/utils_device.c | 1089
-rw-r--r--  lib/utils_device_locking.c | 520
-rw-r--r--  lib/utils_device_locking.h | 49
-rw-r--r--  lib/utils_devpath.c | 459
-rw-r--r--  lib/utils_dm.h | 246
-rw-r--r--  lib/utils_io.c | 299
-rw-r--r--  lib/utils_io.h | 43
-rw-r--r--  lib/utils_keyring.c | 237
-rw-r--r--  lib/utils_keyring.h | 55
-rw-r--r--  lib/utils_loop.c | 331
-rw-r--r--  lib/utils_loop.h | 34
-rw-r--r--  lib/utils_pbkdf.c | 333
-rw-r--r--  lib/utils_safe_memory.c | 122
-rw-r--r--  lib/utils_storage_wrappers.c | 394
-rw-r--r--  lib/utils_storage_wrappers.h | 75
-rw-r--r--  lib/utils_wipe.c | 311
-rw-r--r--  lib/verity/rs.h | 63
-rw-r--r--  lib/verity/rs_decode_char.c | 201
-rw-r--r--  lib/verity/rs_encode_char.c | 173
-rw-r--r--  lib/verity/verity.c | 416
-rw-r--r--  lib/verity/verity.h | 87
-rw-r--r--  lib/verity/verity_fec.c | 336
-rw-r--r--  lib/verity/verity_hash.c | 444
-rw-r--r--  lib/volumekey.c | 147
112 files changed, 54178 insertions, 0 deletions
diff --git a/lib/Makemodule.am b/lib/Makemodule.am
new file mode 100644
index 0000000..2e60a90
--- /dev/null
+++ b/lib/Makemodule.am
@@ -0,0 +1,111 @@
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA += lib/libcryptsetup.pc
+
+lib_LTLIBRARIES += libcryptsetup.la
+
+noinst_LTLIBRARIES += libutils_io.la
+
+include_HEADERS += lib/libcryptsetup.h
+
+EXTRA_DIST += lib/libcryptsetup.pc.in lib/libcryptsetup.sym
+
+libutils_io_la_CFLAGS = $(AM_CFLAGS)
+
+libutils_io_la_SOURCES = \
+ lib/utils_io.c \
+ lib/utils_io.h
+
+libcryptsetup_la_CPPFLAGS = $(AM_CPPFLAGS)
+
+libcryptsetup_la_DEPENDENCIES = libutils_io.la libcrypto_backend.la lib/libcryptsetup.sym
+
+libcryptsetup_la_LDFLAGS = $(AM_LDFLAGS) -no-undefined \
+ -Wl,--version-script=$(top_srcdir)/lib/libcryptsetup.sym \
+ -version-info @LIBCRYPTSETUP_VERSION_INFO@
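+# Note: libcryptsetup.sym is a linker version script; it limits the exported
+# symbols and assigns them symbol versions, and -no-undefined declares that
+# the library has no unresolved symbols at link time.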
+
+libcryptsetup_la_CFLAGS = $(AM_CFLAGS) @CRYPTO_CFLAGS@
+
+libcryptsetup_la_LIBADD = \
+ @UUID_LIBS@ \
+ @DEVMAPPER_LIBS@ \
+ @CRYPTO_LIBS@ \
+ @LIBARGON2_LIBS@ \
+ @JSON_C_LIBS@ \
+ @BLKID_LIBS@ \
+ @DL_LIBS@ \
+ $(LTLIBINTL) \
+ libcrypto_backend.la \
+ libutils_io.la
+
+libcryptsetup_la_SOURCES = \
+ lib/setup.c \
+ lib/internal.h \
+ lib/bitops.h \
+ lib/nls.h \
+ lib/libcryptsetup.h \
+ lib/libcryptsetup_macros.h \
+ lib/libcryptsetup_symver.h \
+ lib/utils.c \
+ lib/utils_benchmark.c \
+ lib/utils_crypt.c \
+ lib/utils_crypt.h \
+ lib/utils_loop.c \
+ lib/utils_loop.h \
+ lib/utils_devpath.c \
+ lib/utils_wipe.c \
+ lib/utils_device.c \
+ lib/utils_keyring.c \
+ lib/utils_keyring.h \
+ lib/utils_device_locking.c \
+ lib/utils_device_locking.h \
+ lib/utils_pbkdf.c \
+ lib/utils_safe_memory.c \
+ lib/utils_storage_wrappers.c \
+ lib/utils_storage_wrappers.h \
+ lib/libdevmapper.c \
+ lib/utils_dm.h \
+ lib/volumekey.c \
+ lib/random.c \
+ lib/crypt_plain.c \
+ lib/integrity/integrity.h \
+ lib/integrity/integrity.c \
+ lib/loopaes/loopaes.h \
+ lib/loopaes/loopaes.c \
+ lib/tcrypt/tcrypt.h \
+ lib/tcrypt/tcrypt.c \
+ lib/keyslot_context.h \
+ lib/keyslot_context.c \
+ lib/luks1/af.h \
+ lib/luks1/af.c \
+ lib/luks1/keyencryption.c \
+ lib/luks1/keymanage.c \
+ lib/luks1/luks.h \
+ lib/verity/verity_hash.c \
+ lib/verity/verity_fec.c \
+ lib/verity/verity.c \
+ lib/verity/verity.h \
+ lib/verity/rs_encode_char.c \
+ lib/verity/rs_decode_char.c \
+ lib/verity/rs.h \
+ lib/luks2/luks2_disk_metadata.c \
+ lib/luks2/luks2_json_format.c \
+ lib/luks2/luks2_json_metadata.c \
+ lib/luks2/luks2_luks1_convert.c \
+ lib/luks2/luks2_digest.c \
+ lib/luks2/luks2_digest_pbkdf2.c \
+ lib/luks2/luks2_keyslot.c \
+ lib/luks2/luks2_keyslot_luks2.c \
+ lib/luks2/luks2_keyslot_reenc.c \
+ lib/luks2/luks2_reencrypt.c \
+ lib/luks2/luks2_reencrypt_digest.c \
+ lib/luks2/luks2_segment.c \
+ lib/luks2/luks2_token_keyring.c \
+ lib/luks2/luks2_token.c \
+ lib/luks2/luks2_internal.h \
+ lib/luks2/luks2.h \
+ lib/utils_blkid.c \
+ lib/utils_blkid.h \
+ lib/bitlk/bitlk.h \
+ lib/bitlk/bitlk.c \
+ lib/fvault2/fvault2.h \
+ lib/fvault2/fvault2.c
diff --git a/lib/bitlk/bitlk.c b/lib/bitlk/bitlk.c
new file mode 100644
index 0000000..de7bcea
--- /dev/null
+++ b/lib/bitlk/bitlk.c
@@ -0,0 +1,1460 @@
+/*
+ * BITLK (BitLocker-compatible) volume handling
+ *
+ * Copyright (C) 2019-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2019-2023 Milan Broz
+ * Copyright (C) 2019-2023 Vojtech Trefny
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <uuid/uuid.h>
+#include <time.h>
+#include <limits.h>
+
+#include "bitlk.h"
+#include "internal.h"
+
+#define BITLK_BOOTCODE_V1 "\xeb\x52\x90"
+#define BITLK_BOOTCODE_V2 "\xeb\x58\x90"
+#define BITLK_SIGNATURE "-FVE-FS-"
+#define BITLK_SIGNATURE_TOGO "MSWIN4.1"
+#define BITLK_HEADER_METADATA_OFFSET 160
+#define BITLK_HEADER_METADATA_OFFSET_TOGO 424
+
+/* FVE metadata header is split into two parts */
+#define BITLK_FVE_METADATA_BLOCK_HEADER_LEN 64
+#define BITLK_FVE_METADATA_HEADER_LEN 48
+#define BITLK_FVE_METADATA_HEADERS_LEN (BITLK_FVE_METADATA_BLOCK_HEADER_LEN + BITLK_FVE_METADATA_HEADER_LEN)
+
+/* total size of the FVE area (64 KiB) */
+#define BITLK_FVE_METADATA_SIZE (64 * 1024)
+
+#define BITLK_ENTRY_HEADER_LEN 8
+#define BITLK_VMK_HEADER_LEN 28
+
+#define BITLK_OPEN_KEY_METADATA_LEN 12
+
+#define BITLK_RECOVERY_KEY_LEN 55
+#define BITLK_RECOVERY_PARTS 8
+#define BITLK_RECOVERY_PART_LEN 6
+
+#define BITLK_BEK_FILE_HEADER_LEN 48
+#define BITLK_STARTUP_KEY_HEADER_LEN 24
+
+#define BITLK_KDF_HASH "sha256"
+#define BITLK_KDF_ITERATION_COUNT 0x100000
+
+/* maximum number of segments for the DM device */
+#define MAX_BITLK_SEGMENTS 10
+
+/* January 1, 1970 as MS file time */
+#define EPOCH_AS_FILETIME 116444736000000000
+#define HUNDREDS_OF_NANOSECONDS 10000000
+
+/* not available in older versions of libuuid */
+#ifndef UUID_STR_LEN
+#define UUID_STR_LEN 37
+#endif
+
+/* known types of GUIDs from the BITLK superblock */
+const uint8_t BITLK_GUID_NORMAL[16] = { 0x3b, 0xd6, 0x67, 0x49, 0x29, 0x2e, 0xd8, 0x4a,
+ 0x83, 0x99, 0xf6, 0xa3, 0x39, 0xe3, 0xd0, 0x01 };
+const uint8_t BITLK_GUID_EOW[16] = { 0x3b, 0x4d, 0xa8, 0x92, 0x80, 0xdd, 0x0e, 0x4d,
+ 0x9e, 0x4e, 0xb1, 0xe3, 0x28, 0x4e, 0xae, 0xd8 };
+
+/* taken from libfdisk gpt.c -- TODO: this is a good candidate for adding to libuuid */
+struct bitlk_guid {
+ uint32_t time_low;
+ uint16_t time_mid;
+ uint16_t time_hi_and_version;
+ uint8_t clock_seq_hi;
+ uint8_t clock_seq_low;
+ uint8_t node[6];
+} __attribute__ ((packed));
+
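+/* BITLK stores the first three GUID fields in little-endian order, while
+ * libuuid's uuid_unparse() expects the big-endian RFC 4122 layout, so these
+ * fields must be byte-swapped before unparsing. */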
+static void swap_guid(struct bitlk_guid *guid) {
+ guid->time_low = swab32(guid->time_low);
+ guid->time_mid = swab16(guid->time_mid);
+ guid->time_hi_and_version = swab16(guid->time_hi_and_version);
+}
+
+static void guid_to_string(struct bitlk_guid *guid, char *out) {
+ swap_guid(guid);
+ uuid_unparse((unsigned char *) guid, out);
+}
+
+typedef enum {
+ BITLK_SEGTYPE_CRYPT,
+ BITLK_SEGTYPE_ZERO,
+} BitlkSegmentType;
+
+struct segment {
+ uint64_t offset;
+ uint64_t length;
+ uint64_t iv_offset;
+ BitlkSegmentType type;
+};
+
+struct bitlk_signature {
+ uint8_t boot_code[3];
+ uint8_t signature[8];
+ uint16_t sector_size;
+} __attribute__ ((packed));
+
+struct bitlk_superblock {
+ struct bitlk_guid guid;
+ uint64_t fve_offset[3];
+} __attribute__ ((packed));
+
+struct bitlk_fve_metadata {
+ /* FVE metadata block header */
+ uint8_t signature[8];
+ uint16_t fve_size;
+ uint16_t fve_version;
+ uint16_t curr_state;
+ uint16_t next_state;
+ uint64_t volume_size;
+ uint32_t unknown2;
+ uint32_t volume_header_size;
+ uint64_t fve_offset[3];
+ uint64_t volume_header_offset;
+ /* FVE metadata header */
+ uint32_t metadata_size;
+ uint32_t metadata_version;
+ uint32_t metadata_header_size;
+ uint32_t metada_size_copy;
+ struct bitlk_guid guid;
+ uint32_t next_nonce;
+ uint16_t encryption;
+ uint16_t unknown3;
+ uint64_t creation_time;
+} __attribute__ ((packed));
+
+struct bitlk_entry_header_block {
+ uint64_t offset;
+ uint64_t size;
+} __attribute__ ((packed));
+
+struct bitlk_entry_vmk {
+ struct bitlk_guid guid;
+ uint8_t modified[8];
+ uint16_t _unknown;
+ uint16_t protection;
+} __attribute__ ((packed));
+
+struct bitlk_kdf_data {
+ char last_sha256[32];
+ char initial_sha256[32];
+ char salt[16];
+ uint64_t count;
+};
+
+struct bitlk_bek_header {
+ uint32_t metadata_size;
+ uint32_t metadata_version;
+ uint32_t metadata_header_size;
+ uint32_t metada_size_copy;
+ struct bitlk_guid guid;
+ uint32_t next_nonce;
+ uint16_t encryption;
+ uint16_t unknown;
+ uint64_t creation_time;
+} __attribute__ ((packed));
+
+static BITLKVMKProtection get_vmk_protection(uint16_t protection)
+{
+ switch (protection) {
+ case 0x0000:
+ return BITLK_PROTECTION_CLEAR_KEY;
+ case 0x0100:
+ return BITLK_PROTECTION_TPM;
+ case 0x0200:
+ return BITLK_PROTECTION_STARTUP_KEY;
+ case 0x0500:
+ return BITLK_PROTECTION_TPM_PIN;
+ case 0x0800:
+ return BITLK_PROTECTION_RECOVERY_PASSPHRASE;
+ case 0x1000:
+ return BITLK_PROTECTION_SMART_CARD;
+ case 0x2000:
+ return BITLK_PROTECTION_PASSPHRASE;
+ default:
+ return BITLK_PROTECTION_UNKNOWN;
+ }
+}
+
+static const char* get_vmk_protection_string(BITLKVMKProtection protection)
+{
+ switch (protection) {
+ case BITLK_PROTECTION_CLEAR_KEY:
+ return "VMK protected with clear key";
+ case BITLK_PROTECTION_TPM:
+ return "VMK protected with TPM";
+ case BITLK_PROTECTION_STARTUP_KEY:
+ return "VMK protected with startup key";
+ case BITLK_PROTECTION_TPM_PIN:
+ return "VMK protected with TPM and PIN";
+ case BITLK_PROTECTION_PASSPHRASE:
+ return "VMK protected with passphrase";
+ case BITLK_PROTECTION_RECOVERY_PASSPHRASE:
+ return "VMK protected with recovery passphrase";
+ case BITLK_PROTECTION_SMART_CARD:
+ return "VMK protected with smart card";
+ default:
+ return "VMK with unknown protection";
+ }
+}
+
+static const char* get_bitlk_type_string(BITLKEncryptionType type)
+{
+ switch (type)
+ {
+ case BITLK_ENCRYPTION_TYPE_NORMAL:
+ return "normal";
+ case BITLK_ENCRYPTION_TYPE_EOW:
+ return "encrypt-on-write";
+ default:
+ return "unknown";
+ }
+}
+
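+/* convert an MS FILETIME (100 ns ticks since 1601-01-01) to Unix time
+ * (seconds since 1970-01-01) */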
+static uint64_t filetime_to_unixtime(uint64_t time)
+{
+ return (time - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS;
+}
+
+static int parse_vmk_entry(struct crypt_device *cd, uint8_t *data, int start, int end, struct bitlk_vmk **vmk)
+{
+ uint16_t key_entry_size = 0;
+ uint16_t key_entry_type = 0;
+ uint16_t key_entry_value = 0;
+ size_t key_size = 0;
+ char *string = NULL;
+ const char *key = NULL;
+ struct volume_key *vk = NULL;
+ bool supported = false;
+ int r = 0;
+
+	/* only passphrase, recovery passphrase and startup key VMKs are supported (can be used to activate) */
+ supported = (*vmk)->protection == BITLK_PROTECTION_PASSPHRASE ||
+ (*vmk)->protection == BITLK_PROTECTION_RECOVERY_PASSPHRASE ||
+ (*vmk)->protection == BITLK_PROTECTION_STARTUP_KEY;
+
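+	/* each metadata entry is a little-endian TLV record: uint16 total size
+	   (including the 8 B entry header), uint16 type and uint16 value,
+	   followed by the payload */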
+ while ((end - start) >= (ssize_t)(sizeof(key_entry_size) + sizeof(key_entry_type) + sizeof(key_entry_value))) {
+ /* size of this entry */
+ memcpy(&key_entry_size, data + start, sizeof(key_entry_size));
+ key_entry_size = le16_to_cpu(key_entry_size);
+ if (key_entry_size == 0)
+ break;
+
+ if (key_entry_size > (end - start))
+ return -EINVAL;
+
+ /* type and value of this entry */
+ memcpy(&key_entry_type, data + start + sizeof(key_entry_size), sizeof(key_entry_type));
+ memcpy(&key_entry_value,
+ data + start + sizeof(key_entry_size) + sizeof(key_entry_type),
+ sizeof(key_entry_value));
+ key_entry_type = le16_to_cpu(key_entry_type);
+ key_entry_value = le16_to_cpu(key_entry_value);
+
+ if (key_entry_type != BITLK_ENTRY_TYPE_PROPERTY) {
+ if (supported) {
+ log_err(cd, _("Unexpected metadata entry type '%u' found when parsing supported Volume Master Key."), key_entry_type);
+ return -EINVAL;
+ } else {
+ log_dbg(cd, "Unexpected metadata entry type '%u' found when parsing unsupported VMK.", key_entry_type);
+ }
+ }
+
+ /* stretch key with salt, skip 4 B (encryption method of the stretch key) */
+ if (key_entry_value == BITLK_ENTRY_VALUE_STRETCH_KEY) {
+ if ((end - start) < (BITLK_ENTRY_HEADER_LEN + BITLK_SALT_SIZE + 4))
+ return -EINVAL;
+ memcpy((*vmk)->salt,
+ data + start + BITLK_ENTRY_HEADER_LEN + 4,
+ BITLK_SALT_SIZE);
+ /* AES-CCM encrypted key */
+ } else if (key_entry_value == BITLK_ENTRY_VALUE_ENCRYPTED_KEY) {
+ if (key_entry_size < (BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE + BITLK_VMK_MAC_TAG_SIZE))
+ return -EINVAL;
+ /* nonce */
+ memcpy((*vmk)->nonce,
+ data + start + BITLK_ENTRY_HEADER_LEN,
+ BITLK_NONCE_SIZE);
+ /* MAC tag */
+ memcpy((*vmk)->mac_tag,
+ data + start + BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE,
+ BITLK_VMK_MAC_TAG_SIZE);
+ /* AES-CCM encrypted key */
+ key_size = key_entry_size - (BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE + BITLK_VMK_MAC_TAG_SIZE);
+ key = (const char *) data + start + BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE + BITLK_VMK_MAC_TAG_SIZE;
+ vk = crypt_alloc_volume_key(key_size, key);
+ if (vk == NULL)
+ return -ENOMEM;
+ crypt_volume_key_add_next(&((*vmk)->vk), vk);
+ /* clear key for a partially decrypted volume */
+ } else if (key_entry_value == BITLK_ENTRY_VALUE_KEY) {
+ /* We currently don't want to support opening a partially decrypted
+ * device so we don't need to store this key.
+ *
+ * key_size = key_entry_size - (BITLK_ENTRY_HEADER_LEN + 4);
+ * key = (const char *) data + start + BITLK_ENTRY_HEADER_LEN + 4;
+ * vk = crypt_alloc_volume_key(key_size, key);
+ * if (vk == NULL)
+ * return -ENOMEM;
+ * crypt_volume_key_add_next(&((*vmk)->vk), vk);
+ */
+ log_dbg(cd, "Skipping clear key metadata entry.");
+ /* unknown timestamps in recovery protected VMK */
+ } else if (key_entry_value == BITLK_ENTRY_VALUE_RECOVERY_TIME) {
+ ;
+ } else if (key_entry_value == BITLK_ENTRY_VALUE_STRING) {
+ if (key_entry_size < BITLK_ENTRY_HEADER_LEN)
+ return -EINVAL;
+ string = malloc((key_entry_size - BITLK_ENTRY_HEADER_LEN) * 2 + 1);
+ if (!string)
+ return -ENOMEM;
+ r = crypt_utf16_to_utf8(&string, CONST_CAST(char16_t *)(data + start + BITLK_ENTRY_HEADER_LEN),
+ key_entry_size - BITLK_ENTRY_HEADER_LEN);
+ if (r < 0 || !string) {
+ free(string);
+ log_err(cd, _("Invalid string found when parsing Volume Master Key."));
+ return -EINVAL;
+ } else if ((*vmk)->name != NULL) {
+ if (supported) {
+ log_err(cd, _("Unexpected string ('%s') found when parsing supported Volume Master Key."), string);
+ free(string);
+ return -EINVAL;
+ }
+ log_dbg(cd, "Unexpected string ('%s') found when parsing unsupported VMK.", string);
+ free(string);
+ string = NULL;
+ } else {
+ /* Assume that strings in VMK are the name of the VMK */
+ (*vmk)->name = string;
+ string = NULL;
+ }
+		/* no idea what this is, let's hope it's not important */
+ } else if (key_entry_value == BITLK_ENTRY_VALUE_USE_KEY && (*vmk)->protection == BITLK_PROTECTION_STARTUP_KEY) {
+ ;
+ } else {
+ if (supported) {
+ log_err(cd, _("Unexpected metadata entry value '%u' found when parsing supported Volume Master Key."), key_entry_value);
+ return -EINVAL;
+ } else {
+ log_dbg(cd, "Unexpected metadata entry value '%u' found when parsing unsupported VMK.", key_entry_value);
+ }
+ }
+
+ start += key_entry_size;
+ }
+
+ return 0;
+}
+
+void BITLK_bitlk_fvek_free(struct bitlk_fvek *fvek)
+{
+ if (!fvek)
+ return;
+
+ crypt_free_volume_key(fvek->vk);
+ free(fvek);
+}
+
+void BITLK_bitlk_vmk_free(struct bitlk_vmk *vmk)
+{
+ struct bitlk_vmk *vmk_next = NULL;
+
+ while (vmk) {
+ if (vmk->guid)
+ free(vmk->guid);
+ if (vmk->name)
+ free(vmk->name);
+ crypt_free_volume_key(vmk->vk);
+ vmk_next = vmk->next;
+ free(vmk);
+ vmk = vmk_next;
+ }
+}
+
+void BITLK_bitlk_metadata_free(struct bitlk_metadata *metadata)
+{
+ if (!metadata)
+ return;
+
+ free(metadata->guid);
+ if (metadata->description)
+ free(metadata->description);
+ BITLK_bitlk_vmk_free(metadata->vmks);
+ BITLK_bitlk_fvek_free(metadata->fvek);
+}
+
+int BITLK_read_sb(struct crypt_device *cd, struct bitlk_metadata *params)
+{
+ int devfd;
+ struct device *device = crypt_metadata_device(cd);
+ struct bitlk_signature sig = {};
+ struct bitlk_superblock sb = {};
+ struct bitlk_fve_metadata fve = {};
+ struct bitlk_entry_vmk entry_vmk = {};
+ uint8_t *fve_entries = NULL;
+ size_t fve_entries_size = 0;
+ uint32_t fve_metadata_size = 0;
+ int fve_offset = 0;
+ char guid_buf[UUID_STR_LEN] = {0};
+ uint16_t entry_size = 0;
+ uint16_t entry_type = 0;
+ int i = 0;
+ int r = 0;
+ int start = 0;
+ size_t key_size = 0;
+ const char *key = NULL;
+ char *description = NULL;
+
+ struct bitlk_vmk *vmk = NULL;
+ struct bitlk_vmk *vmk_p = params->vmks;
+
+ devfd = device_open(cd, crypt_data_device(cd), O_RDONLY);
+ if (devfd < 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* read and check the signature */
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), &sig, sizeof(sig), 0) != sizeof(sig)) {
+ log_dbg(cd, "Failed to read BITLK signature from %s.", device_path(device));
+ r = -EIO;
+ goto out;
+ }
+
+ if (memcmp(sig.signature, BITLK_SIGNATURE, sizeof(sig.signature)) == 0) {
+ params->togo = false;
+ fve_offset = BITLK_HEADER_METADATA_OFFSET;
+ } else if (memcmp(sig.signature, BITLK_SIGNATURE_TOGO, sizeof(sig.signature)) == 0) {
+ params->togo = true;
+ fve_offset = BITLK_HEADER_METADATA_OFFSET_TOGO;
+ } else {
+ log_dbg(cd, "Invalid or unknown signature for BITLK device.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (memcmp(sig.boot_code, BITLK_BOOTCODE_V1, sizeof(sig.boot_code)) == 0) {
+ log_err(cd, _("BITLK version 1 is currently not supported."));
+ r = -ENOTSUP;
+ goto out;
+ } else if (memcmp(sig.boot_code, BITLK_BOOTCODE_V2, sizeof(sig.boot_code)) == 0)
+ ;
+ else {
+ log_err(cd, _("Invalid or unknown boot signature for BITLK device."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ params->sector_size = le16_to_cpu(sig.sector_size);
+ if (params->sector_size == 0) {
+ log_dbg(cd, "Got sector size 0, assuming 512.");
+ params->sector_size = SECTOR_SIZE;
+ }
+
+ if (!(params->sector_size == 512 || params->sector_size == 4096)) {
+ log_err(cd, _("Unsupported sector size %" PRIu16 "."), params->sector_size);
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* read GUID and FVE metadata offsets */
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), &sb, sizeof(sb), fve_offset) != sizeof(sb)) {
+ log_err(cd, _("Failed to read BITLK header from %s."), device_path(device));
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* get encryption "type" based on the GUID from BITLK superblock */
+ if (memcmp(&sb.guid, BITLK_GUID_NORMAL, 16) == 0)
+ params->type = BITLK_ENCRYPTION_TYPE_NORMAL;
+ else if (memcmp(&sb.guid, BITLK_GUID_EOW, 16) == 0)
+ params->type = BITLK_ENCRYPTION_TYPE_EOW;
+ else
+ params->type = BITLK_ENCRYPTION_TYPE_UNKNOWN;
+ log_dbg(cd, "BITLK type from GUID: %s.", get_bitlk_type_string(params->type));
+
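+	/* the FVE metadata area is stored in three copies; the superblock holds
+	   offsets to all of them */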
+ for (i = 0; i < 3; i++)
+ params->metadata_offset[i] = le64_to_cpu(sb.fve_offset[i]);
+
+ log_dbg(cd, "Reading BITLK FVE metadata of size %zu on device %s, offset %" PRIu64 ".",
+ sizeof(fve), device_path(device), params->metadata_offset[0]);
+
+ /* read FVE metadata from the first metadata area */
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), &fve, sizeof(fve), params->metadata_offset[0]) != sizeof(fve) ||
+ memcmp(fve.signature, BITLK_SIGNATURE, sizeof(fve.signature)) ||
+ le16_to_cpu(fve.fve_version) != 2) {
+ log_err(cd, _("Failed to read BITLK FVE metadata from %s."), device_path(device));
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* check encryption state for the device */
+ params->state = true;
+ if (le16_to_cpu(fve.curr_state) != BITLK_STATE_NORMAL || le16_to_cpu(fve.next_state) != BITLK_STATE_NORMAL) {
+ params->state = false;
+ log_dbg(cd, "Unknown/unsupported state detected. Current state: %"PRIu16", next state: %"PRIu16".",
+ le16_to_cpu(fve.curr_state), le16_to_cpu(fve.next_state));
+ }
+
+ params->volume_size = le64_to_cpu(fve.volume_size);
+ params->metadata_version = le16_to_cpu(fve.fve_version);
+
+ switch (le16_to_cpu(fve.encryption)) {
+	/* AES-CBC with Elephant diffuser */
+ case 0x8000:
+ params->key_size = 256;
+ params->cipher = "aes";
+ params->cipher_mode = "cbc-elephant";
+ break;
+ case 0x8001:
+ params->key_size = 512;
+ params->cipher = "aes";
+ params->cipher_mode = "cbc-elephant";
+ break;
+ /* AES-CBC */
+ case 0x8002:
+ params->key_size = 128;
+ params->cipher = "aes";
+ params->cipher_mode = "cbc-eboiv";
+ break;
+ case 0x8003:
+ params->key_size = 256;
+ params->cipher = "aes";
+ params->cipher_mode = "cbc-eboiv";
+ break;
+ /* AES-XTS */
+ case 0x8004:
+ params->key_size = 256;
+ params->cipher = "aes";
+ params->cipher_mode = "xts-plain64";
+ break;
+ case 0x8005:
+ params->key_size = 512;
+ params->cipher = "aes";
+ params->cipher_mode = "xts-plain64";
+ break;
+ default:
+ log_err(cd, _("Unknown or unsupported encryption type."));
+ params->key_size = 0;
+ params->cipher = NULL;
+ params->cipher_mode = NULL;
+ r = -ENOTSUP;
+ goto out;
+ };
+
+ /* device GUID */
+ guid_to_string(&fve.guid, guid_buf);
+ params->guid = strdup(guid_buf);
+ if (!params->guid) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ params->creation_time = filetime_to_unixtime(le64_to_cpu(fve.creation_time));
+
+ fve_metadata_size = le32_to_cpu(fve.metadata_size);
+ if (fve_metadata_size < (BITLK_FVE_METADATA_HEADER_LEN + sizeof(entry_size) + sizeof(entry_type)) ||
+ fve_metadata_size > BITLK_FVE_METADATA_SIZE) {
+ r = -EINVAL;
+ goto out;
+ }
+ fve_entries_size = fve_metadata_size - BITLK_FVE_METADATA_HEADER_LEN;
+
+ /* read and parse all FVE metadata entries */
+ fve_entries = malloc(fve_entries_size);
+ if (!fve_entries) {
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(fve_entries, 0, fve_entries_size);
+
+ log_dbg(cd, "Reading BITLK FVE metadata entries of size %zu on device %s, offset %" PRIu64 ".",
+ fve_entries_size, device_path(device), params->metadata_offset[0] + BITLK_FVE_METADATA_HEADERS_LEN);
+
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), fve_entries, fve_entries_size,
+ params->metadata_offset[0] + BITLK_FVE_METADATA_HEADERS_LEN) != (ssize_t)fve_entries_size) {
+ log_err(cd, _("Failed to read BITLK metadata entries from %s."), device_path(device));
+ r = -EINVAL;
+ goto out;
+ }
+
+ while ((fve_entries_size - start) >= (sizeof(entry_size) + sizeof(entry_type))) {
+
+ /* size of this entry */
+ memcpy(&entry_size, fve_entries + start, sizeof(entry_size));
+ entry_size = le16_to_cpu(entry_size);
+ if (entry_size == 0)
+ break;
+
+ if (entry_size > (fve_entries_size - start)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* type of this entry */
+ memcpy(&entry_type, fve_entries + start + sizeof(entry_size), sizeof(entry_type));
+ entry_type = le16_to_cpu(entry_type);
+
+ /* VMK */
+ if (entry_type == BITLK_ENTRY_TYPE_VMK) {
+ if (entry_size < (BITLK_ENTRY_HEADER_LEN + sizeof(entry_vmk))) {
+ r = -EINVAL;
+ goto out;
+ }
+ /* skip first four variables in the entry (entry size, type, value and version) */
+ memcpy(&entry_vmk,
+ fve_entries + start + BITLK_ENTRY_HEADER_LEN,
+ sizeof(entry_vmk));
+
+ vmk = malloc(sizeof(struct bitlk_vmk));
+ if (!vmk) {
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(vmk, 0, sizeof(struct bitlk_vmk));
+
+ guid_to_string(&entry_vmk.guid, guid_buf);
+ vmk->guid = strdup (guid_buf);
+
+ vmk->name = NULL;
+
+ vmk->protection = get_vmk_protection(le16_to_cpu(entry_vmk.protection));
+
+ /* more data in another entry list */
+ r = parse_vmk_entry(cd, fve_entries,
+ start + BITLK_ENTRY_HEADER_LEN + BITLK_VMK_HEADER_LEN,
+ start + entry_size, &vmk);
+ if (r < 0) {
+ BITLK_bitlk_vmk_free(vmk);
+ goto out;
+ }
+
+ if (params->vmks == NULL)
+ params->vmks = vmk;
+ else
+ vmk_p->next = vmk;
+
+ vmk_p = vmk;
+ vmk = vmk->next;
+ /* FVEK */
+ } else if (entry_type == BITLK_ENTRY_TYPE_FVEK && !params->fvek) {
+ if (entry_size < (BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE + BITLK_VMK_MAC_TAG_SIZE)) {
+ r = -EINVAL;
+ goto out;
+ }
+ params->fvek = malloc(sizeof(struct bitlk_fvek));
+ if (!params->fvek) {
+ r = -ENOMEM;
+ goto out;
+ }
+ memcpy(params->fvek->nonce,
+ fve_entries + start + BITLK_ENTRY_HEADER_LEN,
+ BITLK_NONCE_SIZE);
+ /* MAC tag */
+ memcpy(params->fvek->mac_tag,
+ fve_entries + start + BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE,
+ BITLK_VMK_MAC_TAG_SIZE);
+ /* AES-CCM encrypted key */
+ key_size = entry_size - (BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE + BITLK_VMK_MAC_TAG_SIZE);
+ key = (const char *) fve_entries + start + BITLK_ENTRY_HEADER_LEN + BITLK_NONCE_SIZE + BITLK_VMK_MAC_TAG_SIZE;
+ params->fvek->vk = crypt_alloc_volume_key(key_size, key);
+ if (params->fvek->vk == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+ /* volume header info (location and size) */
+ } else if (entry_type == BITLK_ENTRY_TYPE_VOLUME_HEADER) {
+ struct bitlk_entry_header_block entry_header;
+ if ((fve_entries_size - start) < (BITLK_ENTRY_HEADER_LEN + sizeof(entry_header))) {
+ r = -EINVAL;
+ goto out;
+ }
+ memcpy(&entry_header,
+ fve_entries + start + BITLK_ENTRY_HEADER_LEN,
+ sizeof(entry_header));
+ params->volume_header_offset = le64_to_cpu(entry_header.offset);
+ params->volume_header_size = le64_to_cpu(entry_header.size);
+ /* volume description (utf-16 string) */
+ } else if (entry_type == BITLK_ENTRY_TYPE_DESCRIPTION && !params->description) {
+ if (entry_size < BITLK_ENTRY_HEADER_LEN) {
+ r = -EINVAL;
+ goto out;
+ }
+ description = malloc((entry_size - BITLK_ENTRY_HEADER_LEN) * 2 + 1);
+ if (!description) {
+ r = -ENOMEM;
+ goto out;
+ }
+ r = crypt_utf16_to_utf8(&description, CONST_CAST(char16_t *)(fve_entries + start + BITLK_ENTRY_HEADER_LEN),
+ entry_size - BITLK_ENTRY_HEADER_LEN);
+ if (r < 0) {
+ free(description);
+ BITLK_bitlk_vmk_free(vmk);
+ log_err(cd, _("Failed to convert BITLK volume description"));
+ goto out;
+ }
+ params->description = description;
+ }
+
+ start += entry_size;
+ }
+
+out:
+ if (fve_entries)
+ free(fve_entries);
+ return r;
+}
+
+int BITLK_dump(struct crypt_device *cd, struct device *device, struct bitlk_metadata *params)
+{
+ struct volume_key *vk_p;
+ struct bitlk_vmk *vmk_p;
+ int next_id = 0;
+ int i = 0;
+
+ log_std(cd, "Info for BITLK%s device %s.\n", params->togo ? " To Go" : "", device_path(device));
+ log_std(cd, "Version: \t%u\n", params->metadata_version);
+ log_std(cd, "GUID: \t%s\n", params->guid);
+ log_std(cd, "Sector size: \t%u [bytes]\n", params->sector_size);
+ log_std(cd, "Volume size: \t%" PRIu64 " [bytes]\n", params->volume_size);
+ log_std(cd, "Created: \t%s", ctime((time_t *)&(params->creation_time)));
+ log_std(cd, "Description: \t%s\n", params->description);
+ log_std(cd, "Cipher name: \t%s\n", params->cipher);
+ log_std(cd, "Cipher mode: \t%s\n", params->cipher_mode);
+ log_std(cd, "Cipher key: \t%u bits\n", params->key_size);
+
+ log_std(cd, "\n");
+
+ log_std(cd, "Keyslots:\n");
+ vmk_p = params->vmks;
+ while (vmk_p) {
+ log_std(cd, " %d: VMK\n", next_id);
+ if (vmk_p->name != NULL) {
+ log_std(cd, "\tName: \t%s\n", vmk_p->name);
+ }
+ log_std(cd, "\tGUID: \t%s\n", vmk_p->guid);
+ log_std(cd, "\tProtection: \t%s\n", get_vmk_protection_string (vmk_p->protection));
+ log_std(cd, "\tSalt: \t");
+ crypt_log_hex(cd, (const char *) vmk_p->salt, 16, "", 0, NULL);
+ log_std(cd, "\n");
+
+ vk_p = vmk_p->vk;
+ while (vk_p) {
+ log_std(cd, "\tKey data size:\t%zu [bytes]\n", vk_p->keylength);
+ vk_p = vk_p->next;
+ }
+ vmk_p = vmk_p->next;
+ next_id++;
+ }
+
+ log_std(cd, " %d: FVEK\n", next_id);
+ log_std(cd, "\tKey data size:\t%zu [bytes]\n", params->fvek->vk->keylength);
+
+ log_std(cd, "\n");
+
+ log_std(cd, "Metadata segments:\n");
+
+ for (i = 0; i < 3; i++) {
+ log_std(cd, " %d: FVE metadata area\n", i);
+ log_std(cd, "\tOffset: \t%" PRIu64 " [bytes]\n", params->metadata_offset[i]);
+ log_std(cd, "\tSize: \t%d [bytes]\n", BITLK_FVE_METADATA_SIZE);
+ }
+
+ log_std(cd, " %d: Volume header\n", i);
+ log_std(cd, "\tOffset: \t%" PRIu64 " [bytes]\n", params->volume_header_offset);
+ log_std(cd, "\tSize: \t%" PRIu64 " [bytes]\n", params->volume_header_size);
+ log_std(cd, "\tCipher: \t%s-%s\n", params->cipher, params->cipher_mode);
+
+ return 0;
+}
+
+/* check if given passphrase can be a recovery key (has right format) and convert it */
+static int get_recovery_key(struct crypt_device *cd,
+ const char *password,
+ size_t passwordLen,
+ struct volume_key **rc_key)
+{
+ unsigned int i, j = 0;
+ uint16_t parts[BITLK_RECOVERY_PARTS] = {0};
+ char part_str[BITLK_RECOVERY_PART_LEN + 1] = {0};
+ long part_num = 0;
+
+	/* check the passphrase; it should be:
+	   - 55 characters
+	   - 8 groups of 6 digits separated by '-'
+	   - each group a number divisible by 11
+	*/
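+	/* e.g. the group "480018" passes the check (480018 / 11 = 43638 = 0xAA76)
+	   and contributes the bytes 0x76 0xAA to the key; the eight 16-bit results
+	   concatenated in little-endian order form the 16-byte recovery key */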
+ if (passwordLen != BITLK_RECOVERY_KEY_LEN) {
+ if (passwordLen == BITLK_RECOVERY_KEY_LEN + 1 && password[passwordLen - 1] == '\n') {
+ /* looks like a recovery key with an extra newline, possibly from a key file */
+ passwordLen--;
+ log_dbg(cd, "Possible extra EOL stripped from the recovery key.");
+ } else
+ return 0;
+ }
+
+ for (i = BITLK_RECOVERY_PART_LEN; i < passwordLen; i += BITLK_RECOVERY_PART_LEN + 1) {
+ if (password[i] != '-')
+ return 0;
+ }
+
+ for (i = 0, j = 0; i < passwordLen; i += BITLK_RECOVERY_PART_LEN + 1, j++) {
+ strncpy(part_str, password + i, BITLK_RECOVERY_PART_LEN);
+
+ errno = 0;
+ part_num = strtol(part_str, NULL, 10);
+ if ((errno == ERANGE && (part_num == LONG_MAX || part_num == LONG_MIN)) ||
+ (errno != 0 && part_num == 0))
+ return -errno;
+
+ if (part_num % 11 != 0)
+ return 0;
+ parts[j] = cpu_to_le16(part_num / 11);
+ }
+
+ *rc_key = crypt_alloc_volume_key(16, (const char*) parts);
+ if (*rc_key == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
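+/* BEK external key entries use the same TLV format as VMK entries: we look
+ * for the actual key (BITLK_ENTRY_VALUE_KEY) and check that the embedded
+ * GUID matches the volume we are trying to open. */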
+static int parse_external_key_entry(struct crypt_device *cd,
+ const char *data,
+ int start,
+ int end,
+ struct volume_key **vk,
+ const struct bitlk_metadata *params)
+{
+ uint16_t key_entry_size = 0;
+ uint16_t key_entry_type = 0;
+ uint16_t key_entry_value = 0;
+ size_t key_size = 0;
+ const char *key = NULL;
+ struct bitlk_guid guid;
+ char guid_buf[UUID_STR_LEN] = {0};
+
+ while ((end - start) >= (ssize_t)(sizeof(key_entry_size) + sizeof(key_entry_type) + sizeof(key_entry_value))) {
+ /* size of this entry */
+ memcpy(&key_entry_size, data + start, sizeof(key_entry_size));
+ key_entry_size = le16_to_cpu(key_entry_size);
+ if (key_entry_size == 0)
+ break;
+
+ if (key_entry_size > (end - start))
+ return -EINVAL;
+
+ /* type and value of this entry */
+ memcpy(&key_entry_type, data + start + sizeof(key_entry_size), sizeof(key_entry_type));
+ memcpy(&key_entry_value,
+ data + start + sizeof(key_entry_size) + sizeof(key_entry_type),
+ sizeof(key_entry_value));
+ key_entry_type = le16_to_cpu(key_entry_type);
+ key_entry_value = le16_to_cpu(key_entry_value);
+
+ if (key_entry_type != BITLK_ENTRY_TYPE_PROPERTY && key_entry_type != BITLK_ENTRY_TYPE_VOLUME_GUID) {
+ log_err(cd, _("Unexpected metadata entry type '%u' found when parsing external key."), key_entry_type);
+ return -EINVAL;
+ }
+
+ if (key_entry_value == BITLK_ENTRY_VALUE_KEY) {
+ if (key_entry_size < (BITLK_ENTRY_HEADER_LEN + 4))
+ return -EINVAL;
+ key_size = key_entry_size - (BITLK_ENTRY_HEADER_LEN + 4);
+ key = (const char *) data + start + BITLK_ENTRY_HEADER_LEN + 4;
+ *vk = crypt_alloc_volume_key(key_size, key);
+ if (*vk == NULL)
+ return -ENOMEM;
+ return 0;
+ /* optional "ExternalKey" string, we can safely ignore it */
+ } else if (key_entry_value == BITLK_ENTRY_VALUE_STRING)
+ ;
+ /* GUID of the BitLocker device we are trying to open with this key */
+ else if (key_entry_value == BITLK_ENTRY_VALUE_GUID) {
+ if ((end - start) < (ssize_t)(BITLK_ENTRY_HEADER_LEN + sizeof(struct bitlk_guid)))
+ return -EINVAL;
+ memcpy(&guid, data + start + BITLK_ENTRY_HEADER_LEN, sizeof(struct bitlk_guid));
+ guid_to_string(&guid, guid_buf);
+ if (strcmp(guid_buf, params->guid) != 0) {
+ log_err(cd, _("BEK file GUID '%s' does not match GUID of the volume."), guid_buf);
+ return -EINVAL;
+ }
+ } else {
+ log_err(cd, _("Unexpected metadata entry value '%u' found when parsing external key."), key_entry_value);
+ return -EINVAL;
+ }
+
+ start += key_entry_size;
+ }
+
+ /* if we got here we failed to parse the metadata */
+ return -EINVAL;
+}
+
+/* check if given passphrase can be a startup key (has right format) and convert it */
+static int get_startup_key(struct crypt_device *cd,
+ const char *password,
+ size_t passwordLen,
+ const struct bitlk_vmk *vmk,
+ struct volume_key **su_key,
+ const struct bitlk_metadata *params)
+{
+ struct bitlk_bek_header bek_header = {0};
+ char guid_buf[UUID_STR_LEN] = {0};
+
+ uint16_t key_entry_size = 0;
+ uint16_t key_entry_type = 0;
+ uint16_t key_entry_value = 0;
+
+ if (passwordLen < (BITLK_BEK_FILE_HEADER_LEN + sizeof(key_entry_size) + sizeof(key_entry_type) + sizeof(key_entry_value)))
+ return -EPERM;
+
+ memcpy(&bek_header, password, BITLK_BEK_FILE_HEADER_LEN);
+
+ /* metadata should contain GUID of the VMK this startup key is used for */
+ guid_to_string(&bek_header.guid, guid_buf);
+ if (strcmp(guid_buf, vmk->guid) == 0)
+ log_dbg(cd, "Found matching startup key for VMK %s", vmk->guid);
+ else
+ return -EPERM;
+
+ if (le32_to_cpu(bek_header.metadata_version) != 1) {
+ log_err(cd, _("Unsupported BEK metadata version %" PRIu32), le32_to_cpu(bek_header.metadata_version));
+ return -ENOTSUP;
+ }
+
+ if (le32_to_cpu(bek_header.metadata_size) != passwordLen) {
+ log_err(cd, _("Unexpected BEK metadata size %" PRIu32 " does not match BEK file length"),
+ le32_to_cpu(bek_header.metadata_size));
+ return -EINVAL;
+ }
+
+ /* we are expecting exactly one metadata entry starting immediately after the header */
+ memcpy(&key_entry_size, password + BITLK_BEK_FILE_HEADER_LEN, sizeof(key_entry_size));
+ key_entry_size = le16_to_cpu(key_entry_size);
+ if (key_entry_size < BITLK_ENTRY_HEADER_LEN) {
+ log_dbg(cd, "Unexpected metadata entry size %" PRIu16 " when parsing BEK file", key_entry_size);
+ return -EINVAL;
+ }
+
+ /* type and value of this entry */
+ memcpy(&key_entry_type, password + BITLK_BEK_FILE_HEADER_LEN + sizeof(key_entry_size), sizeof(key_entry_type));
+ memcpy(&key_entry_value,
+ password + BITLK_BEK_FILE_HEADER_LEN + sizeof(key_entry_size) + sizeof(key_entry_type),
+ sizeof(key_entry_value));
+ key_entry_type = le16_to_cpu(key_entry_type);
+ key_entry_value = le16_to_cpu(key_entry_value);
+
+ if (key_entry_type == BITLK_ENTRY_TYPE_STARTUP_KEY && key_entry_value == BITLK_ENTRY_VALUE_EXTERNAL_KEY) {
+ return parse_external_key_entry(cd, password,
+ BITLK_BEK_FILE_HEADER_LEN + BITLK_ENTRY_HEADER_LEN + BITLK_STARTUP_KEY_HEADER_LEN,
+ passwordLen, su_key, params);
+ } else {
+ log_err(cd, _("Unexpected metadata entry found when parsing startup key."));
+ log_dbg(cd, "Entry type: %u, entry value: %u", key_entry_type, key_entry_value);
+ return -EINVAL;
+ }
+}
+
+static int bitlk_kdf(struct crypt_device *cd,
+ const char *password,
+ size_t passwordLen,
+ bool recovery,
+ const uint8_t *salt,
+ struct volume_key **vk)
+{
+ struct bitlk_kdf_data kdf = {};
+ struct crypt_hash *hd = NULL;
+ int len = 0;
+ char16_t *utf16Password = NULL;
+ int i = 0;
+ int r = 0;
+
+ memcpy(kdf.salt, salt, 16);
+
+ r = crypt_hash_init(&hd, BITLK_KDF_HASH);
+ if (r < 0)
+ return r;
+ len = crypt_hash_size(BITLK_KDF_HASH);
+ if (len < 0) {
+ crypt_hash_destroy(hd);
+ return len;
+ }
+
+ if (!recovery) {
+ /* passphrase: convert to UTF-16 first, then sha256(sha256(pw)) */
+ utf16Password = crypt_safe_alloc(sizeof(char16_t) * (passwordLen + 1));
+ if (!utf16Password) {
+ r = -ENOMEM;
+ goto out;
+ }
+ r = crypt_utf8_to_utf16(&utf16Password, CONST_CAST(char*)password, passwordLen);
+ if (r < 0)
+ goto out;
+
+ crypt_hash_write(hd, (char*)utf16Password, passwordLen * 2);
+ r = crypt_hash_final(hd, kdf.initial_sha256, len);
+ if (r < 0)
+ goto out;
+
+ crypt_hash_write(hd, kdf.initial_sha256, len);
+ r = crypt_hash_final(hd, kdf.initial_sha256, len);
+ if (r < 0)
+ goto out;
+ } else {
+ /* recovery passphrase: already converted in #get_recovery_key, now just sha256(rpw) */
+ crypt_hash_write(hd, password, passwordLen);
+ r = crypt_hash_final(hd, kdf.initial_sha256, len);
+ if (r < 0)
+ goto out;
+ }
+
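+	/* key stretching: hash the whole kdf struct (last digest, initial digest,
+	 * salt and count) with SHA-256 0x100000 times, incrementing the
+	 * little-endian count after each round; the final digest is the key */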
+ for (i = 0; i < BITLK_KDF_ITERATION_COUNT; i++) {
+ crypt_hash_write(hd, (const char*) &kdf, sizeof(kdf));
+ r = crypt_hash_final(hd, kdf.last_sha256, len);
+ if (r < 0)
+ goto out;
+ kdf.count = cpu_to_le64(le64_to_cpu(kdf.count) + 1);
+ }
+
+ *vk = crypt_alloc_volume_key(len, kdf.last_sha256);
+
+out:
+ crypt_safe_free(utf16Password);
+ if (hd)
+ crypt_hash_destroy(hd);
+ return r;
+}
+
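+/* Decrypt an AES-CCM wrapped key (VMK or FVEK): "key" is the unwrapping key,
+ * "enc_key" the ciphertext, "iv" the nonce and "tag" the MAC tag. The
+ * decrypted data carries a 12-byte metadata header (BITLK_OPEN_KEY_METADATA_LEN)
+ * that is stripped before the volume key is returned. */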
+static int decrypt_key(struct crypt_device *cd,
+ struct volume_key **vk,
+ struct volume_key *enc_key,
+ struct volume_key *key,
+ const uint8_t *tag, size_t tag_size,
+ const uint8_t *iv, size_t iv_size,
+ bool is_fvek)
+{
+ char *outbuf;
+ int r;
+ uint16_t key_size = 0;
+
+ outbuf = crypt_safe_alloc(enc_key->keylength);
+ if (!outbuf)
+ return -ENOMEM;
+
+ r = crypt_bitlk_decrypt_key(key->key, key->keylength, enc_key->key, outbuf, enc_key->keylength,
+ (const char*)iv, iv_size, (const char*)tag, tag_size);
+ if (r < 0) {
+ if (r == -ENOTSUP)
+ log_err(cd, _("This operation is not supported."));
+ goto out;
+ }
+
+	/* key_data has its size as part of the metadata */
+ memcpy(&key_size, outbuf, 2);
+ key_size = le16_to_cpu(key_size);
+ if (enc_key->keylength != key_size) {
+ log_err(cd, _("Unexpected key data size."));
+ log_dbg(cd, "Expected key data size: %zu, got %" PRIu16 "", enc_key->keylength, key_size);
+
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (is_fvek && strcmp(crypt_get_cipher_mode(cd), "cbc-elephant") == 0 &&
+ crypt_get_volume_key_size(cd) == 32) {
+		/* AES-128-CBC with the Elephant diffuser -- the key size is 256 bits (2 keys) but the key data is 512 bits,
+		   data: 16B CBC key, 16B empty, 16B Elephant key, 16B empty */
+ memcpy(outbuf + 16 + BITLK_OPEN_KEY_METADATA_LEN,
+ outbuf + 2 * 16 + BITLK_OPEN_KEY_METADATA_LEN, 16);
+ key_size = 32 + BITLK_OPEN_KEY_METADATA_LEN;
+ }
+
+
+ *vk = crypt_alloc_volume_key(key_size - BITLK_OPEN_KEY_METADATA_LEN,
+ (const char *)(outbuf + BITLK_OPEN_KEY_METADATA_LEN));
+ r = *vk ? 0 : -ENOMEM;
+out:
+ crypt_safe_free(outbuf);
+ return r;
+}
+
+int BITLK_get_volume_key(struct crypt_device *cd,
+ const char *password,
+ size_t passwordLen,
+ const struct bitlk_metadata *params,
+ struct volume_key **open_fvek_key)
+{
+ int r = 0;
+ struct volume_key *open_vmk_key = NULL;
+ struct volume_key *vmk_dec_key = NULL;
+ struct volume_key *recovery_key = NULL;
+ const struct bitlk_vmk *next_vmk = NULL;
+
+ next_vmk = params->vmks;
+ while (next_vmk) {
+ if (next_vmk->protection == BITLK_PROTECTION_PASSPHRASE) {
+ r = bitlk_kdf(cd, password, passwordLen, false, next_vmk->salt, &vmk_dec_key);
+ if (r) {
+ /* something wrong happened, but we still want to check other key slots */
+ next_vmk = next_vmk->next;
+ continue;
+ }
+ } else if (next_vmk->protection == BITLK_PROTECTION_RECOVERY_PASSPHRASE) {
+ r = get_recovery_key(cd, password, passwordLen, &recovery_key);
+ if (r) {
+ /* something wrong happened, but we still want to check other key slots */
+ next_vmk = next_vmk->next;
+ continue;
+ }
+ if (recovery_key == NULL) {
+ /* r = 0 but no key -> given passphrase is not a recovery passphrase */
+ r = -EPERM;
+ next_vmk = next_vmk->next;
+ continue;
+ }
+ log_dbg(cd, "Trying to use given password as a recovery key.");
+ r = bitlk_kdf(cd, recovery_key->key, recovery_key->keylength,
+ true, next_vmk->salt, &vmk_dec_key);
+ crypt_free_volume_key(recovery_key);
+ if (r)
+ return r;
+ } else if (next_vmk->protection == BITLK_PROTECTION_STARTUP_KEY) {
+ r = get_startup_key(cd, password, passwordLen, next_vmk, &vmk_dec_key, params);
+ if (r) {
+ next_vmk = next_vmk->next;
+ continue;
+ }
+ log_dbg(cd, "Trying to use external key found in provided password.");
+ } else {
+ /* only passphrase, recovery passphrase and startup key VMKs supported right now */
+ log_dbg(cd, "Skipping %s", get_vmk_protection_string(next_vmk->protection));
+ next_vmk = next_vmk->next;
+ if (r == 0)
+ /* we need to set error code in case we have only unsupported VMKs */
+ r = -ENOTSUP;
+ continue;
+ }
+
+ log_dbg(cd, "Trying to decrypt %s.", get_vmk_protection_string(next_vmk->protection));
+ r = decrypt_key(cd, &open_vmk_key, next_vmk->vk, vmk_dec_key,
+ next_vmk->mac_tag, BITLK_VMK_MAC_TAG_SIZE,
+ next_vmk->nonce, BITLK_NONCE_SIZE, false);
+ if (r < 0) {
+ log_dbg(cd, "Failed to decrypt VMK using provided passphrase.");
+ crypt_free_volume_key(vmk_dec_key);
+ if (r == -ENOTSUP)
+ return r;
+ next_vmk = next_vmk->next;
+ continue;
+ }
+ crypt_free_volume_key(vmk_dec_key);
+
+ r = decrypt_key(cd, open_fvek_key, params->fvek->vk, open_vmk_key,
+ params->fvek->mac_tag, BITLK_VMK_MAC_TAG_SIZE,
+ params->fvek->nonce, BITLK_NONCE_SIZE, true);
+ if (r < 0) {
+ log_dbg(cd, "Failed to decrypt FVEK using VMK.");
+ crypt_free_volume_key(open_vmk_key);
+ if (r == -ENOTSUP)
+ return r;
+ } else {
+ crypt_free_volume_key(open_vmk_key);
+ break;
+ }
+
+ next_vmk = next_vmk->next;
+ }
+
+ if (r) {
+ log_dbg(cd, "No more VMKs to try.");
+ return r;
+ }
+
+ return 0;
+}
+
+static int _activate_check(struct crypt_device *cd,
+ const struct bitlk_metadata *params)
+{
+ const struct bitlk_vmk *next_vmk = NULL;
+
+ if (!params->state) {
+ log_err(cd, _("This BITLK device is in an unsupported state and cannot be activated."));
+ return -ENOTSUP;
+ }
+
+ if (params->type != BITLK_ENCRYPTION_TYPE_NORMAL) {
+ log_err(cd, _("BITLK devices with type '%s' cannot be activated."), get_bitlk_type_string(params->type));
+ return -ENOTSUP;
+ }
+
+ next_vmk = params->vmks;
+ while (next_vmk) {
+ if (next_vmk->protection == BITLK_PROTECTION_CLEAR_KEY) {
+ log_err(cd, _("Activation of partially decrypted BITLK device is not supported."));
+ return -ENOTSUP;
+ }
+ next_vmk = next_vmk->next;
+ }
+
+ return 0;
+}
+
+static int _activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *open_fvek_key,
+ const struct bitlk_metadata *params,
+ uint32_t flags)
+{
+ int r = 0;
+ int i = 0;
+ int j = 0;
+ int min = 0;
+ int num_segments = 0;
+ struct crypt_dm_active_device dmd = {
+ .flags = flags,
+ };
+ struct dm_target *next_segment = NULL;
+ struct segment segments[MAX_BITLK_SEGMENTS] = {};
+ struct segment temp;
+ uint64_t next_start = 0;
+ uint64_t next_end = 0;
+ uint64_t last_segment = 0;
+ uint32_t dmt_flags = 0;
+
+ r = _activate_check(cd, params);
+ if (r)
+ return r;
+
+ r = device_block_adjust(cd, crypt_data_device(cd), DEV_EXCL,
+ 0, &dmd.size, &dmd.flags);
+ if (r)
+ return r;
+
+ if (dmd.size * SECTOR_SIZE != params->volume_size)
+ log_std(cd, _("WARNING: BitLocker volume size %" PRIu64 " does not match the underlying device size %" PRIu64 ""),
+ params->volume_size,
+ dmd.size * SECTOR_SIZE);
+
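+	/* The filesystem header is stored at volume_header_offset and must appear
+	   at sector 0 of the mapped device, so a dm-crypt segment remaps it there;
+	   the metadata areas and the original header location are covered by
+	   dm-zero so they read back as zeros. */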
+	/* there will always be 4 dm-zero segments: 3x metadata, 1x FS header */
+ for (i = 0; i < 3; i++) {
+ segments[num_segments].offset = params->metadata_offset[i] / SECTOR_SIZE;
+ segments[num_segments].length = BITLK_FVE_METADATA_SIZE / SECTOR_SIZE;
+ segments[num_segments].iv_offset = 0;
+ segments[num_segments].type = BITLK_SEGTYPE_ZERO;
+ num_segments++;
+ }
+ segments[num_segments].offset = params->volume_header_offset / SECTOR_SIZE;
+ segments[num_segments].length = params->volume_header_size / SECTOR_SIZE;
+ segments[num_segments].iv_offset = 0;
+ segments[num_segments].type = BITLK_SEGTYPE_ZERO;
+ num_segments++;
+
+ /* filesystem header (moved from the special location) */
+ segments[num_segments].offset = 0;
+ segments[num_segments].length = params->volume_header_size / SECTOR_SIZE;
+ segments[num_segments].iv_offset = params->volume_header_offset / SECTOR_SIZE;
+ segments[num_segments].type = BITLK_SEGTYPE_CRYPT;
+ num_segments++;
+
+ /* now fill gaps between the dm-zero segments with dm-crypt */
+ last_segment = params->volume_header_size / SECTOR_SIZE;
+ while (true) {
+ next_start = dmd.size;
+ next_end = dmd.size;
+
+ /* start of the next segment: end of the first existing segment after the last added */
+ for (i = 0; i < num_segments; i++)
+ if (segments[i].offset + segments[i].length < next_start && segments[i].offset + segments[i].length >= last_segment)
+ next_start = segments[i].offset + segments[i].length;
+
+ /* end of the next segment: start of the next segment after start we found above */
+ for (i = 0; i < num_segments; i++)
+ if (segments[i].offset < next_end && segments[i].offset >= next_start)
+ next_end = segments[i].offset;
+
+		/* two zero segments directly adjacent with no gap between them, just
+		   bump last_segment so the algorithm moves forward */
+ if (next_end - next_start == 0) {
+ last_segment = next_end + 1;
+ continue;
+ }
+
+ segments[num_segments].offset = next_start;
+ segments[num_segments].length = next_end - next_start;
+ segments[num_segments].iv_offset = next_start;
+ segments[num_segments].type = BITLK_SEGTYPE_CRYPT;
+ last_segment = next_end;
+ num_segments++;
+
+ if (next_end == dmd.size)
+ break;
+
+		if (num_segments == MAX_BITLK_SEGMENTS) {
+ log_dbg(cd, "Failed to calculate number of dm-crypt segments for open.");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+	/* device mapper needs the segments sorted by offset (simple selection sort) */
+ for (i = 0; i < num_segments - 1; i++) {
+ min = i;
+ for (j = i + 1; j < num_segments; j++)
+ if (segments[j].offset < segments[min].offset)
+ min = j;
+
+ if (min != i) {
+ temp.offset = segments[min].offset;
+ temp.length = segments[min].length;
+ temp.iv_offset = segments[min].iv_offset;
+ temp.type = segments[min].type;
+
+ segments[min].offset = segments[i].offset;
+ segments[min].length = segments[i].length;
+ segments[min].iv_offset = segments[i].iv_offset;
+ segments[min].type = segments[i].type;
+
+ segments[i].offset = temp.offset;
+ segments[i].length = temp.length;
+ segments[i].iv_offset = temp.iv_offset;
+ segments[i].type = temp.type;
+ }
+ }
+
+ if (params->sector_size != SECTOR_SIZE)
+ dmd.flags |= CRYPT_ACTIVATE_IV_LARGE_SECTORS;
+
+ r = dm_targets_allocate(&dmd.segment, num_segments);
+ if (r)
+ goto out;
+ next_segment = &dmd.segment;
+
+ for (i = 0; i < num_segments; i++) {
+ if (segments[i].type == BITLK_SEGTYPE_ZERO)
+ r = dm_zero_target_set(next_segment,
+ segments[i].offset,
+ segments[i].length);
+ else if (segments[i].type == BITLK_SEGTYPE_CRYPT)
+ r = dm_crypt_target_set(next_segment,
+ segments[i].offset,
+ segments[i].length,
+ crypt_data_device(cd),
+ open_fvek_key,
+ crypt_get_cipher_spec(cd),
+ segments[i].iv_offset,
+ segments[i].iv_offset,
+ NULL, 0,
+ params->sector_size);
+ if (r)
+ goto out;
+
+ next_segment = next_segment->next;
+ }
+
+ log_dbg(cd, "Trying to activate BITLK on device %s%s%s.",
+ device_path(crypt_data_device(cd)), name ? " with name " :"", name ?: "");
+
+ r = dm_create_device(cd, name, CRYPT_BITLK, &dmd);
+ if (r < 0) {
+ dm_flags(cd, DM_CRYPT, &dmt_flags);
+ if (!strcmp(params->cipher_mode, "cbc-eboiv") && !(dmt_flags & DM_BITLK_EBOIV_SUPPORTED)) {
+ log_err(cd, _("Cannot activate device, kernel dm-crypt is missing support for BITLK IV."));
+ r = -ENOTSUP;
+ }
+ if (!strcmp(params->cipher_mode, "cbc-elephant") && !(dmt_flags & DM_BITLK_ELEPHANT_SUPPORTED)) {
+ log_err(cd, _("Cannot activate device, kernel dm-crypt is missing support for BITLK Elephant diffuser."));
+ r = -ENOTSUP;
+ }
+ if ((dmd.flags & CRYPT_ACTIVATE_IV_LARGE_SECTORS) && !(dmt_flags & DM_SECTOR_SIZE_SUPPORTED)) {
+ log_err(cd, _("Cannot activate device, kernel dm-crypt is missing support for large sector size."));
+ r = -ENOTSUP;
+ }
+ if (dm_flags(cd, DM_ZERO, &dmt_flags) < 0) {
+ log_err(cd, _("Cannot activate device, kernel dm-zero module is missing."));
+ r = -ENOTSUP;
+ }
+ }
+out:
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+int BITLK_activate_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ const char *password,
+ size_t passwordLen,
+ const struct bitlk_metadata *params,
+ uint32_t flags)
+{
+ int r = 0;
+ struct volume_key *open_fvek_key = NULL;
+
+ r = _activate_check(cd, params);
+ if (r)
+ return r;
+
+ r = BITLK_get_volume_key(cd, password, passwordLen, params, &open_fvek_key);
+ if (r < 0)
+ goto out;
+
+ /* Password verify only */
+ if (!name)
+ goto out;
+
+ r = _activate(cd, name, open_fvek_key, params, flags);
+out:
+ crypt_free_volume_key(open_fvek_key);
+ return r;
+}
+
+int BITLK_activate_by_volume_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size,
+ const struct bitlk_metadata *params,
+ uint32_t flags)
+{
+ int r = 0;
+ struct volume_key *open_fvek_key = NULL;
+
+ r = _activate_check(cd, params);
+ if (r)
+ return r;
+
+ open_fvek_key = crypt_alloc_volume_key(volume_key_size, volume_key);
+ if (!open_fvek_key)
+ return -ENOMEM;
+
+ r = _activate(cd, name, open_fvek_key, params, flags);
+
+ crypt_free_volume_key(open_fvek_key);
+ return r;
+}
diff --git a/lib/bitlk/bitlk.h b/lib/bitlk/bitlk.h
new file mode 100644
index 0000000..54d3dc7
--- /dev/null
+++ b/lib/bitlk/bitlk.h
@@ -0,0 +1,148 @@
+/*
+ * BITLK (BitLocker-compatible) header definition
+ *
+ * Copyright (C) 2019-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2019-2023 Milan Broz
+ * Copyright (C) 2019-2023 Vojtech Trefny
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_BITLK_H
+#define _CRYPTSETUP_BITLK_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+struct crypt_device;
+struct device;
+struct volume_key;
+
+#define BITLK_NONCE_SIZE 12
+#define BITLK_SALT_SIZE 16
+#define BITLK_VMK_MAC_TAG_SIZE 16
+
+#define BITLK_STATE_NORMAL 0x0004
+
+typedef enum {
+ BITLK_ENCRYPTION_TYPE_NORMAL = 0,
+ BITLK_ENCRYPTION_TYPE_EOW,
+ BITLK_ENCRYPTION_TYPE_UNKNOWN,
+} BITLKEncryptionType;
+
+typedef enum {
+ BITLK_PROTECTION_CLEAR_KEY = 0,
+ BITLK_PROTECTION_TPM,
+ BITLK_PROTECTION_STARTUP_KEY,
+ BITLK_PROTECTION_TPM_PIN,
+ BITLK_PROTECTION_RECOVERY_PASSPHRASE,
+ BITLK_PROTECTION_PASSPHRASE,
+ BITLK_PROTECTION_SMART_CARD,
+ BITLK_PROTECTION_UNKNOWN,
+} BITLKVMKProtection;
+
+typedef enum {
+ BITLK_ENTRY_TYPE_PROPERTY = 0x0000,
+ BITLK_ENTRY_TYPE_VMK = 0x0002,
+ BITLK_ENTRY_TYPE_FVEK = 0x0003,
+ BITLK_ENTRY_TYPE_STARTUP_KEY = 0x0006,
+ BITLK_ENTRY_TYPE_DESCRIPTION = 0x0007,
+ BITLK_ENTRY_TYPE_VOLUME_HEADER = 0x000f,
+ BITLK_ENTRY_TYPE_VOLUME_GUID = 0x0019,
+} BITLKFVEEntryType;
+
+typedef enum {
+ BITLK_ENTRY_VALUE_ERASED = 0x0000,
+ BITLK_ENTRY_VALUE_KEY = 0x0001,
+ BITLK_ENTRY_VALUE_STRING = 0x0002,
+ BITLK_ENTRY_VALUE_STRETCH_KEY = 0x0003,
+ BITLK_ENTRY_VALUE_USE_KEY = 0x0004,
+ BITLK_ENTRY_VALUE_ENCRYPTED_KEY = 0x0005,
+ BITLK_ENTRY_VALUE_TPM_KEY = 0x0006,
+ BITLK_ENTRY_VALUE_VALIDATION = 0x0007,
+ BITLK_ENTRY_VALUE_VMK = 0x0008,
+ BITLK_ENTRY_VALUE_EXTERNAL_KEY = 0x0009,
+ BITLK_ENTRY_VALUE_OFFSET_SIZE = 0x000f,
+	BITLK_ENTRY_VALUE_RECOVERY_TIME = 0x0015,
+ BITLK_ENTRY_VALUE_GUID = 0x0017,
+} BITLKFVEEntryValue;
+
+struct bitlk_vmk {
+ char *guid;
+ char *name;
+ BITLKVMKProtection protection;
+ uint8_t salt[BITLK_SALT_SIZE];
+ uint8_t mac_tag[BITLK_VMK_MAC_TAG_SIZE];
+ uint8_t nonce[BITLK_NONCE_SIZE];
+ struct volume_key *vk;
+ struct bitlk_vmk *next;
+};
+
+struct bitlk_fvek {
+ uint8_t mac_tag[BITLK_VMK_MAC_TAG_SIZE];
+ uint8_t nonce[BITLK_NONCE_SIZE];
+ struct volume_key *vk;
+};
+
+struct bitlk_metadata {
+ uint16_t sector_size;
+ uint64_t volume_size;
+ bool togo;
+ bool state;
+ BITLKEncryptionType type;
+ const char *cipher;
+ const char *cipher_mode;
+ uint16_t key_size;
+ char *guid;
+ uint64_t creation_time;
+ char *description;
+ uint64_t metadata_offset[3];
+ uint32_t metadata_version;
+ uint64_t volume_header_offset;
+ uint64_t volume_header_size;
+ struct bitlk_vmk *vmks;
+ struct bitlk_fvek *fvek;
+};
+
+int BITLK_read_sb(struct crypt_device *cd, struct bitlk_metadata *params);
+
+int BITLK_dump(struct crypt_device *cd, struct device *device, struct bitlk_metadata *params);
+
+int BITLK_get_volume_key(struct crypt_device *cd,
+ const char *password,
+ size_t passwordLen,
+ const struct bitlk_metadata *params,
+ struct volume_key **open_fvek_key);
+
+int BITLK_activate_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ const char *password,
+ size_t passwordLen,
+ const struct bitlk_metadata *params,
+ uint32_t flags);
+
+int BITLK_activate_by_volume_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size,
+ const struct bitlk_metadata *params,
+ uint32_t flags);
+
+void BITLK_bitlk_fvek_free(struct bitlk_fvek *fvek);
+void BITLK_bitlk_vmk_free(struct bitlk_vmk *vmk);
+void BITLK_bitlk_metadata_free(struct bitlk_metadata *params);
+
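+/*
+ * Typical call flow (an illustrative sketch only; the caller-side names
+ * cd, name, password, password_len and flags are assumptions, not part
+ * of this header):
+ *
+ *   struct bitlk_metadata params = {0};
+ *
+ *   if (!BITLK_read_sb(cd, &params))
+ *           BITLK_activate_by_passphrase(cd, name, password, password_len,
+ *                                        &params, flags);
+ *
+ *   BITLK_bitlk_metadata_free(&params);
+ */
+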
+#endif
diff --git a/lib/bitops.h b/lib/bitops.h
new file mode 100644
index 0000000..a991687
--- /dev/null
+++ b/lib/bitops.h
@@ -0,0 +1,123 @@
+/*
+ * No copyright is claimed. This code is in the public domain; do with
+ * it what you wish.
+ *
+ * Written by Karel Zak <kzak@redhat.com>
+ */
+#ifndef BITOPS_H
+#define BITOPS_H
+
+#include <stdint.h>
+#include <sys/param.h>
+
+#if defined(HAVE_BYTESWAP_H)
+# include <byteswap.h>
+#endif
+
+#if defined(HAVE_ENDIAN_H)
+# include <endian.h>
+#elif defined(HAVE_SYS_ENDIAN_H) /* BSDs have them here */
+# include <sys/endian.h>
+#endif
+
+#if defined(__OpenBSD__)
+# include <sys/types.h>
+# define be16toh(x) betoh16(x)
+# define be32toh(x) betoh32(x)
+# define be64toh(x) betoh64(x)
+#endif
+
+/*
+ * Fallbacks
+ */
+#ifndef bswap_16
+# define bswap_16(x) ((((x) & 0x00FF) << 8) | \
+ (((x) & 0xFF00) >> 8))
+#endif
+
+#ifndef bswap_32
+# define bswap_32(x) ((((x) & 0x000000FF) << 24) | \
+ (((x) & 0x0000FF00) << 8) | \
+ (((x) & 0x00FF0000) >> 8) | \
+ (((x) & 0xFF000000) >> 24))
+#endif
+
+#ifndef bswap_64
+# define bswap_64(x) ((((x) & 0x00000000000000FFULL) << 56) | \
+ (((x) & 0x000000000000FF00ULL) << 40) | \
+ (((x) & 0x0000000000FF0000ULL) << 24) | \
+ (((x) & 0x00000000FF000000ULL) << 8) | \
+ (((x) & 0x000000FF00000000ULL) >> 8) | \
+ (((x) & 0x0000FF0000000000ULL) >> 24) | \
+ (((x) & 0x00FF000000000000ULL) >> 40) | \
+ (((x) & 0xFF00000000000000ULL) >> 56))
+#endif
+
+#ifndef htobe16
+# if !defined(WORDS_BIGENDIAN)
+# define htobe16(x) bswap_16 (x)
+# define htole16(x) (x)
+# define be16toh(x) bswap_16 (x)
+# define le16toh(x) (x)
+# define htobe32(x) bswap_32 (x)
+# define htole32(x) (x)
+# define be32toh(x) bswap_32 (x)
+# define le32toh(x) (x)
+# define htobe64(x) bswap_64 (x)
+# define htole64(x) (x)
+# define be64toh(x) bswap_64 (x)
+# define le64toh(x) (x)
+# else
+# define htobe16(x) (x)
+# define htole16(x) bswap_16 (x)
+# define be16toh(x) (x)
+# define le16toh(x) bswap_16 (x)
+# define htobe32(x) (x)
+# define htole32(x) bswap_32 (x)
+# define be32toh(x) (x)
+# define le32toh(x) bswap_32 (x)
+# define htobe64(x) (x)
+# define htole64(x) bswap_64 (x)
+# define be64toh(x) (x)
+# define le64toh(x) bswap_64 (x)
+# endif
+#endif
+
+/*
+ * Byte swab macros (based on linux/byteorder/swab.h)
+ */
+#define swab16(x) bswap_16(x)
+#define swab32(x) bswap_32(x)
+#define swab64(x) bswap_64(x)
+
+#define cpu_to_le16(x) ((uint16_t) htole16(x))
+#define cpu_to_le32(x) ((uint32_t) htole32(x))
+#define cpu_to_le64(x) ((uint64_t) htole64(x))
+
+#define cpu_to_be16(x) ((uint16_t) htobe16(x))
+#define cpu_to_be32(x) ((uint32_t) htobe32(x))
+#define cpu_to_be64(x) ((uint64_t) htobe64(x))
+
+#define le16_to_cpu(x) ((uint16_t) le16toh(x))
+#define le32_to_cpu(x) ((uint32_t) le32toh(x))
+#define le64_to_cpu(x) ((uint64_t) le64toh(x))
+
+#define be16_to_cpu(x) ((uint16_t) be16toh(x))
+#define be32_to_cpu(x) ((uint32_t) be32toh(x))
+#define be64_to_cpu(x) ((uint64_t) be64toh(x))
+
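+/*
+ * Example (illustrative): on-disk metadata fields use a fixed byte order
+ * (little-endian for BITLK, big-endian for the LUKS1 header), so reads go
+ * through these helpers regardless of host CPU; disk_buf stands in for the
+ * caller's raw bytes:
+ *
+ *   uint32_t raw, value;
+ *   memcpy(&raw, disk_buf, sizeof(raw));
+ *   value = le32_to_cpu(raw);        // now in host byte order
+ */
+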
+/*
+ * Bit map related macros. Usually provided by libc.
+ */
+#ifndef NBBY
+# define NBBY CHAR_BIT
+#endif
+
+#ifndef setbit
+# define setbit(a,i) ((a)[(i)/NBBY] |= 1<<((i)%NBBY))
+# define clrbit(a,i) ((a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+# define isset(a,i) ((a)[(i)/NBBY] & (1<<((i)%NBBY)))
+# define isclr(a,i) (((a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+#endif
+
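+/*
+ * Example (illustrative): these macros treat a char array as a bit array
+ * with NBBY bits per byte:
+ *
+ *   unsigned char map[4] = { 0 };    // 32 bits
+ *   setbit(map, 9);                  // map[1] |= 0x02
+ *   if (isset(map, 9))
+ *           clrbit(map, 9);
+ */
+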
+#endif /* BITOPS_H */
diff --git a/lib/crypt_plain.c b/lib/crypt_plain.c
new file mode 100644
index 0000000..c839b09
--- /dev/null
+++ b/lib/crypt_plain.c
@@ -0,0 +1,117 @@
+/*
+ * cryptsetup plain device helper functions
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include "libcryptsetup.h"
+#include "internal.h"
+
+static int hash(const char *hash_name, size_t key_size, char *key,
+ size_t passphrase_size, const char *passphrase)
+{
+ struct crypt_hash *md = NULL;
+ size_t len;
+ int round, i, r = 0;
+
+ if (crypt_hash_init(&md, hash_name))
+ return -ENOENT;
+
+ len = crypt_hash_size(hash_name);
+
+ for(round = 0; key_size && !r; round++) {
+ /* hack from hashalot to avoid null bytes in key */
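+		/* round r yields hash("A" x r || passphrase); a key longer
+		 * than one digest is the concatenation of successive rounds */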
+ for(i = 0; i < round; i++)
+ if (crypt_hash_write(md, "A", 1))
+ r = 1;
+
+ if (crypt_hash_write(md, passphrase, passphrase_size))
+ r = 1;
+
+ if (len > key_size)
+ len = key_size;
+
+ if (crypt_hash_final(md, key, len))
+ r = 1;
+
+ key += len;
+ key_size -= len;
+ }
+
+ crypt_hash_destroy(md);
+ return r;
+}
+
+#define PLAIN_HASH_LEN_MAX 256
+
+int crypt_plain_hash(struct crypt_device *cd,
+ const char *hash_name,
+ char *key, size_t key_size,
+ const char *passphrase, size_t passphrase_size)
+{
+ char hash_name_buf[PLAIN_HASH_LEN_MAX], *s;
+ size_t hash_size, pad_size;
+ int r;
+
+ log_dbg(cd, "Plain: hashing passphrase using %s.", hash_name);
+
+ if (strlen(hash_name) >= PLAIN_HASH_LEN_MAX)
+ return -EINVAL;
+ strncpy(hash_name_buf, hash_name, PLAIN_HASH_LEN_MAX);
+ hash_name_buf[PLAIN_HASH_LEN_MAX - 1] = '\0';
+
+ /* hash[:hash_length] */
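+	/* e.g. "sha256:16" hashes with sha256 and uses 16 digest bytes;
+	 * the remaining pad_size bytes of the key are zero-filled below */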
+ if ((s = strchr(hash_name_buf, ':'))) {
+ *s = '\0';
+ s++;
+		if (!*s || sscanf(s, "%zu", &hash_size) != 1) {
+ log_dbg(cd, "Hash length is not a number");
+ return -EINVAL;
+ }
+ if (hash_size > key_size) {
+			log_dbg(cd, "Hash length %zu > key length %zu",
+ hash_size, key_size);
+ return -EINVAL;
+ }
+ pad_size = key_size - hash_size;
+ } else {
+ hash_size = key_size;
+ pad_size = 0;
+ }
+
+ /* No hash, copy passphrase directly */
+ if (!strcmp(hash_name_buf, "plain")) {
+ if (passphrase_size < hash_size) {
+ log_dbg(cd, "Too short plain passphrase.");
+ return -EINVAL;
+ }
+ memcpy(key, passphrase, hash_size);
+ r = 0;
+ } else
+ r = hash(hash_name_buf, hash_size, key, passphrase_size, passphrase);
+
+ if (r == 0 && pad_size)
+ memset(key + hash_size, 0, pad_size);
+
+ return r;
+}
diff --git a/lib/crypto_backend/Makemodule.am b/lib/crypto_backend/Makemodule.am
new file mode 100644
index 0000000..7507763
--- /dev/null
+++ b/lib/crypto_backend/Makemodule.am
@@ -0,0 +1,41 @@
+noinst_LTLIBRARIES += libcrypto_backend.la
+
+libcrypto_backend_la_CFLAGS = $(AM_CFLAGS) @CRYPTO_CFLAGS@
+
+libcrypto_backend_la_SOURCES = \
+ lib/crypto_backend/crypto_backend.h \
+ lib/crypto_backend/crypto_backend_internal.h \
+ lib/crypto_backend/crypto_cipher_kernel.c \
+ lib/crypto_backend/crypto_storage.c \
+ lib/crypto_backend/pbkdf_check.c \
+ lib/crypto_backend/crc32.c \
+ lib/crypto_backend/base64.c \
+ lib/crypto_backend/utf8.c \
+ lib/crypto_backend/argon2_generic.c \
+ lib/crypto_backend/cipher_generic.c \
+ lib/crypto_backend/cipher_check.c
+
+if CRYPTO_BACKEND_GCRYPT
+libcrypto_backend_la_SOURCES += lib/crypto_backend/crypto_gcrypt.c
+endif
+if CRYPTO_BACKEND_OPENSSL
+libcrypto_backend_la_SOURCES += lib/crypto_backend/crypto_openssl.c
+endif
+if CRYPTO_BACKEND_NSS
+libcrypto_backend_la_SOURCES += lib/crypto_backend/crypto_nss.c
+endif
+if CRYPTO_BACKEND_KERNEL
+libcrypto_backend_la_SOURCES += lib/crypto_backend/crypto_kernel.c
+endif
+if CRYPTO_BACKEND_NETTLE
+libcrypto_backend_la_SOURCES += lib/crypto_backend/crypto_nettle.c
+endif
+
+if CRYPTO_INTERNAL_PBKDF2
+libcrypto_backend_la_SOURCES += lib/crypto_backend/pbkdf2_generic.c
+endif
+
+if CRYPTO_INTERNAL_ARGON2
+libcrypto_backend_la_DEPENDENCIES = libargon2.la
+libcrypto_backend_la_LIBADD = libargon2.la
+endif
diff --git a/lib/crypto_backend/argon2/LICENSE b/lib/crypto_backend/argon2/LICENSE
new file mode 100644
index 0000000..de14cd2
--- /dev/null
+++ b/lib/crypto_backend/argon2/LICENSE
@@ -0,0 +1,30 @@
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+ the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+ moral rights retained by the original author(s) and/or performer(s);
+ publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+ rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+ rights protecting the extraction, dissemination, use and reuse of data in a Work;
+ database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+ other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+ Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+ Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+ Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/lib/crypto_backend/argon2/Makemodule.am b/lib/crypto_backend/argon2/Makemodule.am
new file mode 100644
index 0000000..6fef2f1
--- /dev/null
+++ b/lib/crypto_backend/argon2/Makemodule.am
@@ -0,0 +1,30 @@
+noinst_LTLIBRARIES += libargon2.la
+
+libargon2_la_CFLAGS = $(AM_CFLAGS) -std=c89 -pthread -O3
+libargon2_la_CPPFLAGS = $(AM_CPPFLAGS) \
+ -I lib/crypto_backend/argon2 \
+ -I lib/crypto_backend/argon2/blake2
+
+libargon2_la_SOURCES = \
+ lib/crypto_backend/argon2/blake2/blake2b.c \
+ lib/crypto_backend/argon2/blake2/blake2.h \
+ lib/crypto_backend/argon2/blake2/blake2-impl.h \
+ lib/crypto_backend/argon2/argon2.c \
+ lib/crypto_backend/argon2/argon2.h \
+ lib/crypto_backend/argon2/core.c \
+ lib/crypto_backend/argon2/core.h \
+ lib/crypto_backend/argon2/encoding.c \
+ lib/crypto_backend/argon2/encoding.h \
+ lib/crypto_backend/argon2/thread.c \
+ lib/crypto_backend/argon2/thread.h
+
+if CRYPTO_INTERNAL_SSE_ARGON2
+libargon2_la_SOURCES += lib/crypto_backend/argon2/blake2/blamka-round-opt.h \
+ lib/crypto_backend/argon2/opt.c
+else
+libargon2_la_SOURCES += lib/crypto_backend/argon2/blake2/blamka-round-ref.h \
+ lib/crypto_backend/argon2/ref.c
+endif
+
+EXTRA_DIST += lib/crypto_backend/argon2/LICENSE
+EXTRA_DIST += lib/crypto_backend/argon2/README
diff --git a/lib/crypto_backend/argon2/README b/lib/crypto_backend/argon2/README
new file mode 100644
index 0000000..5376b52
--- /dev/null
+++ b/lib/crypto_backend/argon2/README
@@ -0,0 +1,5 @@
+This is the bundled Argon2 algorithm library, copied from
+ https://github.com/P-H-C/phc-winner-argon2
+
+For more info see the Password Hashing Competition site:
+ https://password-hashing.net/
diff --git a/lib/crypto_backend/argon2/argon2.c b/lib/crypto_backend/argon2/argon2.c
new file mode 100644
index 0000000..c784fca
--- /dev/null
+++ b/lib/crypto_backend/argon2/argon2.c
@@ -0,0 +1,458 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "argon2.h"
+#include "encoding.h"
+#include "core.h"
+
+/* to silence gcc -Wcast-qual warnings for const casts */
+#define CONST_CAST(x) (x)(uintptr_t)
+
+const char *argon2_type2string(argon2_type type, int uppercase) {
+ switch (type) {
+ case Argon2_d:
+ return uppercase ? "Argon2d" : "argon2d";
+ case Argon2_i:
+ return uppercase ? "Argon2i" : "argon2i";
+ case Argon2_id:
+ return uppercase ? "Argon2id" : "argon2id";
+ }
+
+ return NULL;
+}
+
+int argon2_ctx(argon2_context *context, argon2_type type) {
+ /* 1. Validate all inputs */
+ int result = validate_inputs(context);
+ uint32_t memory_blocks, segment_length;
+ argon2_instance_t instance;
+
+ if (ARGON2_OK != result) {
+ return result;
+ }
+
+ if (Argon2_d != type && Argon2_i != type && Argon2_id != type) {
+ return ARGON2_INCORRECT_TYPE;
+ }
+
+ /* 2. Align memory size */
+ /* Minimum memory_blocks = 8L blocks, where L is the number of lanes */
+ memory_blocks = context->m_cost;
+
+ if (memory_blocks < 2 * ARGON2_SYNC_POINTS * context->lanes) {
+ memory_blocks = 2 * ARGON2_SYNC_POINTS * context->lanes;
+ }
+
+ segment_length = memory_blocks / (context->lanes * ARGON2_SYNC_POINTS);
+ /* Ensure that all segments have equal length */
+ memory_blocks = segment_length * (context->lanes * ARGON2_SYNC_POINTS);
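+    /* e.g. lanes = 4: at least 32 blocks, rounded down to a multiple of 16 */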
+
+ instance.version = context->version;
+ instance.memory = NULL;
+ instance.passes = context->t_cost;
+ instance.memory_blocks = memory_blocks;
+ instance.segment_length = segment_length;
+ instance.lane_length = segment_length * ARGON2_SYNC_POINTS;
+ instance.lanes = context->lanes;
+ instance.threads = context->threads;
+ instance.type = type;
+
+ if (instance.threads > instance.lanes) {
+ instance.threads = instance.lanes;
+ }
+
+ /* 3. Initialization: Hashing inputs, allocating memory, filling first
+ * blocks
+ */
+ result = initialize(&instance, context);
+
+ if (ARGON2_OK != result) {
+ return result;
+ }
+
+ /* 4. Filling memory */
+ result = fill_memory_blocks(&instance);
+
+ if (ARGON2_OK != result) {
+ return result;
+ }
+ /* 5. Finalization */
+ finalize(context, &instance);
+
+ return ARGON2_OK;
+}
+
+int argon2_hash(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt, const size_t saltlen,
+ void *hash, const size_t hashlen, char *encoded,
+ const size_t encodedlen, argon2_type type,
+ const uint32_t version){
+
+ argon2_context context;
+ int result;
+ uint8_t *out;
+
+ if (pwdlen > ARGON2_MAX_PWD_LENGTH) {
+ return ARGON2_PWD_TOO_LONG;
+ }
+
+ if (saltlen > ARGON2_MAX_SALT_LENGTH) {
+ return ARGON2_SALT_TOO_LONG;
+ }
+
+ if (hashlen > ARGON2_MAX_OUTLEN) {
+ return ARGON2_OUTPUT_TOO_LONG;
+ }
+
+ if (hashlen < ARGON2_MIN_OUTLEN) {
+ return ARGON2_OUTPUT_TOO_SHORT;
+ }
+
+ out = malloc(hashlen);
+ if (!out) {
+ return ARGON2_MEMORY_ALLOCATION_ERROR;
+ }
+
+ context.out = (uint8_t *)out;
+ context.outlen = (uint32_t)hashlen;
+ context.pwd = CONST_CAST(uint8_t *)pwd;
+ context.pwdlen = (uint32_t)pwdlen;
+ context.salt = CONST_CAST(uint8_t *)salt;
+ context.saltlen = (uint32_t)saltlen;
+ context.secret = NULL;
+ context.secretlen = 0;
+ context.ad = NULL;
+ context.adlen = 0;
+ context.t_cost = t_cost;
+ context.m_cost = m_cost;
+ context.lanes = parallelism;
+ context.threads = parallelism;
+ context.allocate_cbk = NULL;
+ context.free_cbk = NULL;
+ context.flags = ARGON2_DEFAULT_FLAGS;
+ context.version = version;
+
+ result = argon2_ctx(&context, type);
+
+ if (result != ARGON2_OK) {
+ clear_internal_memory(out, hashlen);
+ free(out);
+ return result;
+ }
+
+ /* if raw hash requested, write it */
+ if (hash) {
+ memcpy(hash, out, hashlen);
+ }
+
+ /* if encoding requested, write it */
+ if (encoded && encodedlen) {
+ if (encode_string(encoded, encodedlen, &context, type) != ARGON2_OK) {
+ clear_internal_memory(out, hashlen); /* wipe buffers if error */
+ clear_internal_memory(encoded, encodedlen);
+ free(out);
+ return ARGON2_ENCODING_FAIL;
+ }
+ }
+ clear_internal_memory(out, hashlen);
+ free(out);
+
+ return ARGON2_OK;
+}
+
+int argon2i_hash_encoded(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, const size_t hashlen,
+ char *encoded, const size_t encodedlen) {
+
+ return argon2_hash(t_cost, m_cost, parallelism, pwd, pwdlen, salt, saltlen,
+ NULL, hashlen, encoded, encodedlen, Argon2_i,
+ ARGON2_VERSION_NUMBER);
+}
+
+int argon2i_hash_raw(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, void *hash, const size_t hashlen) {
+
+ return argon2_hash(t_cost, m_cost, parallelism, pwd, pwdlen, salt, saltlen,
+ hash, hashlen, NULL, 0, Argon2_i, ARGON2_VERSION_NUMBER);
+}
+
+int argon2d_hash_encoded(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, const size_t hashlen,
+ char *encoded, const size_t encodedlen) {
+
+ return argon2_hash(t_cost, m_cost, parallelism, pwd, pwdlen, salt, saltlen,
+ NULL, hashlen, encoded, encodedlen, Argon2_d,
+ ARGON2_VERSION_NUMBER);
+}
+
+int argon2d_hash_raw(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, void *hash, const size_t hashlen) {
+
+ return argon2_hash(t_cost, m_cost, parallelism, pwd, pwdlen, salt, saltlen,
+ hash, hashlen, NULL, 0, Argon2_d, ARGON2_VERSION_NUMBER);
+}
+
+int argon2id_hash_encoded(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, const size_t hashlen,
+ char *encoded, const size_t encodedlen) {
+
+ return argon2_hash(t_cost, m_cost, parallelism, pwd, pwdlen, salt, saltlen,
+ NULL, hashlen, encoded, encodedlen, Argon2_id,
+ ARGON2_VERSION_NUMBER);
+}
+
+int argon2id_hash_raw(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, void *hash, const size_t hashlen) {
+ return argon2_hash(t_cost, m_cost, parallelism, pwd, pwdlen, salt, saltlen,
+ hash, hashlen, NULL, 0, Argon2_id,
+ ARGON2_VERSION_NUMBER);
+}
+
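+/*
+ * Constant-time comparison: d accumulates the XOR of every byte pair, so the
+ * loop does the same work wherever the buffers differ; the final expression
+ * maps d == 0 to 0 and any nonzero d to -1 without branching on the data.
+ */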
+static int argon2_compare(const uint8_t *b1, const uint8_t *b2, size_t len) {
+ size_t i;
+ uint8_t d = 0U;
+
+ for (i = 0U; i < len; i++) {
+ d |= b1[i] ^ b2[i];
+ }
+ return (int)((1 & ((d - 1) >> 8)) - 1);
+}
+
+int argon2_verify(const char *encoded, const void *pwd, const size_t pwdlen,
+ argon2_type type) {
+
+ argon2_context ctx;
+ uint8_t *desired_result = NULL;
+
+ int ret = ARGON2_OK;
+
+ size_t encoded_len;
+ uint32_t max_field_len;
+
+ if (pwdlen > ARGON2_MAX_PWD_LENGTH) {
+ return ARGON2_PWD_TOO_LONG;
+ }
+
+ if (encoded == NULL) {
+ return ARGON2_DECODING_FAIL;
+ }
+
+ encoded_len = strlen(encoded);
+ if (encoded_len > UINT32_MAX) {
+ return ARGON2_DECODING_FAIL;
+ }
+
+ /* No field can be longer than the encoded length */
+ /* coverity[strlen_assign] */
+ max_field_len = (uint32_t)encoded_len;
+
+ ctx.saltlen = max_field_len;
+ ctx.outlen = max_field_len;
+
+ ctx.salt = malloc(ctx.saltlen);
+ ctx.out = malloc(ctx.outlen);
+ if (!ctx.salt || !ctx.out) {
+ ret = ARGON2_MEMORY_ALLOCATION_ERROR;
+ goto fail;
+ }
+
+ ctx.pwd = CONST_CAST(uint8_t *)pwd;
+ ctx.pwdlen = (uint32_t)pwdlen;
+
+ ret = decode_string(&ctx, encoded, type);
+ if (ret != ARGON2_OK) {
+ goto fail;
+ }
+
+ /* Set aside the desired result, and get a new buffer. */
+ desired_result = ctx.out;
+ ctx.out = malloc(ctx.outlen);
+ if (!ctx.out) {
+ ret = ARGON2_MEMORY_ALLOCATION_ERROR;
+ goto fail;
+ }
+
+ ret = argon2_verify_ctx(&ctx, (char *)desired_result, type);
+ if (ret != ARGON2_OK) {
+ goto fail;
+ }
+
+fail:
+ free(ctx.salt);
+ free(ctx.out);
+ free(desired_result);
+
+ return ret;
+}
+
+int argon2i_verify(const char *encoded, const void *pwd, const size_t pwdlen) {
+
+ return argon2_verify(encoded, pwd, pwdlen, Argon2_i);
+}
+
+int argon2d_verify(const char *encoded, const void *pwd, const size_t pwdlen) {
+
+ return argon2_verify(encoded, pwd, pwdlen, Argon2_d);
+}
+
+int argon2id_verify(const char *encoded, const void *pwd, const size_t pwdlen) {
+
+ return argon2_verify(encoded, pwd, pwdlen, Argon2_id);
+}
+
+int argon2d_ctx(argon2_context *context) {
+ return argon2_ctx(context, Argon2_d);
+}
+
+int argon2i_ctx(argon2_context *context) {
+ return argon2_ctx(context, Argon2_i);
+}
+
+int argon2id_ctx(argon2_context *context) {
+ return argon2_ctx(context, Argon2_id);
+}
+
+int argon2_verify_ctx(argon2_context *context, const char *hash,
+ argon2_type type) {
+ int ret = argon2_ctx(context, type);
+ if (ret != ARGON2_OK) {
+ return ret;
+ }
+
+ if (argon2_compare(CONST_CAST(uint8_t *)hash, context->out, context->outlen)) {
+ return ARGON2_VERIFY_MISMATCH;
+ }
+
+ return ARGON2_OK;
+}
+
+int argon2d_verify_ctx(argon2_context *context, const char *hash) {
+ return argon2_verify_ctx(context, hash, Argon2_d);
+}
+
+int argon2i_verify_ctx(argon2_context *context, const char *hash) {
+ return argon2_verify_ctx(context, hash, Argon2_i);
+}
+
+int argon2id_verify_ctx(argon2_context *context, const char *hash) {
+ return argon2_verify_ctx(context, hash, Argon2_id);
+}
+
+const char *argon2_error_message(int error_code) {
+ switch (error_code) {
+ case ARGON2_OK:
+ return "OK";
+ case ARGON2_OUTPUT_PTR_NULL:
+ return "Output pointer is NULL";
+ case ARGON2_OUTPUT_TOO_SHORT:
+ return "Output is too short";
+ case ARGON2_OUTPUT_TOO_LONG:
+ return "Output is too long";
+ case ARGON2_PWD_TOO_SHORT:
+ return "Password is too short";
+ case ARGON2_PWD_TOO_LONG:
+ return "Password is too long";
+ case ARGON2_SALT_TOO_SHORT:
+ return "Salt is too short";
+ case ARGON2_SALT_TOO_LONG:
+ return "Salt is too long";
+ case ARGON2_AD_TOO_SHORT:
+ return "Associated data is too short";
+ case ARGON2_AD_TOO_LONG:
+ return "Associated data is too long";
+ case ARGON2_SECRET_TOO_SHORT:
+ return "Secret is too short";
+ case ARGON2_SECRET_TOO_LONG:
+ return "Secret is too long";
+ case ARGON2_TIME_TOO_SMALL:
+ return "Time cost is too small";
+ case ARGON2_TIME_TOO_LARGE:
+ return "Time cost is too large";
+ case ARGON2_MEMORY_TOO_LITTLE:
+ return "Memory cost is too small";
+ case ARGON2_MEMORY_TOO_MUCH:
+ return "Memory cost is too large";
+ case ARGON2_LANES_TOO_FEW:
+ return "Too few lanes";
+ case ARGON2_LANES_TOO_MANY:
+ return "Too many lanes";
+ case ARGON2_PWD_PTR_MISMATCH:
+ return "Password pointer is NULL, but password length is not 0";
+ case ARGON2_SALT_PTR_MISMATCH:
+ return "Salt pointer is NULL, but salt length is not 0";
+ case ARGON2_SECRET_PTR_MISMATCH:
+ return "Secret pointer is NULL, but secret length is not 0";
+ case ARGON2_AD_PTR_MISMATCH:
+ return "Associated data pointer is NULL, but ad length is not 0";
+ case ARGON2_MEMORY_ALLOCATION_ERROR:
+ return "Memory allocation error";
+ case ARGON2_FREE_MEMORY_CBK_NULL:
+ return "The free memory callback is NULL";
+ case ARGON2_ALLOCATE_MEMORY_CBK_NULL:
+ return "The allocate memory callback is NULL";
+ case ARGON2_INCORRECT_PARAMETER:
+ return "Argon2_Context context is NULL";
+ case ARGON2_INCORRECT_TYPE:
+        return "There is no such type of Argon2";
+ case ARGON2_OUT_PTR_MISMATCH:
+ return "Output pointer mismatch";
+ case ARGON2_THREADS_TOO_FEW:
+ return "Not enough threads";
+ case ARGON2_THREADS_TOO_MANY:
+ return "Too many threads";
+ case ARGON2_MISSING_ARGS:
+ return "Missing arguments";
+ case ARGON2_ENCODING_FAIL:
+ return "Encoding failed";
+ case ARGON2_DECODING_FAIL:
+ return "Decoding failed";
+ case ARGON2_THREAD_FAIL:
+ return "Threading failure";
+ case ARGON2_DECODING_LENGTH_FAIL:
+        return "Some of the encoded parameters are too long or too short";
+ case ARGON2_VERIFY_MISMATCH:
+ return "The password does not match the supplied hash";
+ default:
+ return "Unknown error code";
+ }
+}
+
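+/*
+ * The encoded form is "$<type>$v=<num>$m=<num>,t=<num>,p=<num>$<b64salt>$<b64hash>";
+ * the string literal below counts its fixed punctuation and the trailing +1
+ * is for the NUL terminator.
+ */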
+size_t argon2_encodedlen(uint32_t t_cost, uint32_t m_cost, uint32_t parallelism,
+ uint32_t saltlen, uint32_t hashlen, argon2_type type) {
+ if (!argon2_type2string(type, 0))
+ return 0;
+ return strlen("$$v=$m=,t=,p=$$") + strlen(argon2_type2string(type, 0)) +
+ numlen(t_cost) + numlen(m_cost) + numlen(parallelism) +
+ b64len(saltlen) + b64len(hashlen) + numlen(ARGON2_VERSION_NUMBER) + 1;
+}
diff --git a/lib/crypto_backend/argon2/argon2.h b/lib/crypto_backend/argon2/argon2.h
new file mode 100644
index 0000000..20df933
--- /dev/null
+++ b/lib/crypto_backend/argon2/argon2.h
@@ -0,0 +1,437 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef ARGON2_H
+#define ARGON2_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Symbol visibility control */
+#ifdef A2_VISCTL
+#define ARGON2_PUBLIC __attribute__((visibility("default")))
+#define ARGON2_LOCAL __attribute__ ((visibility ("hidden")))
+#elif _MSC_VER
+#define ARGON2_PUBLIC __declspec(dllexport)
+#define ARGON2_LOCAL
+#else
+#define ARGON2_PUBLIC
+#define ARGON2_LOCAL
+#endif
+
+/*
+ * Argon2 input parameter restrictions
+ */
+
+/* Minimum and maximum number of lanes (degree of parallelism) */
+#define ARGON2_MIN_LANES UINT32_C(1)
+#define ARGON2_MAX_LANES UINT32_C(0xFFFFFF)
+
+/* Minimum and maximum number of threads */
+#define ARGON2_MIN_THREADS UINT32_C(1)
+#define ARGON2_MAX_THREADS UINT32_C(0xFFFFFF)
+
+/* Number of synchronization points between lanes per pass */
+#define ARGON2_SYNC_POINTS UINT32_C(4)
+
+/* Minimum and maximum digest size in bytes */
+#define ARGON2_MIN_OUTLEN UINT32_C(4)
+#define ARGON2_MAX_OUTLEN UINT32_C(0xFFFFFFFF)
+
+/* Minimum and maximum number of memory blocks (each of BLOCK_SIZE bytes) */
+#define ARGON2_MIN_MEMORY (2 * ARGON2_SYNC_POINTS) /* 2 blocks per slice */
+
+#define ARGON2_MIN(a, b) ((a) < (b) ? (a) : (b))
+/* Max memory size is addressing-space/2, topping at 2^32 blocks (4 TB) */
+#define ARGON2_MAX_MEMORY_BITS \
+ ARGON2_MIN(UINT32_C(32), (sizeof(void *) * CHAR_BIT - 10 - 1))
+#define ARGON2_MAX_MEMORY \
+ ARGON2_MIN(UINT32_C(0xFFFFFFFF), UINT64_C(1) << ARGON2_MAX_MEMORY_BITS)
+
+/* Minimum and maximum number of passes */
+#define ARGON2_MIN_TIME UINT32_C(1)
+#define ARGON2_MAX_TIME UINT32_C(0xFFFFFFFF)
+
+/* Minimum and maximum password length in bytes */
+#define ARGON2_MIN_PWD_LENGTH UINT32_C(0)
+#define ARGON2_MAX_PWD_LENGTH UINT32_C(0xFFFFFFFF)
+
+/* Minimum and maximum associated data length in bytes */
+#define ARGON2_MIN_AD_LENGTH UINT32_C(0)
+#define ARGON2_MAX_AD_LENGTH UINT32_C(0xFFFFFFFF)
+
+/* Minimum and maximum salt length in bytes */
+#define ARGON2_MIN_SALT_LENGTH UINT32_C(8)
+#define ARGON2_MAX_SALT_LENGTH UINT32_C(0xFFFFFFFF)
+
+/* Minimum and maximum key length in bytes */
+#define ARGON2_MIN_SECRET UINT32_C(0)
+#define ARGON2_MAX_SECRET UINT32_C(0xFFFFFFFF)
+
+/* Flags to determine which fields are securely wiped (default = no wipe). */
+#define ARGON2_DEFAULT_FLAGS UINT32_C(0)
+#define ARGON2_FLAG_CLEAR_PASSWORD (UINT32_C(1) << 0)
+#define ARGON2_FLAG_CLEAR_SECRET (UINT32_C(1) << 1)
+
+/* Global flag to determine if we are wiping internal memory buffers. This flag
+ * is defined in core.c and defaults to 1 (wipe internal memory). */
+extern int FLAG_clear_internal_memory;
+
+/* Error codes */
+typedef enum Argon2_ErrorCodes {
+ ARGON2_OK = 0,
+
+ ARGON2_OUTPUT_PTR_NULL = -1,
+
+ ARGON2_OUTPUT_TOO_SHORT = -2,
+ ARGON2_OUTPUT_TOO_LONG = -3,
+
+ ARGON2_PWD_TOO_SHORT = -4,
+ ARGON2_PWD_TOO_LONG = -5,
+
+ ARGON2_SALT_TOO_SHORT = -6,
+ ARGON2_SALT_TOO_LONG = -7,
+
+ ARGON2_AD_TOO_SHORT = -8,
+ ARGON2_AD_TOO_LONG = -9,
+
+ ARGON2_SECRET_TOO_SHORT = -10,
+ ARGON2_SECRET_TOO_LONG = -11,
+
+ ARGON2_TIME_TOO_SMALL = -12,
+ ARGON2_TIME_TOO_LARGE = -13,
+
+ ARGON2_MEMORY_TOO_LITTLE = -14,
+ ARGON2_MEMORY_TOO_MUCH = -15,
+
+ ARGON2_LANES_TOO_FEW = -16,
+ ARGON2_LANES_TOO_MANY = -17,
+
+ ARGON2_PWD_PTR_MISMATCH = -18, /* NULL ptr with non-zero length */
+ ARGON2_SALT_PTR_MISMATCH = -19, /* NULL ptr with non-zero length */
+ ARGON2_SECRET_PTR_MISMATCH = -20, /* NULL ptr with non-zero length */
+ ARGON2_AD_PTR_MISMATCH = -21, /* NULL ptr with non-zero length */
+
+ ARGON2_MEMORY_ALLOCATION_ERROR = -22,
+
+ ARGON2_FREE_MEMORY_CBK_NULL = -23,
+ ARGON2_ALLOCATE_MEMORY_CBK_NULL = -24,
+
+ ARGON2_INCORRECT_PARAMETER = -25,
+ ARGON2_INCORRECT_TYPE = -26,
+
+ ARGON2_OUT_PTR_MISMATCH = -27,
+
+ ARGON2_THREADS_TOO_FEW = -28,
+ ARGON2_THREADS_TOO_MANY = -29,
+
+ ARGON2_MISSING_ARGS = -30,
+
+ ARGON2_ENCODING_FAIL = -31,
+
+ ARGON2_DECODING_FAIL = -32,
+
+ ARGON2_THREAD_FAIL = -33,
+
+ ARGON2_DECODING_LENGTH_FAIL = -34,
+
+ ARGON2_VERIFY_MISMATCH = -35
+} argon2_error_codes;
+
+/* Memory allocator types --- for external allocation */
+typedef int (*allocate_fptr)(uint8_t **memory, size_t bytes_to_allocate);
+typedef void (*deallocate_fptr)(uint8_t *memory, size_t bytes_to_allocate);
+
+/* Argon2 external data structures */
+
+/*
+ *****
+ * Context: structure to hold Argon2 inputs:
+ * output array and its length,
+ * password and its length,
+ * salt and its length,
+ * secret and its length,
+ * associated data and its length,
+ * number of passes, amount of used memory (in kibibytes, can be rounded up a bit),
+ * and number of parallel threads that will be run.
+ * All the parameters above affect the output hash value.
+ * Additionally, two function pointers can be provided to allocate and
+ * deallocate the memory (if NULL, memory will be allocated internally).
+ * Also, flags indicate whether to erase the password and secret as soon as
+ * they are pre-hashed (and thus no longer needed), and whether to wipe the
+ * internal memory.
+ *****
+ * Simplest situation: you have output array out[8], password is stored in
+ * pwd[32], salt is stored in salt[16], you do not have keys nor associated
+ * data. You need to spend 1 GB of RAM and you run 5 passes of Argon2d with
+ * 4 parallel lanes.
+ * You want to erase the password, but you're OK with last pass not being
+ * erased. You want to use the default memory allocator.
+ * Then you initialize:
+ Argon2_Context(out,8,pwd,32,salt,16,NULL,0,NULL,0,5,1<<20,4,4,NULL,NULL,true,false,false,false)
+ */
+typedef struct Argon2_Context {
+ uint8_t *out; /* output array */
+ uint32_t outlen; /* digest length */
+
+ uint8_t *pwd; /* password array */
+ uint32_t pwdlen; /* password length */
+
+ uint8_t *salt; /* salt array */
+ uint32_t saltlen; /* salt length */
+
+ uint8_t *secret; /* key array */
+ uint32_t secretlen; /* key length */
+
+ uint8_t *ad; /* associated data array */
+ uint32_t adlen; /* associated data length */
+
+ uint32_t t_cost; /* number of passes */
+ uint32_t m_cost; /* amount of memory requested (KB) */
+ uint32_t lanes; /* number of lanes */
+ uint32_t threads; /* maximum number of threads */
+
+ uint32_t version; /* version number */
+
+ allocate_fptr allocate_cbk; /* pointer to memory allocator */
+ deallocate_fptr free_cbk; /* pointer to memory deallocator */
+
+ uint32_t flags; /* array of bool options */
+} argon2_context;
+
+/* Argon2 primitive type */
+typedef enum Argon2_type {
+ Argon2_d = 0,
+ Argon2_i = 1,
+ Argon2_id = 2
+} argon2_type;
+
+/* Version of the algorithm */
+typedef enum Argon2_version {
+ ARGON2_VERSION_10 = 0x10,
+ ARGON2_VERSION_13 = 0x13,
+ ARGON2_VERSION_NUMBER = ARGON2_VERSION_13
+} argon2_version;
+
+/*
+ * Function that gives the string representation of an argon2_type.
+ * @param type The argon2_type that we want the string for
+ * @param uppercase Whether the string should have the first letter uppercase
+ * @return NULL if invalid type, otherwise the string representation.
+ */
+ARGON2_PUBLIC const char *argon2_type2string(argon2_type type, int uppercase);
+
+/*
+ * Function that performs memory-hard hashing with a certain degree of parallelism
+ * @param context Pointer to the Argon2 internal structure
+ * @return Error code if something is wrong, ARGON2_OK otherwise
+ */
+ARGON2_PUBLIC int argon2_ctx(argon2_context *context, argon2_type type);
+
+/**
+ * Hashes a password with Argon2i, producing an encoded hash
+ * @param t_cost Number of iterations
+ * @param m_cost Sets memory usage to m_cost kibibytes
+ * @param parallelism Number of threads and compute lanes
+ * @param pwd Pointer to password
+ * @param pwdlen Password size in bytes
+ * @param salt Pointer to salt
+ * @param saltlen Salt size in bytes
+ * @param hashlen Desired length of the hash in bytes
+ * @param encoded Buffer where to write the encoded hash
+ * @param encodedlen Size of the buffer (thus max size of the encoded hash)
+ * @pre Different parallelism levels will give different results
+ * @return ARGON2_OK if successful
+ */
+ARGON2_PUBLIC int argon2i_hash_encoded(const uint32_t t_cost,
+ const uint32_t m_cost,
+ const uint32_t parallelism,
+ const void *pwd, const size_t pwdlen,
+ const void *salt, const size_t saltlen,
+ const size_t hashlen, char *encoded,
+ const size_t encodedlen);
+
+/**
+ * Hashes a password with Argon2i, producing a raw hash at @hash
+ * @param t_cost Number of iterations
+ * @param m_cost Sets memory usage to m_cost kibibytes
+ * @param parallelism Number of threads and compute lanes
+ * @param pwd Pointer to password
+ * @param pwdlen Password size in bytes
+ * @param salt Pointer to salt
+ * @param saltlen Salt size in bytes
+ * @param hash Buffer where to write the raw hash - updated by the function
+ * @param hashlen Desired length of the hash in bytes
+ * @pre Different parallelism levels will give different results
+ * @return ARGON2_OK if successful
+ */
+ARGON2_PUBLIC int argon2i_hash_raw(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, void *hash,
+ const size_t hashlen);
+
+ARGON2_PUBLIC int argon2d_hash_encoded(const uint32_t t_cost,
+ const uint32_t m_cost,
+ const uint32_t parallelism,
+ const void *pwd, const size_t pwdlen,
+ const void *salt, const size_t saltlen,
+ const size_t hashlen, char *encoded,
+ const size_t encodedlen);
+
+ARGON2_PUBLIC int argon2d_hash_raw(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, void *hash,
+ const size_t hashlen);
+
+ARGON2_PUBLIC int argon2id_hash_encoded(const uint32_t t_cost,
+ const uint32_t m_cost,
+ const uint32_t parallelism,
+ const void *pwd, const size_t pwdlen,
+ const void *salt, const size_t saltlen,
+ const size_t hashlen, char *encoded,
+ const size_t encodedlen);
+
+ARGON2_PUBLIC int argon2id_hash_raw(const uint32_t t_cost,
+ const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, void *hash,
+ const size_t hashlen);
+
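+/*
+ * Example (an illustrative sketch; pwd/pwdlen and the salt buffer are
+ * assumed to be supplied by the caller):
+ *
+ *   size_t elen = argon2_encodedlen(3, 1 << 16, 4, 16, 32, Argon2_id);
+ *   char *encoded = malloc(elen);
+ *   if (encoded)
+ *           argon2id_hash_encoded(3, 1 << 16, 4, pwd, pwdlen,
+ *                                 salt, 16, 32, encoded, elen);
+ */
+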
+/* generic function underlying the above ones */
+ARGON2_PUBLIC int argon2_hash(const uint32_t t_cost, const uint32_t m_cost,
+ const uint32_t parallelism, const void *pwd,
+ const size_t pwdlen, const void *salt,
+ const size_t saltlen, void *hash,
+ const size_t hashlen, char *encoded,
+ const size_t encodedlen, argon2_type type,
+ const uint32_t version);
+
+/**
+ * Verifies a password against an encoded string
+ * Encoded string is restricted as in validate_inputs()
+ * @param encoded String encoding parameters, salt, hash
+ * @param pwd Pointer to password
+ * @param pwdlen Password size in bytes
+ * @return ARGON2_OK if successful
+ */
+ARGON2_PUBLIC int argon2i_verify(const char *encoded, const void *pwd,
+ const size_t pwdlen);
+
+ARGON2_PUBLIC int argon2d_verify(const char *encoded, const void *pwd,
+ const size_t pwdlen);
+
+ARGON2_PUBLIC int argon2id_verify(const char *encoded, const void *pwd,
+ const size_t pwdlen);
+
+/* generic function underlying the above ones */
+ARGON2_PUBLIC int argon2_verify(const char *encoded, const void *pwd,
+ const size_t pwdlen, argon2_type type);
+
+/**
+ * Argon2d: Version of Argon2 that picks memory blocks depending
+ * on the password and salt. Only for side-channel-free
+ * environments!
+ *****
+ * @param context Pointer to current Argon2 context
+ * @return Zero if successful, a non zero error code otherwise
+ */
+ARGON2_PUBLIC int argon2d_ctx(argon2_context *context);
+
+/**
+ * Argon2i: Version of Argon2 that picks memory blocks
+ * independently of the password and salt. Resistant to side-channel attacks,
+ * but weaker w.r.t. tradeoff attacks if only one pass is used.
+ *****
+ * @param context Pointer to current Argon2 context
+ * @return Zero if successful, a non zero error code otherwise
+ */
+ARGON2_PUBLIC int argon2i_ctx(argon2_context *context);
+
+/**
+ * Argon2id: Version of Argon2 where the first half-pass over memory is
+ * password-independent, the rest are password-dependent (on the password and
+ * salt). OK against side channels (they reduce to 1/2-pass Argon2i), and
+ * better w.r.t. tradeoff attacks (similar to Argon2d).
+ *****
+ * @param context Pointer to current Argon2 context
+ * @return Zero if successful, a non zero error code otherwise
+ */
+ARGON2_PUBLIC int argon2id_ctx(argon2_context *context);
+
+/**
+ * Verify if a given password is correct for Argon2d hashing
+ * @param context Pointer to current Argon2 context
+ * @param hash The password hash to verify. The length of the hash is
+ * specified by the context outlen member
+ * @return Zero if successful, a non zero error code otherwise
+ */
+ARGON2_PUBLIC int argon2d_verify_ctx(argon2_context *context, const char *hash);
+
+/**
+ * Verify if a given password is correct for Argon2i hashing
+ * @param context Pointer to current Argon2 context
+ * @param hash The password hash to verify. The length of the hash is
+ * specified by the context outlen member
+ * @return Zero if successful, a non zero error code otherwise
+ */
+ARGON2_PUBLIC int argon2i_verify_ctx(argon2_context *context, const char *hash);
+
+/**
+ * Verify if a given password is correct for Argon2id hashing
+ * @param context Pointer to current Argon2 context
+ * @param hash The password hash to verify. The length of the hash is
+ * specified by the context outlen member
+ * @return Zero if successful, a non zero error code otherwise
+ */
+ARGON2_PUBLIC int argon2id_verify_ctx(argon2_context *context,
+ const char *hash);
+
+/* generic function underlying the above ones */
+ARGON2_PUBLIC int argon2_verify_ctx(argon2_context *context, const char *hash,
+ argon2_type type);
+
+/**
+ * Get the associated error message for given error code
+ * @return The error message associated with the given error code
+ */
+ARGON2_PUBLIC const char *argon2_error_message(int error_code);
+
+/**
+ * Returns the encoded hash length for the given input parameters
+ * @param t_cost Number of iterations
+ * @param m_cost Memory usage in kibibytes
+ * @param parallelism Number of threads; used to compute lanes
+ * @param saltlen Salt size in bytes
+ * @param hashlen Hash size in bytes
+ * @param type The argon2_type that we want the encoded length for
+ * @return The encoded hash length in bytes
+ */
+ARGON2_PUBLIC size_t argon2_encodedlen(uint32_t t_cost, uint32_t m_cost,
+ uint32_t parallelism, uint32_t saltlen,
+ uint32_t hashlen, argon2_type type);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/lib/crypto_backend/argon2/blake2/blake2-impl.h b/lib/crypto_backend/argon2/blake2/blake2-impl.h
new file mode 100644
index 0000000..dcac827
--- /dev/null
+++ b/lib/crypto_backend/argon2/blake2/blake2-impl.h
@@ -0,0 +1,154 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef PORTABLE_BLAKE2_IMPL_H
+#define PORTABLE_BLAKE2_IMPL_H
+
+#include <stdint.h>
+#include <string.h>
+
+#if defined(_MSC_VER)
+#define BLAKE2_INLINE __inline
+#elif defined(__GNUC__) || defined(__clang__)
+#define BLAKE2_INLINE __inline__
+#else
+#define BLAKE2_INLINE
+#endif
+
+/* Argon2 Team - Begin Code */
+/*
+ Not an exhaustive list, but should cover the majority of modern platforms.
+ Additionally, the code will always be correct---this is only a performance
+ tweak.
+*/
+#if (defined(__BYTE_ORDER__) && \
+ (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
+ defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || defined(__MIPSEL__) || \
+ defined(__AARCH64EL__) || defined(__amd64__) || defined(__i386__) || \
+ defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64) || \
+ defined(_M_ARM)
+#define NATIVE_LITTLE_ENDIAN
+#endif
+/* Argon2 Team - End Code */
+
+static BLAKE2_INLINE uint32_t load32(const void *src) {
+#if defined(NATIVE_LITTLE_ENDIAN)
+ uint32_t w;
+ memcpy(&w, src, sizeof w);
+ return w;
+#else
+ const uint8_t *p = (const uint8_t *)src;
+ uint32_t w = *p++;
+ w |= (uint32_t)(*p++) << 8;
+ w |= (uint32_t)(*p++) << 16;
+ w |= (uint32_t)(*p++) << 24;
+ return w;
+#endif
+}
+
+static BLAKE2_INLINE uint64_t load64(const void *src) {
+#if defined(NATIVE_LITTLE_ENDIAN)
+ uint64_t w;
+ memcpy(&w, src, sizeof w);
+ return w;
+#else
+ const uint8_t *p = (const uint8_t *)src;
+ uint64_t w = *p++;
+ w |= (uint64_t)(*p++) << 8;
+ w |= (uint64_t)(*p++) << 16;
+ w |= (uint64_t)(*p++) << 24;
+ w |= (uint64_t)(*p++) << 32;
+ w |= (uint64_t)(*p++) << 40;
+ w |= (uint64_t)(*p++) << 48;
+ w |= (uint64_t)(*p++) << 56;
+ return w;
+#endif
+}
+
+static BLAKE2_INLINE void store32(void *dst, uint32_t w) {
+#if defined(NATIVE_LITTLE_ENDIAN)
+ memcpy(dst, &w, sizeof w);
+#else
+ uint8_t *p = (uint8_t *)dst;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+#endif
+}
+
+static BLAKE2_INLINE void store64(void *dst, uint64_t w) {
+#if defined(NATIVE_LITTLE_ENDIAN)
+ memcpy(dst, &w, sizeof w);
+#else
+ uint8_t *p = (uint8_t *)dst;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+#endif
+}
+
+static BLAKE2_INLINE uint64_t load48(const void *src) {
+ const uint8_t *p = (const uint8_t *)src;
+ uint64_t w = *p++;
+ w |= (uint64_t)(*p++) << 8;
+ w |= (uint64_t)(*p++) << 16;
+ w |= (uint64_t)(*p++) << 24;
+ w |= (uint64_t)(*p++) << 32;
+ w |= (uint64_t)(*p++) << 40;
+ return w;
+}
+
+static BLAKE2_INLINE void store48(void *dst, uint64_t w) {
+ uint8_t *p = (uint8_t *)dst;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+ w >>= 8;
+ *p++ = (uint8_t)w;
+}
+
+static BLAKE2_INLINE uint32_t rotr32(const uint32_t w, const unsigned c) {
+ return (w >> c) | (w << (32 - c));
+}
+
+static BLAKE2_INLINE uint64_t rotr64(const uint64_t w, const unsigned c) {
+ return (w >> c) | (w << (64 - c));
+}
+
+#endif
diff --git a/lib/crypto_backend/argon2/blake2/blake2.h b/lib/crypto_backend/argon2/blake2/blake2.h
new file mode 100644
index 0000000..0c1b0ee
--- /dev/null
+++ b/lib/crypto_backend/argon2/blake2/blake2.h
@@ -0,0 +1,89 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef PORTABLE_BLAKE2_H
+#define PORTABLE_BLAKE2_H
+
+#include "../argon2.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+enum blake2b_constant {
+ BLAKE2B_BLOCKBYTES = 128,
+ BLAKE2B_OUTBYTES = 64,
+ BLAKE2B_KEYBYTES = 64,
+ BLAKE2B_SALTBYTES = 16,
+ BLAKE2B_PERSONALBYTES = 16
+};
+
+#pragma pack(push, 1)
+typedef struct __blake2b_param {
+ uint8_t digest_length; /* 1 */
+ uint8_t key_length; /* 2 */
+ uint8_t fanout; /* 3 */
+ uint8_t depth; /* 4 */
+ uint32_t leaf_length; /* 8 */
+ uint64_t node_offset; /* 16 */
+ uint8_t node_depth; /* 17 */
+ uint8_t inner_length; /* 18 */
+ uint8_t reserved[14]; /* 32 */
+ uint8_t salt[BLAKE2B_SALTBYTES]; /* 48 */
+ uint8_t personal[BLAKE2B_PERSONALBYTES]; /* 64 */
+} blake2b_param;
+#pragma pack(pop)
+
+typedef struct __blake2b_state {
+ uint64_t h[8];
+ uint64_t t[2];
+ uint64_t f[2];
+ uint8_t buf[BLAKE2B_BLOCKBYTES];
+ unsigned buflen;
+ unsigned outlen;
+ uint8_t last_node;
+} blake2b_state;
+
+/* Ensure param structs have not been wrongly padded */
+/* Poor man's static_assert */
+enum {
+ blake2_size_check_0 = 1 / !!(CHAR_BIT == 8),
+ blake2_size_check_2 =
+ 1 / !!(sizeof(blake2b_param) == sizeof(uint64_t) * CHAR_BIT)
+};
+
+/* Streaming API */
+ARGON2_LOCAL int blake2b_init(blake2b_state *S, size_t outlen);
+ARGON2_LOCAL int blake2b_init_key(blake2b_state *S, size_t outlen, const void *key,
+ size_t keylen);
+ARGON2_LOCAL int blake2b_init_param(blake2b_state *S, const blake2b_param *P);
+ARGON2_LOCAL int blake2b_update(blake2b_state *S, const void *in, size_t inlen);
+ARGON2_LOCAL int blake2b_final(blake2b_state *S, void *out, size_t outlen);
+
+/* Simple API */
+ARGON2_LOCAL int blake2b(void *out, size_t outlen, const void *in, size_t inlen,
+ const void *key, size_t keylen);
+
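+/*
+ * Example (illustrative; msg/msglen are a caller-supplied buffer): unkeyed
+ * 64-byte digest:
+ *
+ *   uint8_t digest[BLAKE2B_OUTBYTES];
+ *   blake2b(digest, sizeof(digest), msg, msglen, NULL, 0);
+ */
+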
+/* Argon2 Team - Begin Code */
+ARGON2_LOCAL int blake2b_long(void *out, size_t outlen, const void *in, size_t inlen);
+/* Argon2 Team - End Code */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/lib/crypto_backend/argon2/blake2/blake2b.c b/lib/crypto_backend/argon2/blake2/blake2b.c
new file mode 100644
index 0000000..d8f69e8
--- /dev/null
+++ b/lib/crypto_backend/argon2/blake2/blake2b.c
@@ -0,0 +1,392 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "blake2.h"
+#include "blake2-impl.h"
+
+void clear_internal_memory(void *v, size_t n);
+
+static const uint64_t blake2b_IV[8] = {
+ UINT64_C(0x6a09e667f3bcc908), UINT64_C(0xbb67ae8584caa73b),
+ UINT64_C(0x3c6ef372fe94f82b), UINT64_C(0xa54ff53a5f1d36f1),
+ UINT64_C(0x510e527fade682d1), UINT64_C(0x9b05688c2b3e6c1f),
+ UINT64_C(0x1f83d9abfb41bd6b), UINT64_C(0x5be0cd19137e2179)};
+
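+/* per-round message word schedule; BLAKE2b runs 12 rounds,
+   so rows 10 and 11 repeat rows 0 and 1 */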
+static const unsigned int blake2b_sigma[12][16] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3},
+ {11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4},
+ {7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8},
+ {9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13},
+ {2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9},
+ {12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11},
+ {13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10},
+ {6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5},
+ {10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3},
+};
+
+static BLAKE2_INLINE void blake2b_set_lastnode(blake2b_state *S) {
+ S->f[1] = (uint64_t)-1;
+}
+
+static BLAKE2_INLINE void blake2b_set_lastblock(blake2b_state *S) {
+ if (S->last_node) {
+ blake2b_set_lastnode(S);
+ }
+ S->f[0] = (uint64_t)-1;
+}
+
+static BLAKE2_INLINE void blake2b_increment_counter(blake2b_state *S,
+ uint64_t inc) {
+ S->t[0] += inc;
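+    /* propagate the carry into the high word of the 128-bit byte counter */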
+ S->t[1] += (S->t[0] < inc);
+}
+
+static BLAKE2_INLINE void blake2b_invalidate_state(blake2b_state *S) {
+ clear_internal_memory(S, sizeof(*S)); /* wipe */
+ blake2b_set_lastblock(S); /* invalidate for further use */
+}
+
+static BLAKE2_INLINE void blake2b_init0(blake2b_state *S) {
+ memset(S, 0, sizeof(*S));
+ memcpy(S->h, blake2b_IV, sizeof(S->h));
+}
+
+int blake2b_init_param(blake2b_state *S, const blake2b_param *P) {
+ const unsigned char *p = (const unsigned char *)P;
+ unsigned int i;
+
+ if (NULL == P || NULL == S) {
+ return -1;
+ }
+
+ blake2b_init0(S);
+ /* IV XOR Parameter Block */
+ for (i = 0; i < 8; ++i) {
+ S->h[i] ^= load64(&p[i * sizeof(S->h[i])]);
+ }
+ S->outlen = P->digest_length;
+ return 0;
+}
+
+/* Sequential blake2b initialization */
+int blake2b_init(blake2b_state *S, size_t outlen) {
+ blake2b_param P;
+
+ if (S == NULL) {
+ return -1;
+ }
+
+ if ((outlen == 0) || (outlen > BLAKE2B_OUTBYTES)) {
+ blake2b_invalidate_state(S);
+ return -1;
+ }
+
+ /* Setup Parameter Block for unkeyed BLAKE2 */
+ P.digest_length = (uint8_t)outlen;
+ P.key_length = 0;
+ P.fanout = 1;
+ P.depth = 1;
+ P.leaf_length = 0;
+ P.node_offset = 0;
+ P.node_depth = 0;
+ P.inner_length = 0;
+ memset(P.reserved, 0, sizeof(P.reserved));
+ memset(P.salt, 0, sizeof(P.salt));
+ memset(P.personal, 0, sizeof(P.personal));
+
+ return blake2b_init_param(S, &P);
+}
+
+int blake2b_init_key(blake2b_state *S, size_t outlen, const void *key,
+ size_t keylen) {
+ blake2b_param P;
+
+ if (S == NULL) {
+ return -1;
+ }
+
+ if ((outlen == 0) || (outlen > BLAKE2B_OUTBYTES)) {
+ blake2b_invalidate_state(S);
+ return -1;
+ }
+
+    if ((key == NULL) || (keylen == 0) || (keylen > BLAKE2B_KEYBYTES)) {
+ blake2b_invalidate_state(S);
+ return -1;
+ }
+
+ /* Setup Parameter Block for keyed BLAKE2 */
+ P.digest_length = (uint8_t)outlen;
+ P.key_length = (uint8_t)keylen;
+ P.fanout = 1;
+ P.depth = 1;
+ P.leaf_length = 0;
+ P.node_offset = 0;
+ P.node_depth = 0;
+ P.inner_length = 0;
+ memset(P.reserved, 0, sizeof(P.reserved));
+ memset(P.salt, 0, sizeof(P.salt));
+ memset(P.personal, 0, sizeof(P.personal));
+
+ if (blake2b_init_param(S, &P) < 0) {
+ blake2b_invalidate_state(S);
+ return -1;
+ }
+
+ {
+ uint8_t block[BLAKE2B_BLOCKBYTES];
+ memset(block, 0, BLAKE2B_BLOCKBYTES);
+ memcpy(block, key, keylen);
+ blake2b_update(S, block, BLAKE2B_BLOCKBYTES);
+ /* Burn the key from stack */
+ clear_internal_memory(block, BLAKE2B_BLOCKBYTES);
+ }
+ return 0;
+}
+
+static void blake2b_compress(blake2b_state *S, const uint8_t *block) {
+ uint64_t m[16];
+ uint64_t v[16];
+ unsigned int i, r;
+
+ for (i = 0; i < 16; ++i) {
+ m[i] = load64(block + i * sizeof(m[i]));
+ }
+
+ for (i = 0; i < 8; ++i) {
+ v[i] = S->h[i];
+ }
+
+ v[8] = blake2b_IV[0];
+ v[9] = blake2b_IV[1];
+ v[10] = blake2b_IV[2];
+ v[11] = blake2b_IV[3];
+ v[12] = blake2b_IV[4] ^ S->t[0];
+ v[13] = blake2b_IV[5] ^ S->t[1];
+ v[14] = blake2b_IV[6] ^ S->f[0];
+ v[15] = blake2b_IV[7] ^ S->f[1];
+
+#define G(r, i, a, b, c, d) \
+ do { \
+ a = a + b + m[blake2b_sigma[r][2 * i + 0]]; \
+ d = rotr64(d ^ a, 32); \
+ c = c + d; \
+ b = rotr64(b ^ c, 24); \
+ a = a + b + m[blake2b_sigma[r][2 * i + 1]]; \
+ d = rotr64(d ^ a, 16); \
+ c = c + d; \
+ b = rotr64(b ^ c, 63); \
+ } while ((void)0, 0)
+
+#define ROUND(r) \
+ do { \
+ G(r, 0, v[0], v[4], v[8], v[12]); \
+ G(r, 1, v[1], v[5], v[9], v[13]); \
+ G(r, 2, v[2], v[6], v[10], v[14]); \
+ G(r, 3, v[3], v[7], v[11], v[15]); \
+ G(r, 4, v[0], v[5], v[10], v[15]); \
+ G(r, 5, v[1], v[6], v[11], v[12]); \
+ G(r, 6, v[2], v[7], v[8], v[13]); \
+ G(r, 7, v[3], v[4], v[9], v[14]); \
+ } while ((void)0, 0)
+
+ for (r = 0; r < 12; ++r) {
+ ROUND(r);
+ }
+
+ for (i = 0; i < 8; ++i) {
+ S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
+ }
+
+#undef G
+#undef ROUND
+}
+
+int blake2b_update(blake2b_state *S, const void *in, size_t inlen) {
+ const uint8_t *pin = (const uint8_t *)in;
+
+ if (inlen == 0) {
+ return 0;
+ }
+
+ /* Sanity check */
+ if (S == NULL || in == NULL) {
+ return -1;
+ }
+
+ /* Is this a reused state? */
+ if (S->f[0] != 0) {
+ return -1;
+ }
+
+ if (S->buflen + inlen > BLAKE2B_BLOCKBYTES) {
+ /* Complete current block */
+ size_t left = S->buflen;
+ size_t fill = BLAKE2B_BLOCKBYTES - left;
+ memcpy(&S->buf[left], pin, fill);
+ blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES);
+ blake2b_compress(S, S->buf);
+ S->buflen = 0;
+ inlen -= fill;
+ pin += fill;
+ /* Avoid buffer copies when possible */
+ while (inlen > BLAKE2B_BLOCKBYTES) {
+ blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES);
+ blake2b_compress(S, pin);
+ inlen -= BLAKE2B_BLOCKBYTES;
+ pin += BLAKE2B_BLOCKBYTES;
+ }
+ }
+ memcpy(&S->buf[S->buflen], pin, inlen);
+ S->buflen += (unsigned int)inlen;
+ return 0;
+}
+
+int blake2b_final(blake2b_state *S, void *out, size_t outlen) {
+ uint8_t buffer[BLAKE2B_OUTBYTES] = {0};
+ unsigned int i;
+
+ /* Sanity checks */
+ if (S == NULL || out == NULL || outlen < S->outlen) {
+ return -1;
+ }
+
+ /* Is this a reused state? */
+ if (S->f[0] != 0) {
+ return -1;
+ }
+
+ blake2b_increment_counter(S, S->buflen);
+ blake2b_set_lastblock(S);
+ memset(&S->buf[S->buflen], 0, BLAKE2B_BLOCKBYTES - S->buflen); /* Padding */
+ blake2b_compress(S, S->buf);
+
+ for (i = 0; i < 8; ++i) { /* Output full hash to temp buffer */
+ store64(buffer + sizeof(S->h[i]) * i, S->h[i]);
+ }
+
+ memcpy(out, buffer, S->outlen);
+ clear_internal_memory(buffer, sizeof(buffer));
+ clear_internal_memory(S->buf, sizeof(S->buf));
+ clear_internal_memory(S->h, sizeof(S->h));
+ return 0;
+}
+
+int blake2b(void *out, size_t outlen, const void *in, size_t inlen,
+ const void *key, size_t keylen) {
+ blake2b_state S;
+ int ret = -1;
+
+ /* Verify parameters */
+ if (NULL == in && inlen > 0) {
+ goto fail;
+ }
+
+ if (NULL == out || outlen == 0 || outlen > BLAKE2B_OUTBYTES) {
+ goto fail;
+ }
+
+ if ((NULL == key && keylen > 0) || keylen > BLAKE2B_KEYBYTES) {
+ goto fail;
+ }
+
+ if (keylen > 0) {
+ if (blake2b_init_key(&S, outlen, key, keylen) < 0) {
+ goto fail;
+ }
+ } else {
+ if (blake2b_init(&S, outlen) < 0) {
+ goto fail;
+ }
+ }
+
+ if (blake2b_update(&S, in, inlen) < 0) {
+ goto fail;
+ }
+ ret = blake2b_final(&S, out, outlen);
+
+fail:
+ clear_internal_memory(&S, sizeof(S));
+ return ret;
+}
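+
+/*
+ * Usage sketch (illustrative, not part of the upstream sources): the one-shot
+ * entry point and the streaming init/update/final sequence must produce the
+ * same digest for the same input:
+ *
+ *   uint8_t d1[32], d2[32];
+ *   blake2b_state S;
+ *   blake2b(d1, sizeof(d1), "abc", 3, NULL, 0);   (one-shot, unkeyed)
+ *   blake2b_init(&S, sizeof(d2));
+ *   blake2b_update(&S, "a", 1);
+ *   blake2b_update(&S, "bc", 2);
+ *   blake2b_final(&S, d2, sizeof(d2));
+ *   memcmp(d1, d2, sizeof(d1)) == 0 then holds.
+ */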
+
+/* Argon2 Team - Begin Code */
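+/*
+ * blake2b_long computes the Argon2 variable-length hash H'(X):
+ *   outlen <= 64: H'(X) = BLAKE2b(LE32(outlen) || X, outlen)
+ *   outlen >  64: V_1 = BLAKE2b(LE32(outlen) || X, 64), V_{i+1} = BLAKE2b(V_i, 64);
+ *                 the output concatenates the first 32 bytes of each V_i and
+ *                 all remaining bytes of the final block.
+ */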
+int blake2b_long(void *pout, size_t outlen, const void *in, size_t inlen) {
+ uint8_t *out = (uint8_t *)pout;
+ blake2b_state blake_state;
+ uint8_t outlen_bytes[sizeof(uint32_t)] = {0};
+ int ret = -1;
+
+ if (outlen > UINT32_MAX) {
+ goto fail;
+ }
+
+ /* Ensure little-endian byte order! */
+ store32(outlen_bytes, (uint32_t)outlen);
+
+#define TRY(statement) \
+ do { \
+ ret = statement; \
+ if (ret < 0) { \
+ goto fail; \
+ } \
+ } while ((void)0, 0)
+
+ if (outlen <= BLAKE2B_OUTBYTES) {
+ TRY(blake2b_init(&blake_state, outlen));
+ TRY(blake2b_update(&blake_state, outlen_bytes, sizeof(outlen_bytes)));
+ TRY(blake2b_update(&blake_state, in, inlen));
+ TRY(blake2b_final(&blake_state, out, outlen));
+ } else {
+ uint32_t toproduce;
+ uint8_t out_buffer[BLAKE2B_OUTBYTES];
+ uint8_t in_buffer[BLAKE2B_OUTBYTES];
+ TRY(blake2b_init(&blake_state, BLAKE2B_OUTBYTES));
+ TRY(blake2b_update(&blake_state, outlen_bytes, sizeof(outlen_bytes)));
+ TRY(blake2b_update(&blake_state, in, inlen));
+ TRY(blake2b_final(&blake_state, out_buffer, BLAKE2B_OUTBYTES));
+ memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2);
+ out += BLAKE2B_OUTBYTES / 2;
+ toproduce = (uint32_t)outlen - BLAKE2B_OUTBYTES / 2;
+
+ while (toproduce > BLAKE2B_OUTBYTES) {
+ memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES);
+ TRY(blake2b(out_buffer, BLAKE2B_OUTBYTES, in_buffer,
+ BLAKE2B_OUTBYTES, NULL, 0));
+ memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2);
+ out += BLAKE2B_OUTBYTES / 2;
+ toproduce -= BLAKE2B_OUTBYTES / 2;
+ }
+
+ memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES);
+ TRY(blake2b(out_buffer, toproduce, in_buffer, BLAKE2B_OUTBYTES, NULL,
+ 0));
+ memcpy(out, out_buffer, toproduce);
+ }
+fail:
+ clear_internal_memory(&blake_state, sizeof(blake_state));
+ return ret;
+#undef TRY
+}
+/* Argon2 Team - End Code */
diff --git a/lib/crypto_backend/argon2/blake2/blamka-round-opt.h b/lib/crypto_backend/argon2/blake2/blamka-round-opt.h
new file mode 100644
index 0000000..3127f2a
--- /dev/null
+++ b/lib/crypto_backend/argon2/blake2/blamka-round-opt.h
@@ -0,0 +1,471 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef BLAKE_ROUND_MKA_OPT_H
+#define BLAKE_ROUND_MKA_OPT_H
+
+#include "blake2-impl.h"
+
+#include <emmintrin.h>
+#if defined(__SSSE3__)
+#include <tmmintrin.h> /* for _mm_shuffle_epi8 and _mm_alignr_epi8 */
+#endif
+
+#if defined(__XOP__) && (defined(__GNUC__) || defined(__clang__))
+#include <x86intrin.h>
+#endif
+
+#if !defined(__AVX512F__)
+#if !defined(__AVX2__)
+#if !defined(__XOP__)
+#if defined(__SSSE3__)
+#define r16 \
+ (_mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
+#define r24 \
+ (_mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
+#define _mm_roti_epi64(x, c) \
+ (-(c) == 32) \
+ ? _mm_shuffle_epi32((x), _MM_SHUFFLE(2, 3, 0, 1)) \
+ : (-(c) == 24) \
+ ? _mm_shuffle_epi8((x), r24) \
+ : (-(c) == 16) \
+ ? _mm_shuffle_epi8((x), r16) \
+ : (-(c) == 63) \
+ ? _mm_xor_si128(_mm_srli_epi64((x), -(c)), \
+ _mm_add_epi64((x), (x))) \
+ : _mm_xor_si128(_mm_srli_epi64((x), -(c)), \
+ _mm_slli_epi64((x), 64 - (-(c))))
+#else /* defined(__SSE2__) */
+#define _mm_roti_epi64(r, c) \
+ _mm_xor_si128(_mm_srli_epi64((r), -(c)), _mm_slli_epi64((r), 64 - (-(c))))
+#endif
+#else /* __XOP__: _mm_roti_epi64 is provided as a native intrinsic */
+#endif
+
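+/* BlaMka primitive: per 64-bit lane, computes x + y + 2 * lo32(x) * lo32(y);
+ * _mm_mul_epu32 multiplies the low 32 bits of each 64-bit lane. */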
+static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
+ const __m128i z = _mm_mul_epu32(x, y);
+ return _mm_add_epi64(_mm_add_epi64(x, y), _mm_add_epi64(z, z));
+}
+
+#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ A0 = fBlaMka(A0, B0); \
+ A1 = fBlaMka(A1, B1); \
+ \
+ D0 = _mm_xor_si128(D0, A0); \
+ D1 = _mm_xor_si128(D1, A1); \
+ \
+ D0 = _mm_roti_epi64(D0, -32); \
+ D1 = _mm_roti_epi64(D1, -32); \
+ \
+ C0 = fBlaMka(C0, D0); \
+ C1 = fBlaMka(C1, D1); \
+ \
+ B0 = _mm_xor_si128(B0, C0); \
+ B1 = _mm_xor_si128(B1, C1); \
+ \
+ B0 = _mm_roti_epi64(B0, -24); \
+ B1 = _mm_roti_epi64(B1, -24); \
+ } while ((void)0, 0)
+
+#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ A0 = fBlaMka(A0, B0); \
+ A1 = fBlaMka(A1, B1); \
+ \
+ D0 = _mm_xor_si128(D0, A0); \
+ D1 = _mm_xor_si128(D1, A1); \
+ \
+ D0 = _mm_roti_epi64(D0, -16); \
+ D1 = _mm_roti_epi64(D1, -16); \
+ \
+ C0 = fBlaMka(C0, D0); \
+ C1 = fBlaMka(C1, D1); \
+ \
+ B0 = _mm_xor_si128(B0, C0); \
+ B1 = _mm_xor_si128(B1, C1); \
+ \
+ B0 = _mm_roti_epi64(B0, -63); \
+ B1 = _mm_roti_epi64(B1, -63); \
+ } while ((void)0, 0)
+
+#if defined(__SSSE3__)
+#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ __m128i t0 = _mm_alignr_epi8(B1, B0, 8); \
+ __m128i t1 = _mm_alignr_epi8(B0, B1, 8); \
+ B0 = t0; \
+ B1 = t1; \
+ \
+ t0 = C0; \
+ C0 = C1; \
+ C1 = t0; \
+ \
+ t0 = _mm_alignr_epi8(D1, D0, 8); \
+ t1 = _mm_alignr_epi8(D0, D1, 8); \
+ D0 = t1; \
+ D1 = t0; \
+ } while ((void)0, 0)
+
+#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ __m128i t0 = _mm_alignr_epi8(B0, B1, 8); \
+ __m128i t1 = _mm_alignr_epi8(B1, B0, 8); \
+ B0 = t0; \
+ B1 = t1; \
+ \
+ t0 = C0; \
+ C0 = C1; \
+ C1 = t0; \
+ \
+ t0 = _mm_alignr_epi8(D0, D1, 8); \
+ t1 = _mm_alignr_epi8(D1, D0, 8); \
+ D0 = t1; \
+ D1 = t0; \
+ } while ((void)0, 0)
+#else /* SSE2 */
+#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ __m128i t0 = D0; \
+ __m128i t1 = B0; \
+ D0 = C0; \
+ C0 = C1; \
+ C1 = D0; \
+ D0 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t0, t0)); \
+ D1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(D1, D1)); \
+ B0 = _mm_unpackhi_epi64(B0, _mm_unpacklo_epi64(B1, B1)); \
+ B1 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(t1, t1)); \
+ } while ((void)0, 0)
+
+#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ __m128i t0, t1; \
+ t0 = C0; \
+ C0 = C1; \
+ C1 = t0; \
+ t0 = B0; \
+ t1 = D0; \
+ B0 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(B0, B0)); \
+ B1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(B1, B1)); \
+ D0 = _mm_unpackhi_epi64(D0, _mm_unpacklo_epi64(D1, D1)); \
+ D1 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t1, t1)); \
+ } while ((void)0, 0)
+#endif
+
+#define BLAKE2_ROUND(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do { \
+ G1(A0, B0, C0, D0, A1, B1, C1, D1); \
+ G2(A0, B0, C0, D0, A1, B1, C1, D1); \
+ \
+ DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
+ \
+ G1(A0, B0, C0, D0, A1, B1, C1, D1); \
+ G2(A0, B0, C0, D0, A1, B1, C1, D1); \
+ \
+ UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
+ } while ((void)0, 0)
+#else /* __AVX2__ */
+
+#include <immintrin.h>
+
+#define rotr32(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
+#define rotr24(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
+#define rotr16(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
+#define rotr63(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
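+
+/* Rotations by a multiple of 8 bits are byte permutations (vpshufb or 32-bit
+ * shuffles); rotr63 uses shift-and-add since x + x == x << 1. */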
+
+#define G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do { \
+ __m256i ml = _mm256_mul_epu32(A0, B0); \
+ ml = _mm256_add_epi64(ml, ml); \
+ A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
+ D0 = _mm256_xor_si256(D0, A0); \
+ D0 = rotr32(D0); \
+ \
+ ml = _mm256_mul_epu32(C0, D0); \
+ ml = _mm256_add_epi64(ml, ml); \
+ C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
+ \
+ B0 = _mm256_xor_si256(B0, C0); \
+ B0 = rotr24(B0); \
+ \
+ ml = _mm256_mul_epu32(A1, B1); \
+ ml = _mm256_add_epi64(ml, ml); \
+ A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
+ D1 = _mm256_xor_si256(D1, A1); \
+ D1 = rotr32(D1); \
+ \
+ ml = _mm256_mul_epu32(C1, D1); \
+ ml = _mm256_add_epi64(ml, ml); \
+ C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
+ \
+ B1 = _mm256_xor_si256(B1, C1); \
+ B1 = rotr24(B1); \
+ } while((void)0, 0);
+
+#define G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do { \
+ __m256i ml = _mm256_mul_epu32(A0, B0); \
+ ml = _mm256_add_epi64(ml, ml); \
+ A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
+ D0 = _mm256_xor_si256(D0, A0); \
+ D0 = rotr16(D0); \
+ \
+ ml = _mm256_mul_epu32(C0, D0); \
+ ml = _mm256_add_epi64(ml, ml); \
+ C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
+ B0 = _mm256_xor_si256(B0, C0); \
+ B0 = rotr63(B0); \
+ \
+ ml = _mm256_mul_epu32(A1, B1); \
+ ml = _mm256_add_epi64(ml, ml); \
+ A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
+ D1 = _mm256_xor_si256(D1, A1); \
+ D1 = rotr16(D1); \
+ \
+ ml = _mm256_mul_epu32(C1, D1); \
+ ml = _mm256_add_epi64(ml, ml); \
+ C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
+ B1 = _mm256_xor_si256(B1, C1); \
+ B1 = rotr63(B1); \
+ } while((void)0, 0);
+
+#define DIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(0, 3, 2, 1)); \
+ C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
+ D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(2, 1, 0, 3)); \
+ \
+ B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(0, 3, 2, 1)); \
+ C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
+ D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(2, 1, 0, 3)); \
+ } while((void)0, 0);
+
+#define DIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do { \
+ __m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
+ __m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
+ B1 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
+ B0 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
+ \
+ tmp1 = C0; \
+ C0 = C1; \
+ C1 = tmp1; \
+ \
+ tmp1 = _mm256_blend_epi32(D0, D1, 0xCC); \
+ tmp2 = _mm256_blend_epi32(D0, D1, 0x33); \
+ D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
+ D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
+    } while((void)0, 0);
+
+#define UNDIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(2, 1, 0, 3)); \
+ C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
+ D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(0, 3, 2, 1)); \
+ \
+ B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(2, 1, 0, 3)); \
+ C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
+ D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(0, 3, 2, 1)); \
+ } while((void)0, 0);
+
+#define UNDIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do { \
+ __m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
+ __m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
+ B0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
+ B1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
+ \
+ tmp1 = C0; \
+ C0 = C1; \
+ C1 = tmp1; \
+ \
+ tmp1 = _mm256_blend_epi32(D0, D1, 0x33); \
+ tmp2 = _mm256_blend_epi32(D0, D1, 0xCC); \
+ D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
+ D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
+ } while((void)0, 0);
+
+#define BLAKE2_ROUND_1(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do{ \
+ G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ \
+ DIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
+ \
+ G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ \
+ UNDIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
+ } while((void)0, 0);
+
+#define BLAKE2_ROUND_2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do{ \
+ G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ \
+ DIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ \
+ G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ \
+ UNDIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ } while((void)0, 0);
+
+#endif /* __AVX2__ */
+
+#else /* __AVX512F__ */
+
+#include <immintrin.h>
+
+#define ror64(x, n) _mm512_ror_epi64((x), (n))
+
+static __m512i muladd(__m512i x, __m512i y)
+{
+ __m512i z = _mm512_mul_epu32(x, y);
+ return _mm512_add_epi64(_mm512_add_epi64(x, y), _mm512_add_epi64(z, z));
+}
+
+#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ A0 = muladd(A0, B0); \
+ A1 = muladd(A1, B1); \
+\
+ D0 = _mm512_xor_si512(D0, A0); \
+ D1 = _mm512_xor_si512(D1, A1); \
+\
+ D0 = ror64(D0, 32); \
+ D1 = ror64(D1, 32); \
+\
+ C0 = muladd(C0, D0); \
+ C1 = muladd(C1, D1); \
+\
+ B0 = _mm512_xor_si512(B0, C0); \
+ B1 = _mm512_xor_si512(B1, C1); \
+\
+ B0 = ror64(B0, 24); \
+ B1 = ror64(B1, 24); \
+ } while ((void)0, 0)
+
+#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ A0 = muladd(A0, B0); \
+ A1 = muladd(A1, B1); \
+\
+ D0 = _mm512_xor_si512(D0, A0); \
+ D1 = _mm512_xor_si512(D1, A1); \
+\
+ D0 = ror64(D0, 16); \
+ D1 = ror64(D1, 16); \
+\
+ C0 = muladd(C0, D0); \
+ C1 = muladd(C1, D1); \
+\
+ B0 = _mm512_xor_si512(B0, C0); \
+ B1 = _mm512_xor_si512(B1, C1); \
+\
+ B0 = ror64(B0, 63); \
+ B1 = ror64(B1, 63); \
+ } while ((void)0, 0)
+
+#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ B0 = _mm512_permutex_epi64(B0, _MM_SHUFFLE(0, 3, 2, 1)); \
+ B1 = _mm512_permutex_epi64(B1, _MM_SHUFFLE(0, 3, 2, 1)); \
+\
+ C0 = _mm512_permutex_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
+ C1 = _mm512_permutex_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
+\
+ D0 = _mm512_permutex_epi64(D0, _MM_SHUFFLE(2, 1, 0, 3)); \
+ D1 = _mm512_permutex_epi64(D1, _MM_SHUFFLE(2, 1, 0, 3)); \
+ } while ((void)0, 0)
+
+#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ B0 = _mm512_permutex_epi64(B0, _MM_SHUFFLE(2, 1, 0, 3)); \
+ B1 = _mm512_permutex_epi64(B1, _MM_SHUFFLE(2, 1, 0, 3)); \
+\
+ C0 = _mm512_permutex_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
+ C1 = _mm512_permutex_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
+\
+ D0 = _mm512_permutex_epi64(D0, _MM_SHUFFLE(0, 3, 2, 1)); \
+ D1 = _mm512_permutex_epi64(D1, _MM_SHUFFLE(0, 3, 2, 1)); \
+ } while ((void)0, 0)
+
+#define BLAKE2_ROUND(A0, B0, C0, D0, A1, B1, C1, D1) \
+ do { \
+ G1(A0, B0, C0, D0, A1, B1, C1, D1); \
+ G2(A0, B0, C0, D0, A1, B1, C1, D1); \
+\
+ DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
+\
+ G1(A0, B0, C0, D0, A1, B1, C1, D1); \
+ G2(A0, B0, C0, D0, A1, B1, C1, D1); \
+\
+ UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
+ } while ((void)0, 0)
+
+#define SWAP_HALVES(A0, A1) \
+ do { \
+ __m512i t0, t1; \
+ t0 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(1, 0, 1, 0)); \
+ t1 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(3, 2, 3, 2)); \
+ A0 = t0; \
+ A1 = t1; \
+ } while((void)0, 0)
+
+#define SWAP_QUARTERS(A0, A1) \
+ do { \
+ SWAP_HALVES(A0, A1); \
+ A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
+ A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
+ } while((void)0, 0)
+
+#define UNSWAP_QUARTERS(A0, A1) \
+ do { \
+ A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
+ A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
+ SWAP_HALVES(A0, A1); \
+ } while((void)0, 0)
+
+#define BLAKE2_ROUND_1(A0, C0, B0, D0, A1, C1, B1, D1) \
+ do { \
+ SWAP_HALVES(A0, B0); \
+ SWAP_HALVES(C0, D0); \
+ SWAP_HALVES(A1, B1); \
+ SWAP_HALVES(C1, D1); \
+ BLAKE2_ROUND(A0, B0, C0, D0, A1, B1, C1, D1); \
+ SWAP_HALVES(A0, B0); \
+ SWAP_HALVES(C0, D0); \
+ SWAP_HALVES(A1, B1); \
+ SWAP_HALVES(C1, D1); \
+ } while ((void)0, 0)
+
+#define BLAKE2_ROUND_2(A0, A1, B0, B1, C0, C1, D0, D1) \
+ do { \
+ SWAP_QUARTERS(A0, A1); \
+ SWAP_QUARTERS(B0, B1); \
+ SWAP_QUARTERS(C0, C1); \
+ SWAP_QUARTERS(D0, D1); \
+ BLAKE2_ROUND(A0, B0, C0, D0, A1, B1, C1, D1); \
+ UNSWAP_QUARTERS(A0, A1); \
+ UNSWAP_QUARTERS(B0, B1); \
+ UNSWAP_QUARTERS(C0, C1); \
+ UNSWAP_QUARTERS(D0, D1); \
+ } while ((void)0, 0)
+
+#endif /* __AVX512F__ */
+#endif /* BLAKE_ROUND_MKA_OPT_H */
diff --git a/lib/crypto_backend/argon2/blake2/blamka-round-ref.h b/lib/crypto_backend/argon2/blake2/blamka-round-ref.h
new file mode 100644
index 0000000..16cfc1c
--- /dev/null
+++ b/lib/crypto_backend/argon2/blake2/blamka-round-ref.h
@@ -0,0 +1,56 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef BLAKE_ROUND_MKA_H
+#define BLAKE_ROUND_MKA_H
+
+#include "blake2.h"
+#include "blake2-impl.h"
+
+/* designed by the Lyra PHC team */
+static BLAKE2_INLINE uint64_t fBlaMka(uint64_t x, uint64_t y) {
+ const uint64_t m = UINT64_C(0xFFFFFFFF);
+ const uint64_t xy = (x & m) * (y & m);
+ return x + y + 2 * xy;
+}
+
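+/* Same rotation structure as BLAKE2b's G, but the additions are replaced by
+ * the multiplication-hardened fBlaMka sum and no message words are injected
+ * (hence BLAKE2_ROUND_NOMSG below). */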
+#define G(a, b, c, d) \
+ do { \
+ a = fBlaMka(a, b); \
+ d = rotr64(d ^ a, 32); \
+ c = fBlaMka(c, d); \
+ b = rotr64(b ^ c, 24); \
+ a = fBlaMka(a, b); \
+ d = rotr64(d ^ a, 16); \
+ c = fBlaMka(c, d); \
+ b = rotr64(b ^ c, 63); \
+ } while ((void)0, 0)
+
+#define BLAKE2_ROUND_NOMSG(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, \
+ v12, v13, v14, v15) \
+ do { \
+ G(v0, v4, v8, v12); \
+ G(v1, v5, v9, v13); \
+ G(v2, v6, v10, v14); \
+ G(v3, v7, v11, v15); \
+ G(v0, v5, v10, v15); \
+ G(v1, v6, v11, v12); \
+ G(v2, v7, v8, v13); \
+ G(v3, v4, v9, v14); \
+ } while ((void)0, 0)
+
+#endif
diff --git a/lib/crypto_backend/argon2/core.c b/lib/crypto_backend/argon2/core.c
new file mode 100644
index 0000000..f128d84
--- /dev/null
+++ b/lib/crypto_backend/argon2/core.c
@@ -0,0 +1,646 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+/*For memory wiping*/
+#ifdef _MSC_VER
+#include <windows.h>
+#include <winbase.h> /* For SecureZeroMemory */
+#endif
+#if defined __STDC_LIB_EXT1__
+#define __STDC_WANT_LIB_EXT1__ 1
+#endif
+#define VC_GE_2005(version) (version >= 1400)
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "core.h"
+#include "thread.h"
+#include "blake2/blake2.h"
+#include "blake2/blake2-impl.h"
+
+#ifdef GENKAT
+#include "genkat.h"
+#endif
+
+#if defined(__clang__)
+#if __has_attribute(optnone)
+#define NOT_OPTIMIZED __attribute__((optnone))
+#endif
+#elif defined(__GNUC__)
+#define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40400
+#define NOT_OPTIMIZED __attribute__((optimize("O0")))
+#endif
+#endif
+#ifndef NOT_OPTIMIZED
+#define NOT_OPTIMIZED
+#endif
+
+/***************Instance and Position constructors**********/
+void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
+
+void copy_block(block *dst, const block *src) {
+ memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
+}
+
+void xor_block(block *dst, const block *src) {
+ int i;
+ for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
+ dst->v[i] ^= src->v[i];
+ }
+}
+
+static void load_block(block *dst, const void *input) {
+ unsigned i;
+ for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
+ dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
+ }
+}
+
+static void store_block(void *output, const block *src) {
+ unsigned i;
+ for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
+ store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
+ }
+}
+
+/***************Memory functions*****************/
+
+int allocate_memory(const argon2_context *context, uint8_t **memory,
+ size_t num, size_t size) {
+    size_t memory_size = num * size;
+ if (memory == NULL) {
+ return ARGON2_MEMORY_ALLOCATION_ERROR;
+ }
+
+ /* 1. Check for multiplication overflow */
+ if (size != 0 && memory_size / size != num) {
+ return ARGON2_MEMORY_ALLOCATION_ERROR;
+ }
+
+ /* 2. Try to allocate with appropriate allocator */
+ if (context->allocate_cbk) {
+ (context->allocate_cbk)(memory, memory_size);
+ } else {
+ *memory = malloc(memory_size);
+ }
+
+ if (*memory == NULL) {
+ return ARGON2_MEMORY_ALLOCATION_ERROR;
+ }
+
+ return ARGON2_OK;
+}
+
+void free_memory(const argon2_context *context, uint8_t *memory,
+ size_t num, size_t size) {
+    size_t memory_size = num * size;
+ clear_internal_memory(memory, memory_size);
+ if (context->free_cbk) {
+ (context->free_cbk)(memory, memory_size);
+ } else {
+ free(memory);
+ }
+}
+
+#if defined(_MSC_VER) && VC_GE_2005(_MSC_VER)
+void secure_wipe_memory(void *v, size_t n) {
+ SecureZeroMemory(v, n);
+}
+#elif defined memset_s
+void secure_wipe_memory(void *v, size_t n) {
+ memset_s(v, n, 0, n);
+}
+#elif defined(HAVE_EXPLICIT_BZERO)
+void secure_wipe_memory(void *v, size_t n) {
+ explicit_bzero(v, n);
+}
+#else
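+/* Fallback: call memset through a volatile function pointer so the compiler
+ * cannot prove the store dead and optimize the wipe away. */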
+void NOT_OPTIMIZED secure_wipe_memory(void *v, size_t n) {
+ static void *(*const volatile memset_sec)(void *, int, size_t) = &memset;
+ memset_sec(v, 0, n);
+}
+#endif
+
+/* Memory clear flag defaults to true. */
+int FLAG_clear_internal_memory = 1;
+void clear_internal_memory(void *v, size_t n) {
+ if (FLAG_clear_internal_memory && v) {
+ secure_wipe_memory(v, n);
+ }
+}
+
+void finalize(const argon2_context *context, argon2_instance_t *instance) {
+ if (context != NULL && instance != NULL) {
+ block blockhash;
+ uint32_t l;
+
+ copy_block(&blockhash, instance->memory + instance->lane_length - 1);
+
+ /* XOR the last blocks */
+ for (l = 1; l < instance->lanes; ++l) {
+ uint32_t last_block_in_lane =
+ l * instance->lane_length + (instance->lane_length - 1);
+ xor_block(&blockhash, instance->memory + last_block_in_lane);
+ }
+
+ /* Hash the result */
+ {
+ uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
+ store_block(blockhash_bytes, &blockhash);
+ blake2b_long(context->out, context->outlen, blockhash_bytes,
+ ARGON2_BLOCK_SIZE);
+ /* clear blockhash and blockhash_bytes */
+ clear_internal_memory(blockhash.v, ARGON2_BLOCK_SIZE);
+ clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
+ }
+
+#ifdef GENKAT
+ print_tag(context->out, context->outlen);
+#endif
+
+ free_memory(context, (uint8_t *)instance->memory,
+ instance->memory_blocks, sizeof(block));
+ }
+}
+
+uint32_t index_alpha(const argon2_instance_t *instance,
+ const argon2_position_t *position, uint32_t pseudo_rand,
+ int same_lane) {
+ /*
+ * Pass 0:
+ * This lane : all already finished segments plus already constructed
+ * blocks in this segment
+ * Other lanes : all already finished segments
+ * Pass 1+:
+ * This lane : (SYNC_POINTS - 1) last segments plus already constructed
+ * blocks in this segment
+ * Other lanes : (SYNC_POINTS - 1) last segments
+ */
+ uint32_t reference_area_size;
+ uint64_t relative_position;
+ uint32_t start_position, absolute_position;
+
+ if (0 == position->pass) {
+ /* First pass */
+ if (0 == position->slice) {
+ /* First slice */
+ reference_area_size =
+ position->index - 1; /* all but the previous */
+ } else {
+ if (same_lane) {
+ /* The same lane => add current segment */
+ reference_area_size =
+ position->slice * instance->segment_length +
+ position->index - 1;
+ } else {
+ reference_area_size =
+ position->slice * instance->segment_length +
+ ((position->index == 0) ? (-1) : 0);
+ }
+ }
+ } else {
+ /* Second pass */
+ if (same_lane) {
+ reference_area_size = instance->lane_length -
+ instance->segment_length + position->index -
+ 1;
+ } else {
+ reference_area_size = instance->lane_length -
+ instance->segment_length +
+ ((position->index == 0) ? (-1) : 0);
+ }
+ }
+
+    /* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and producing
+     * the relative position */
+ relative_position = pseudo_rand;
+ relative_position = relative_position * relative_position >> 32;
+ relative_position = reference_area_size - 1 -
+ (reference_area_size * relative_position >> 32);
+
+ /* 1.2.5 Computing starting position */
+ start_position = 0;
+
+ if (0 != position->pass) {
+ start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
+ ? 0
+ : (position->slice + 1) * instance->segment_length;
+ }
+
+ /* 1.2.6. Computing absolute position */
+ absolute_position = (start_position + relative_position) %
+ instance->lane_length; /* absolute position */
+ return absolute_position;
+}
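+
+/*
+ * Worked example (illustrative): the quadratic mapping in index_alpha skews
+ * references toward recently written blocks. With reference_area_size = 1024:
+ *   pseudo_rand = 0x00000000 -> relative_position = 1023 (newest eligible)
+ *   pseudo_rand = 0x80000000 -> (2^31 * 2^31) >> 32 = 2^30;
+ *                               1024 * 2^30 >> 32 = 256; position = 767
+ *   pseudo_rand = 0xFFFFFFFF -> relative_position ~ 0   (oldest eligible)
+ */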
+
+/* Single-threaded version for p=1 case */
+static int fill_memory_blocks_st(argon2_instance_t *instance) {
+ uint32_t r, s, l;
+
+ for (r = 0; r < instance->passes; ++r) {
+ for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
+ for (l = 0; l < instance->lanes; ++l) {
+ argon2_position_t position = {r, l, (uint8_t)s, 0};
+ fill_segment(instance, position);
+ }
+ }
+#ifdef GENKAT
+ internal_kat(instance, r); /* Print all memory blocks */
+#endif
+ }
+ return ARGON2_OK;
+}
+
+#if !defined(ARGON2_NO_THREADS)
+
+#ifdef _WIN32
+static unsigned __stdcall fill_segment_thr(void *thread_data)
+#else
+static void *fill_segment_thr(void *thread_data)
+#endif
+{
+ argon2_thread_data *my_data = thread_data;
+ fill_segment(my_data->instance_ptr, my_data->pos);
+ return 0;
+}
+
+/* Multi-threaded version for p > 1 case */
+static int fill_memory_blocks_mt(argon2_instance_t *instance) {
+ uint32_t r, s;
+ argon2_thread_handle_t *thread = NULL;
+ argon2_thread_data *thr_data = NULL;
+ int rc = ARGON2_OK;
+
+ /* 1. Allocating space for threads */
+ thread = calloc(instance->lanes, sizeof(argon2_thread_handle_t));
+ if (thread == NULL) {
+ rc = ARGON2_MEMORY_ALLOCATION_ERROR;
+ goto fail;
+ }
+
+ thr_data = calloc(instance->lanes, sizeof(argon2_thread_data));
+ if (thr_data == NULL) {
+ rc = ARGON2_MEMORY_ALLOCATION_ERROR;
+ goto fail;
+ }
+
+ for (r = 0; r < instance->passes; ++r) {
+ for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
+ uint32_t l, ll;
+
+ /* 2. Calling threads */
+ for (l = 0; l < instance->lanes; ++l) {
+ argon2_position_t position;
+
+                /* 2.1 Join a thread once the number of running threads reaches the limit */
+ if (l >= instance->threads) {
+ if (argon2_thread_join(thread[l - instance->threads])) {
+ rc = ARGON2_THREAD_FAIL;
+ goto fail;
+ }
+ }
+
+ /* 2.2 Create thread */
+ position.pass = r;
+ position.lane = l;
+ position.slice = (uint8_t)s;
+ position.index = 0;
+ thr_data[l].instance_ptr =
+ instance; /* preparing the thread input */
+ memcpy(&(thr_data[l].pos), &position,
+ sizeof(argon2_position_t));
+ if (argon2_thread_create(&thread[l], &fill_segment_thr,
+ (void *)&thr_data[l])) {
+ /* Wait for already running threads */
+ for (ll = 0; ll < l; ++ll)
+ argon2_thread_join(thread[ll]);
+ rc = ARGON2_THREAD_FAIL;
+ goto fail;
+ }
+
+                /* Non-threaded equivalent of the block above:
+                 * fill_segment(instance, position); */
+ }
+
+ /* 3. Joining remaining threads */
+ for (l = instance->lanes - instance->threads; l < instance->lanes;
+ ++l) {
+ if (argon2_thread_join(thread[l])) {
+ rc = ARGON2_THREAD_FAIL;
+ goto fail;
+ }
+ }
+ }
+
+#ifdef GENKAT
+ internal_kat(instance, r); /* Print all memory blocks */
+#endif
+ }
+
+fail:
+ if (thread != NULL) {
+ free(thread);
+ }
+ if (thr_data != NULL) {
+ free(thr_data);
+ }
+ return rc;
+}
+
+#endif /* ARGON2_NO_THREADS */
+
+int fill_memory_blocks(argon2_instance_t *instance) {
+ if (instance == NULL || instance->lanes == 0) {
+ return ARGON2_INCORRECT_PARAMETER;
+ }
+#if defined(ARGON2_NO_THREADS)
+ return fill_memory_blocks_st(instance);
+#else
+ return instance->threads == 1 ?
+ fill_memory_blocks_st(instance) : fill_memory_blocks_mt(instance);
+#endif
+}
+
+int validate_inputs(const argon2_context *context) {
+ if (NULL == context) {
+ return ARGON2_INCORRECT_PARAMETER;
+ }
+
+ if (NULL == context->out) {
+ return ARGON2_OUTPUT_PTR_NULL;
+ }
+
+ /* Validate output length */
+ if (ARGON2_MIN_OUTLEN > context->outlen) {
+ return ARGON2_OUTPUT_TOO_SHORT;
+ }
+
+ if (ARGON2_MAX_OUTLEN < context->outlen) {
+ return ARGON2_OUTPUT_TOO_LONG;
+ }
+
+ /* Validate password (required param) */
+ if (NULL == context->pwd) {
+ if (0 != context->pwdlen) {
+ return ARGON2_PWD_PTR_MISMATCH;
+ }
+ }
+#if ARGON2_MIN_PWD_LENGTH > 0 /* cryptsetup: fix gcc warning */
+ if (ARGON2_MIN_PWD_LENGTH > context->pwdlen) {
+ return ARGON2_PWD_TOO_SHORT;
+ }
+#endif
+ if (ARGON2_MAX_PWD_LENGTH < context->pwdlen) {
+ return ARGON2_PWD_TOO_LONG;
+ }
+
+ /* Validate salt (required param) */
+ if (NULL == context->salt) {
+ if (0 != context->saltlen) {
+ return ARGON2_SALT_PTR_MISMATCH;
+ }
+ }
+
+ if (ARGON2_MIN_SALT_LENGTH > context->saltlen) {
+ return ARGON2_SALT_TOO_SHORT;
+ }
+
+ if (ARGON2_MAX_SALT_LENGTH < context->saltlen) {
+ return ARGON2_SALT_TOO_LONG;
+ }
+
+ /* Validate secret (optional param) */
+ if (NULL == context->secret) {
+ if (0 != context->secretlen) {
+ return ARGON2_SECRET_PTR_MISMATCH;
+ }
+ } else {
+#if ARGON2_MIN_SECRET > 0 /* cryptsetup: fix gcc warning */
+ if (ARGON2_MIN_SECRET > context->secretlen) {
+ return ARGON2_SECRET_TOO_SHORT;
+ }
+#endif
+ if (ARGON2_MAX_SECRET < context->secretlen) {
+ return ARGON2_SECRET_TOO_LONG;
+ }
+ }
+
+ /* Validate associated data (optional param) */
+ if (NULL == context->ad) {
+ if (0 != context->adlen) {
+ return ARGON2_AD_PTR_MISMATCH;
+ }
+ } else {
+#if ARGON2_MIN_AD_LENGTH > 0 /* cryptsetup: fix gcc warning */
+ if (ARGON2_MIN_AD_LENGTH > context->adlen) {
+ return ARGON2_AD_TOO_SHORT;
+ }
+#endif
+ if (ARGON2_MAX_AD_LENGTH < context->adlen) {
+ return ARGON2_AD_TOO_LONG;
+ }
+ }
+
+ /* Validate memory cost */
+ if (ARGON2_MIN_MEMORY > context->m_cost) {
+ return ARGON2_MEMORY_TOO_LITTLE;
+ }
+#if 0 /* UINT32_MAX, cryptsetup: fix gcc warning */
+ if (ARGON2_MAX_MEMORY < context->m_cost) {
+ return ARGON2_MEMORY_TOO_MUCH;
+ }
+#endif
+ if (context->m_cost < 8 * context->lanes) {
+ return ARGON2_MEMORY_TOO_LITTLE;
+ }
+
+ /* Validate time cost */
+ if (ARGON2_MIN_TIME > context->t_cost) {
+ return ARGON2_TIME_TOO_SMALL;
+ }
+
+ if (ARGON2_MAX_TIME < context->t_cost) {
+ return ARGON2_TIME_TOO_LARGE;
+ }
+
+ /* Validate lanes */
+ if (ARGON2_MIN_LANES > context->lanes) {
+ return ARGON2_LANES_TOO_FEW;
+ }
+
+ if (ARGON2_MAX_LANES < context->lanes) {
+ return ARGON2_LANES_TOO_MANY;
+ }
+
+ /* Validate threads */
+ if (ARGON2_MIN_THREADS > context->threads) {
+ return ARGON2_THREADS_TOO_FEW;
+ }
+
+ if (ARGON2_MAX_THREADS < context->threads) {
+ return ARGON2_THREADS_TOO_MANY;
+ }
+
+ if (NULL != context->allocate_cbk && NULL == context->free_cbk) {
+ return ARGON2_FREE_MEMORY_CBK_NULL;
+ }
+
+ if (NULL == context->allocate_cbk && NULL != context->free_cbk) {
+ return ARGON2_ALLOCATE_MEMORY_CBK_NULL;
+ }
+
+ return ARGON2_OK;
+}
+
+void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
+ uint32_t l;
+    /* Compute the first and second block of each lane l as G(H0||0||l) and
+       G(H0||1||l), respectively */
+ uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
+ for (l = 0; l < instance->lanes; ++l) {
+
+ store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
+ store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
+ blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
+ ARGON2_PREHASH_SEED_LENGTH);
+ load_block(&instance->memory[l * instance->lane_length + 0],
+ blockhash_bytes);
+
+ store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
+ blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
+ ARGON2_PREHASH_SEED_LENGTH);
+ load_block(&instance->memory[l * instance->lane_length + 1],
+ blockhash_bytes);
+ }
+ clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
+}
+
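+/*
+ * Computes the Argon2 pre-hashing digest (Argon2 spec / RFC 9106):
+ *   H0 = BLAKE2b-512(LE32(p) || LE32(T) || LE32(m) || LE32(t) || LE32(v) ||
+ *        LE32(y) || LE32(|P|) || P || LE32(|S|) || S || LE32(|K|) || K ||
+ *        LE32(|X|) || X)
+ * which matches the field order fed to BlakeHash below: lanes, outlen, m_cost,
+ * t_cost, version, type, then length-prefixed password, salt, secret and ad.
+ */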
+void initial_hash(uint8_t *blockhash, argon2_context *context,
+ argon2_type type) {
+ blake2b_state BlakeHash;
+ uint8_t value[sizeof(uint32_t)];
+
+ if (NULL == context || NULL == blockhash) {
+ return;
+ }
+
+ blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);
+
+ store32(&value, context->lanes);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ store32(&value, context->outlen);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ store32(&value, context->m_cost);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ store32(&value, context->t_cost);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ store32(&value, context->version);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ store32(&value, (uint32_t)type);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ store32(&value, context->pwdlen);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ if (context->pwd != NULL) {
+ blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
+ context->pwdlen);
+
+ if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
+ secure_wipe_memory(context->pwd, context->pwdlen);
+ context->pwdlen = 0;
+ }
+ }
+
+ store32(&value, context->saltlen);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ if (context->salt != NULL) {
+ blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
+ context->saltlen);
+ }
+
+ store32(&value, context->secretlen);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ if (context->secret != NULL) {
+ blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
+ context->secretlen);
+
+ if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
+ secure_wipe_memory(context->secret, context->secretlen);
+ context->secretlen = 0;
+ }
+ }
+
+ store32(&value, context->adlen);
+ blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
+
+ if (context->ad != NULL) {
+ blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
+ context->adlen);
+ }
+
+ blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
+}
+
+int initialize(argon2_instance_t *instance, argon2_context *context) {
+ uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
+ int result = ARGON2_OK;
+
+ if (instance == NULL || context == NULL)
+ return ARGON2_INCORRECT_PARAMETER;
+ instance->context_ptr = context;
+
+ /* 1. Memory allocation */
+ result = allocate_memory(context, (uint8_t **)&(instance->memory),
+ instance->memory_blocks, sizeof(block));
+ if (result != ARGON2_OK) {
+ return result;
+ }
+
+ /* 2. Initial hashing */
+ /* H_0 + 8 extra bytes to produce the first blocks */
+ /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
+ /* Hashing all inputs */
+ initial_hash(blockhash, context, instance->type);
+ /* Zeroing 8 extra bytes */
+ clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
+ ARGON2_PREHASH_SEED_LENGTH -
+ ARGON2_PREHASH_DIGEST_LENGTH);
+
+#ifdef GENKAT
+ initial_kat(blockhash, context, instance->type);
+#endif
+
+ /* 3. Creating first blocks, we always have at least two blocks in a slice
+ */
+ fill_first_blocks(blockhash, instance);
+ /* Clearing the hash */
+ clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);
+
+ return ARGON2_OK;
+}
diff --git a/lib/crypto_backend/argon2/core.h b/lib/crypto_backend/argon2/core.h
new file mode 100644
index 0000000..59e2564
--- /dev/null
+++ b/lib/crypto_backend/argon2/core.h
@@ -0,0 +1,228 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef ARGON2_CORE_H
+#define ARGON2_CORE_H
+
+#include "argon2.h"
+
+#define CONST_CAST(x) (x)(uintptr_t)
+
+/**********************Argon2 internal constants*******************************/
+
+enum argon2_core_constants {
+ /* Memory block size in bytes */
+ ARGON2_BLOCK_SIZE = 1024,
+ ARGON2_QWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 8,
+ ARGON2_OWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 16,
+ ARGON2_HWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 32,
+ ARGON2_512BIT_WORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 64,
+
+    /* Number of pseudo-random values generated by one call to BLAKE2b in
+       Argon2i to generate reference block positions */
+ ARGON2_ADDRESSES_IN_BLOCK = 128,
+
+ /* Pre-hashing digest length and its extension*/
+ ARGON2_PREHASH_DIGEST_LENGTH = 64,
+ ARGON2_PREHASH_SEED_LENGTH = 72
+};
+
+/*************************Argon2 internal data types***********************/
+
+/*
+ * Structure for the (1KB) memory block implemented as 128 64-bit words.
+ * Memory blocks can be copied, XORed. Internal words can be accessed by [] (no
+ * bounds checking).
+ */
+typedef struct block_ { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; } block;
+
+/*****************Functions that work with the block******************/
+
+/* Initialize each byte of the block with @in */
+void init_block_value(block *b, uint8_t in);
+
+/* Copy block @src to block @dst */
+void copy_block(block *dst, const block *src);
+
+/* XOR @src onto @dst bytewise */
+void xor_block(block *dst, const block *src);
+
+/*
+ * Argon2 instance: memory pointer, number of passes, amount of memory, type,
+ * and derived values.
+ * Used to evaluate the number and location of blocks to construct in each
+ * thread
+ */
+typedef struct Argon2_instance_t {
+ block *memory; /* Memory pointer */
+ uint32_t version;
+ uint32_t passes; /* Number of passes */
+ uint32_t memory_blocks; /* Number of blocks in memory */
+ uint32_t segment_length;
+ uint32_t lane_length;
+ uint32_t lanes;
+ uint32_t threads;
+ argon2_type type;
+ int print_internals; /* whether to print the memory blocks */
+ argon2_context *context_ptr; /* points back to original context */
+} argon2_instance_t;
+
+/*
+ * Argon2 position: where we construct the block right now. Used to distribute
+ * work between threads.
+ */
+typedef struct Argon2_position_t {
+ uint32_t pass;
+ uint32_t lane;
+ uint8_t slice;
+ uint32_t index;
+} argon2_position_t;
+
+/*Struct that holds the inputs for thread handling FillSegment*/
+typedef struct Argon2_thread_data {
+ argon2_instance_t *instance_ptr;
+ argon2_position_t pos;
+} argon2_thread_data;
+
+/*************************Argon2 core functions********************************/
+
+/* Allocates memory to the given pointer, uses the appropriate allocator as
+ * specified in the context. Total allocated memory is num*size.
+ * @param context argon2_context which specifies the allocator
+ * @param memory pointer to the pointer to the memory
+ * @param num the number of elements to be allocated
+ * @param size the size in bytes of each element to be allocated
+ * @return ARGON2_OK if @memory is a valid pointer and memory is allocated
+ */
+int allocate_memory(const argon2_context *context, uint8_t **memory,
+ size_t num, size_t size);
+
+/*
+ * Frees memory at the given pointer, uses the appropriate deallocator as
+ * specified in the context. Also cleans the memory using clear_internal_memory.
+ * @param context argon2_context which specifies the deallocator
+ * @param memory pointer to buffer to be freed
+ * @param num the number of elements to be deallocated
+ * @param size the size in bytes of each element to be deallocated
+ */
+void free_memory(const argon2_context *context, uint8_t *memory,
+ size_t num, size_t size);
+
+/* Function that securely cleans the memory. This ignores any flags set
+ * regarding clearing memory. Usually one just calls clear_internal_memory.
+ * @param v Pointer to the memory
+ * @param n Memory size in bytes
+ */
+void secure_wipe_memory(void *v, size_t n);
+
+/* Function that securely clears the memory if FLAG_clear_internal_memory is
+ * set. If the flag isn't set, this function does nothing.
+ * @param v Pointer to the memory
+ * @param n Memory size in bytes
+ */
+void clear_internal_memory(void *v, size_t n);
+
+/*
+ * Computes absolute position of reference block in the lane following a skewed
+ * distribution and using a pseudo-random value as input
+ * @param instance Pointer to the current instance
+ * @param position Pointer to the current position
+ * @param pseudo_rand 32-bit pseudo-random value used to determine the position
+ * @param same_lane Indicates if the block will be taken from the current lane.
+ * If so we can reference the current segment
+ * @pre All pointers must be valid
+ */
+uint32_t index_alpha(const argon2_instance_t *instance,
+ const argon2_position_t *position, uint32_t pseudo_rand,
+ int same_lane);
+
+/*
+ * Function that validates all inputs against predefined restrictions and
+ * returns an error code
+ * @param context Pointer to current Argon2 context
+ * @return ARGON2_OK if everything is all right, otherwise one of the error
+ * codes (all defined in <argon2.h>)
+ */
+int validate_inputs(const argon2_context *context);
+
+/*
+ * Hashes all the inputs into @a blockhash[PREHASH_DIGEST_LENGTH], clears
+ * password and secret if needed
+ * @param context Pointer to the Argon2 internal structure containing memory
+ * pointer, and parameters for time and space requirements.
+ * @param blockhash Buffer for pre-hashing digest
+ * @param type Argon2 type
+ * @pre @a blockhash must have at least @a PREHASH_DIGEST_LENGTH bytes
+ * allocated
+ */
+void initial_hash(uint8_t *blockhash, argon2_context *context,
+ argon2_type type);
+
+/*
+ * Function creates first 2 blocks per lane
+ * @param instance Pointer to the current instance
+ * @param blockhash Pointer to the pre-hashing digest
+ * @pre blockhash must point to @a PREHASH_SEED_LENGTH allocated values
+ */
+void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance);
+
+/*
+ * Function allocates memory, hashes the inputs with Blake, and creates first
+ * two blocks. Returns the pointer to the main memory with 2 blocks per lane
+ * initialized
+ * @param context Pointer to the Argon2 internal structure containing memory
+ * pointer, and parameters for time and space requirements.
+ * @param instance Current Argon2 instance
+ * @return ARGON2_OK if successful, an Argon2 error code otherwise (e.g.
+ * ARGON2_MEMORY_ALLOCATION_ERROR); the instance memory is initialized on success
+ */
+int initialize(argon2_instance_t *instance, argon2_context *context);
+
+/*
+ * XORing the last block of each lane, hashing it, making the tag. Deallocates
+ * the memory.
+ * @param context Pointer to current Argon2 context (use only the out parameters
+ * from it)
+ * @param instance Pointer to current instance of Argon2
+ * @pre instance->state must point to necessary amount of memory
+ * @pre context->out must point to outlen bytes of memory
+ * @pre if context->free_cbk is not NULL, it should point to a function that
+ * deallocates memory
+ */
+void finalize(const argon2_context *context, argon2_instance_t *instance);
+
+/*
+ * Function that fills the segment using previous segments also from other
+ * threads
+ * @param context current context
+ * @param instance Pointer to the current instance
+ * @param position Current position
+ * @pre all block pointers must be valid
+ */
+void fill_segment(const argon2_instance_t *instance,
+ argon2_position_t position);
+
+/*
+ * Function that fills the entire memory t_cost times based on the first two
+ * blocks in each lane
+ * @param instance Pointer to the current instance
+ * @return ARGON2_OK if successful, an Argon2 error code otherwise
+ */
+int fill_memory_blocks(argon2_instance_t *instance);
+
+#endif
diff --git a/lib/crypto_backend/argon2/encoding.c b/lib/crypto_backend/argon2/encoding.c
new file mode 100644
index 0000000..a717263
--- /dev/null
+++ b/lib/crypto_backend/argon2/encoding.c
@@ -0,0 +1,462 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include "encoding.h"
+#include "core.h"
+
+/*
+ * Example code for a decoder and encoder of "hash strings", with Argon2
+ * parameters.
+ *
+ * This code comprises two sections:
+ *
+ * -- The first section contains generic Base64 encoding and decoding
+ * functions. It is conceptually applicable to any hash function
+ * implementation that uses Base64 to encode and decode parameters,
+ * salts and outputs. It could be made into a library, provided that
+ * the relevant functions are made public (non-static) and be given
+ * reasonable names to avoid collisions with other functions.
+ *
+ * -- The second section is specific to Argon2. It encodes and decodes
+ * the parameters, salts and outputs. It does not compute the hash
+ * itself.
+ *
+ * The code was originally written by Thomas Pornin <pornin@bolet.org>,
+ * to whom comments and remarks may be sent. It is released under what
+ * should amount to Public Domain or its closest equivalent; the
+ * following mantra is supposed to incarnate that fact with all the
+ * proper legal rituals:
+ *
+ * ---------------------------------------------------------------------
+ * This file is provided under the terms of Creative Commons CC0 1.0
+ * Public Domain Dedication. To the extent possible under law, the
+ * author (Thomas Pornin) has waived all copyright and related or
+ * neighboring rights to this file. This work is published from: Canada.
+ * ---------------------------------------------------------------------
+ *
+ * Copyright (c) 2015 Thomas Pornin
+ */
+
+/* ==================================================================== */
+/*
+ * Common code; could be shared between different hash functions.
+ *
+ * Note: the Base64 functions below assume that uppercase letters (resp.
+ * lowercase letters) have consecutive numerical codes, that fit on 8
+ * bits. All modern systems use ASCII-compatible charsets, where these
+ * properties are true. If you are stuck with a dinosaur of a system
+ * that still defaults to EBCDIC then you already have much bigger
+ * interoperability issues to deal with.
+ */
+
+/*
+ * Some macros for constant-time comparisons. These work over values in
+ * the 0..255 range. Returned value is 0x00 on "false", 0xFF on "true".
+ */
+#define EQ(x, y) ((((0U - ((unsigned)(x) ^ (unsigned)(y))) >> 8) & 0xFF) ^ 0xFF)
+#define GT(x, y) ((((unsigned)(y) - (unsigned)(x)) >> 8) & 0xFF)
+#define GE(x, y) (GT(y, x) ^ 0xFF)
+#define LT(x, y) GT(y, x)
+#define LE(x, y) GE(y, x)
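+
+/*
+ * Example: GT(3, 200) = ((200 - 3) >> 8) & 0xFF = 0x00 ("false"), while
+ * GT(200, 3) wraps around: ((3 - 200) mod 2^32) >> 8 ends in 0xFF ("true").
+ * No branch ever depends on the compared (possibly secret) values.
+ */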
+
+/*
+ * Convert value x (0..63) to corresponding Base64 character.
+ */
+static int b64_byte_to_char(unsigned x) {
+ return (LT(x, 26) & (x + 'A')) |
+ (GE(x, 26) & LT(x, 52) & (x + ('a' - 26))) |
+ (GE(x, 52) & LT(x, 62) & (x + ('0' - 52))) | (EQ(x, 62) & '+') |
+ (EQ(x, 63) & '/');
+}
+
+/*
+ * Convert character c to the corresponding 6-bit value. If character c
+ * is not a Base64 character, then 0xFF (255) is returned.
+ */
+static unsigned b64_char_to_byte(int c) {
+ unsigned x;
+
+ x = (GE(c, 'A') & LE(c, 'Z') & (c - 'A')) |
+ (GE(c, 'a') & LE(c, 'z') & (c - ('a' - 26))) |
+ (GE(c, '0') & LE(c, '9') & (c - ('0' - 52))) | (EQ(c, '+') & 62) |
+ (EQ(c, '/') & 63);
+ return x | (EQ(x, 0) & (EQ(c, 'A') ^ 0xFF));
+}
+
+/*
+ * Convert some bytes to Base64. 'dst_len' is the length (in characters)
+ * of the output buffer 'dst'; if that buffer is not large enough to
+ * receive the result (including the terminating 0), then (size_t)-1
+ * is returned. Otherwise, the zero-terminated Base64 string is written
+ * in the buffer, and the output length (counted WITHOUT the terminating
+ * zero) is returned.
+ */
+static size_t to_base64(char *dst, size_t dst_len, const void *src,
+ size_t src_len) {
+ size_t olen;
+ const unsigned char *buf;
+ unsigned acc, acc_len;
+
+ olen = (src_len / 3) << 2;
+ switch (src_len % 3) {
+ case 2:
+ olen++;
+ /* fall through */
+ case 1:
+ olen += 2;
+ break;
+ }
+ if (dst_len <= olen) {
+ return (size_t)-1;
+ }
+ acc = 0;
+ acc_len = 0;
+ buf = (const unsigned char *)src;
+ while (src_len-- > 0) {
+ acc = (acc << 8) + (*buf++);
+ acc_len += 8;
+ while (acc_len >= 6) {
+ acc_len -= 6;
+ *dst++ = (char)b64_byte_to_char((acc >> acc_len) & 0x3F);
+ }
+ }
+ if (acc_len > 0) {
+ *dst++ = (char)b64_byte_to_char((acc << (6 - acc_len)) & 0x3F);
+ }
+ *dst++ = 0;
+ return olen;
+}
+
+/*
+ * Decode Base64 chars into bytes. The '*dst_len' value must initially
+ * contain the length of the output buffer '*dst'; when the decoding
+ * ends, the actual number of decoded bytes is written back in
+ * '*dst_len'.
+ *
+ * Decoding stops when a non-Base64 character is encountered, or when
+ * the output buffer capacity is exceeded. If an error occurred (output
+ * buffer is too small, invalid last characters leading to unprocessed
+ * buffered bits), then NULL is returned; otherwise, the returned value
+ * points to the first non-Base64 character in the source stream, which
+ * may be the terminating zero.
+ */
+static const char *from_base64(void *dst, size_t *dst_len, const char *src) {
+ size_t len;
+ unsigned char *buf;
+ unsigned acc, acc_len;
+
+ buf = (unsigned char *)dst;
+ len = 0;
+ acc = 0;
+ acc_len = 0;
+ for (;;) {
+ unsigned d;
+
+ d = b64_char_to_byte(*src);
+ if (d == 0xFF) {
+ break;
+ }
+ src++;
+ acc = (acc << 6) + d;
+ acc_len += 6;
+ if (acc_len >= 8) {
+ acc_len -= 8;
+ if ((len++) >= *dst_len) {
+ return NULL;
+ }
+ *buf++ = (acc >> acc_len) & 0xFF;
+ }
+ }
+
+ /*
+ * If the input length is equal to 1 modulo 4 (which is
+ * invalid), then there will remain 6 unprocessed bits;
+ * otherwise, only 0, 2 or 4 bits are buffered. The buffered
+ * bits must also all be zero.
+ */
+ if (acc_len > 4 || (acc & (((unsigned)1 << acc_len) - 1)) != 0) {
+ return NULL;
+ }
+ *dst_len = len;
+ return src;
+}
+
+/*
+ * Decode decimal integer from 'str'; the value is written in '*v'.
+ * Returned value is a pointer to the next non-decimal character in the
+ * string. If there is no digit at all, or the value encoding is not
+ * minimal (extra leading zeros), or the value does not fit in an
+ * 'unsigned long', then NULL is returned.
+ */
+static const char *decode_decimal(const char *str, unsigned long *v) {
+ const char *orig;
+ unsigned long acc;
+
+ acc = 0;
+ for (orig = str;; str++) {
+ int c;
+
+ c = *str;
+ if (c < '0' || c > '9') {
+ break;
+ }
+ c -= '0';
+ if (acc > (ULONG_MAX / 10)) {
+ return NULL;
+ }
+ acc *= 10;
+ if ((unsigned long)c > (ULONG_MAX - acc)) {
+ return NULL;
+ }
+ acc += (unsigned long)c;
+ }
+ if (str == orig || (*orig == '0' && str != (orig + 1))) {
+ return NULL;
+ }
+ *v = acc;
+ return str;
+}
+
+/* ==================================================================== */
+/*
+ * Code specific to Argon2.
+ *
+ * The code below applies the following format:
+ *
+ * $argon2<T>[$v=<num>]$m=<num>,t=<num>,p=<num>$<bin>$<bin>
+ *
+ * where <T> is either 'd', 'id', or 'i', <num> is a decimal integer (positive,
+ * fits in an 'unsigned long'), and <bin> is Base64-encoded data (no '=' padding
+ * characters, no newline or whitespace).
+ *
+ * The last two binary chunks (encoded in Base64) are, in that order,
+ * the salt and the output. Both are required. The binary salt length and the
+ * output length must be in the allowed ranges defined in argon2.h.
+ *
+ * The ctx struct must contain buffers large enough to hold the salt and out
+ * when it is fed into decode_string.
+ */
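+
+/*
+ * Illustrative example (hypothetical parameter and hash values): a string
+ * in this format may look like
+ *
+ *   $argon2id$v=19$m=65536,t=2,p=1$c29tZXNhbHQ$b2JzY3VyZW91dHB1dA
+ *
+ * i.e. version 19, 65536 KiB memory cost, 2 passes, 1 lane, followed by
+ * the Base64 salt and output without '=' padding.
+ */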
+
+int decode_string(argon2_context *ctx, const char *str, argon2_type type) {
+
+/* check for prefix */
+#define CC(prefix) \
+ do { \
+ size_t cc_len = strlen(prefix); \
+ if (strncmp(str, prefix, cc_len) != 0) { \
+ return ARGON2_DECODING_FAIL; \
+ } \
+ str += cc_len; \
+ } while ((void)0, 0)
+
+/* optional prefix checking with supplied code */
+#define CC_opt(prefix, code) \
+ do { \
+ size_t cc_len = strlen(prefix); \
+ if (strncmp(str, prefix, cc_len) == 0) { \
+ str += cc_len; \
+ { code; } \
+ } \
+ } while ((void)0, 0)
+
+/* Decoding prefix into decimal */
+#define DECIMAL(x) \
+ do { \
+ unsigned long dec_x; \
+ str = decode_decimal(str, &dec_x); \
+ if (str == NULL) { \
+ return ARGON2_DECODING_FAIL; \
+ } \
+ (x) = dec_x; \
+ } while ((void)0, 0)
+
+
+/* Decoding prefix into uint32_t decimal */
+#define DECIMAL_U32(x) \
+ do { \
+ unsigned long dec_x; \
+ str = decode_decimal(str, &dec_x); \
+ if (str == NULL || dec_x > UINT32_MAX) { \
+ return ARGON2_DECODING_FAIL; \
+ } \
+ (x) = (uint32_t)dec_x; \
+ } while ((void)0, 0)
+
+
+/* Decoding base64 into a binary buffer */
+#define BIN(buf, max_len, len) \
+ do { \
+ size_t bin_len = (max_len); \
+ str = from_base64(buf, &bin_len, str); \
+ if (str == NULL || bin_len > UINT32_MAX) { \
+ return ARGON2_DECODING_FAIL; \
+ } \
+ (len) = (uint32_t)bin_len; \
+ } while ((void)0, 0)
+
+ size_t maxsaltlen = ctx->saltlen;
+ size_t maxoutlen = ctx->outlen;
+ int validation_result;
+ const char* type_string;
+
+ /* We should start with the argon2_type we are using */
+ type_string = argon2_type2string(type, 0);
+ if (!type_string) {
+ return ARGON2_INCORRECT_TYPE;
+ }
+
+ CC("$");
+ CC(type_string);
+
+    /* Reading the version number if present (strings without "$v=" default to ARGON2_VERSION_10) */
+ ctx->version = ARGON2_VERSION_10;
+ CC_opt("$v=", DECIMAL_U32(ctx->version));
+
+ CC("$m=");
+ DECIMAL_U32(ctx->m_cost);
+ CC(",t=");
+ DECIMAL_U32(ctx->t_cost);
+ CC(",p=");
+ DECIMAL_U32(ctx->lanes);
+ ctx->threads = ctx->lanes;
+
+ CC("$");
+ BIN(ctx->salt, maxsaltlen, ctx->saltlen);
+ CC("$");
+ BIN(ctx->out, maxoutlen, ctx->outlen);
+
+ /* The rest of the fields get the default values */
+ ctx->secret = NULL;
+ ctx->secretlen = 0;
+ ctx->ad = NULL;
+ ctx->adlen = 0;
+ ctx->allocate_cbk = NULL;
+ ctx->free_cbk = NULL;
+ ctx->flags = ARGON2_DEFAULT_FLAGS;
+
+ /* On return, must have valid context */
+ validation_result = validate_inputs(ctx);
+ if (validation_result != ARGON2_OK) {
+ return validation_result;
+ }
+
+ /* Can't have any additional characters */
+ if (*str == 0) {
+ return ARGON2_OK;
+ } else {
+ return ARGON2_DECODING_FAIL;
+ }
+#undef CC
+#undef CC_opt
+#undef DECIMAL
+#undef DECIMAL_U32
+#undef BIN
+}
+
+int encode_string(char *dst, size_t dst_len, argon2_context *ctx,
+ argon2_type type) {
+#define SS(str) \
+ do { \
+ size_t pp_len = strlen(str); \
+ if (pp_len >= dst_len) { \
+ return ARGON2_ENCODING_FAIL; \
+ } \
+ memcpy(dst, str, pp_len + 1); \
+ dst += pp_len; \
+ dst_len -= pp_len; \
+ } while ((void)0, 0)
+
+#define SX(x) \
+ do { \
+ char tmp[30]; \
+ sprintf(tmp, "%lu", (unsigned long)(x)); \
+ SS(tmp); \
+ } while ((void)0, 0)
+
+#define SB(buf, len) \
+ do { \
+ size_t sb_len = to_base64(dst, dst_len, buf, len); \
+ if (sb_len == (size_t)-1) { \
+ return ARGON2_ENCODING_FAIL; \
+ } \
+ dst += sb_len; \
+ dst_len -= sb_len; \
+ } while ((void)0, 0)
+
+ const char* type_string = argon2_type2string(type, 0);
+ int validation_result = validate_inputs(ctx);
+
+ if (!type_string) {
+ return ARGON2_ENCODING_FAIL;
+ }
+
+ if (validation_result != ARGON2_OK) {
+ return validation_result;
+ }
+
+
+ SS("$");
+ SS(type_string);
+
+ SS("$v=");
+ SX(ctx->version);
+
+ SS("$m=");
+ SX(ctx->m_cost);
+ SS(",t=");
+ SX(ctx->t_cost);
+ SS(",p=");
+ SX(ctx->lanes);
+
+ SS("$");
+ SB(ctx->salt, ctx->saltlen);
+
+ SS("$");
+ SB(ctx->out, ctx->outlen);
+ return ARGON2_OK;
+
+#undef SS
+#undef SX
+#undef SB
+}
+
+size_t b64len(uint32_t len) {
+ size_t olen = ((size_t)len / 3) << 2;
+
+ switch (len % 3) {
+ case 2:
+ olen++;
+ /* fall through */
+ case 1:
+ olen += 2;
+ break;
+ }
+
+ return olen;
+}
+
+size_t numlen(uint32_t num) {
+ size_t len = 1;
+ while (num >= 10) {
+ ++len;
+ num = num / 10;
+ }
+ return len;
+}
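+
+/*
+ * Sketch (illustrative): together these helpers let a caller size the
+ * encoding buffer exactly. For "$argon2id$v=V$m=M,t=T,p=P$<salt>$<out>"
+ * the required size is strlen("$argon2id$v=$m=,t=,p=$$") + numlen(V) +
+ * numlen(M) + numlen(T) + numlen(P) + b64len(saltlen) + b64len(outlen),
+ * plus 1 for the terminating zero.
+ */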
diff --git a/lib/crypto_backend/argon2/encoding.h b/lib/crypto_backend/argon2/encoding.h
new file mode 100644
index 0000000..5b8b2dd
--- /dev/null
+++ b/lib/crypto_backend/argon2/encoding.h
@@ -0,0 +1,57 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef ENCODING_H
+#define ENCODING_H
+#include "argon2.h"
+
+#define ARGON2_MAX_DECODED_LANES UINT32_C(255)
+#define ARGON2_MIN_DECODED_SALT_LEN UINT32_C(8)
+#define ARGON2_MIN_DECODED_OUT_LEN UINT32_C(12)
+
+/*
+* Encodes an Argon2 hash string into the provided buffer. 'dst_len'
+* contains the size, in characters, of the 'dst' buffer; if 'dst_len'
+* is less than the number of required characters (including the
+* terminating 0), then this function returns ARGON2_ENCODING_FAIL.
+*
+* On success, ARGON2_OK is returned.
+*/
+int encode_string(char *dst, size_t dst_len, argon2_context *ctx,
+ argon2_type type);
+
+/*
+* Decodes an Argon2 hash string into the provided structure 'ctx'.
+* The only fields that must be set prior to this call are ctx.saltlen and
+* ctx.outlen (which must be the maximal salt and out length values that are
+* allowed), ctx.salt and ctx.out (which must be buffers of the specified
+* length), and ctx.pwd and ctx.pwdlen which must hold a valid password.
+*
+* Invalid input string causes an error. On success, the ctx is valid and all
+* fields have been initialized.
+*
+* Returned value is ARGON2_OK on success, other ARGON2_ codes on error.
+*/
+int decode_string(argon2_context *ctx, const char *str, argon2_type type);
+
+/* Returns the Base64-encoded length (without '=' padding) of a byte stream of length 'len' */
+size_t b64len(uint32_t len);
+
+/* Returns the number of decimal characters needed to encode 'num' */
+size_t numlen(uint32_t num);
+
+#endif
diff --git a/lib/crypto_backend/argon2/opt.c b/lib/crypto_backend/argon2/opt.c
new file mode 100644
index 0000000..6c5e403
--- /dev/null
+++ b/lib/crypto_backend/argon2/opt.c
@@ -0,0 +1,283 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "argon2.h"
+#include "core.h"
+
+#include "blake2/blake2.h"
+#include "blake2/blamka-round-opt.h"
+
+/*
+ * Function fills a new memory block and optionally XORs the old block over the new one.
+ * Memory must be initialized.
+ * @param state Pointer to the just produced block. Content will be updated(!)
+ * @param ref_block Pointer to the reference block
+ * @param next_block Pointer to the block to be XORed over. May coincide with @ref_block
+ * @param with_xor Whether to XOR into the new block (1) or just overwrite (0)
+ * @pre all block pointers must be valid
+ */
+#if defined(__AVX512F__)
+static void fill_block(__m512i *state, const block *ref_block,
+ block *next_block, int with_xor) {
+ __m512i block_XY[ARGON2_512BIT_WORDS_IN_BLOCK];
+ unsigned int i;
+
+ if (with_xor) {
+ for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
+ state[i] = _mm512_xor_si512(
+ state[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
+ block_XY[i] = _mm512_xor_si512(
+ state[i], _mm512_loadu_si512((const __m512i *)next_block->v + i));
+ }
+ } else {
+ for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
+ block_XY[i] = state[i] = _mm512_xor_si512(
+ state[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
+ }
+ }
+
+ for (i = 0; i < 2; ++i) {
+ BLAKE2_ROUND_1(
+ state[8 * i + 0], state[8 * i + 1], state[8 * i + 2], state[8 * i + 3],
+ state[8 * i + 4], state[8 * i + 5], state[8 * i + 6], state[8 * i + 7]);
+ }
+
+ for (i = 0; i < 2; ++i) {
+ BLAKE2_ROUND_2(
+ state[2 * 0 + i], state[2 * 1 + i], state[2 * 2 + i], state[2 * 3 + i],
+ state[2 * 4 + i], state[2 * 5 + i], state[2 * 6 + i], state[2 * 7 + i]);
+ }
+
+ for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
+ state[i] = _mm512_xor_si512(state[i], block_XY[i]);
+ _mm512_storeu_si512((__m512i *)next_block->v + i, state[i]);
+ }
+}
+#elif defined(__AVX2__)
+static void fill_block(__m256i *state, const block *ref_block,
+ block *next_block, int with_xor) {
+ __m256i block_XY[ARGON2_HWORDS_IN_BLOCK];
+ unsigned int i;
+
+ if (with_xor) {
+ for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
+ state[i] = _mm256_xor_si256(
+ state[i], _mm256_loadu_si256((const __m256i *)ref_block->v + i));
+ block_XY[i] = _mm256_xor_si256(
+ state[i], _mm256_loadu_si256((const __m256i *)next_block->v + i));
+ }
+ } else {
+ for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
+ block_XY[i] = state[i] = _mm256_xor_si256(
+ state[i], _mm256_loadu_si256((const __m256i *)ref_block->v + i));
+ }
+ }
+
+ for (i = 0; i < 4; ++i) {
+ BLAKE2_ROUND_1(state[8 * i + 0], state[8 * i + 4], state[8 * i + 1], state[8 * i + 5],
+ state[8 * i + 2], state[8 * i + 6], state[8 * i + 3], state[8 * i + 7]);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ BLAKE2_ROUND_2(state[ 0 + i], state[ 4 + i], state[ 8 + i], state[12 + i],
+ state[16 + i], state[20 + i], state[24 + i], state[28 + i]);
+ }
+
+ for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
+ state[i] = _mm256_xor_si256(state[i], block_XY[i]);
+ _mm256_storeu_si256((__m256i *)next_block->v + i, state[i]);
+ }
+}
+#else
+static void fill_block(__m128i *state, const block *ref_block,
+ block *next_block, int with_xor) {
+ __m128i block_XY[ARGON2_OWORDS_IN_BLOCK];
+ unsigned int i;
+
+ if (with_xor) {
+ for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
+ state[i] = _mm_xor_si128(
+ state[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
+ block_XY[i] = _mm_xor_si128(
+ state[i], _mm_loadu_si128((const __m128i *)next_block->v + i));
+ }
+ } else {
+ for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
+ block_XY[i] = state[i] = _mm_xor_si128(
+ state[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
+ }
+ }
+
+ for (i = 0; i < 8; ++i) {
+ BLAKE2_ROUND(state[8 * i + 0], state[8 * i + 1], state[8 * i + 2],
+ state[8 * i + 3], state[8 * i + 4], state[8 * i + 5],
+ state[8 * i + 6], state[8 * i + 7]);
+ }
+
+ for (i = 0; i < 8; ++i) {
+ BLAKE2_ROUND(state[8 * 0 + i], state[8 * 1 + i], state[8 * 2 + i],
+ state[8 * 3 + i], state[8 * 4 + i], state[8 * 5 + i],
+ state[8 * 6 + i], state[8 * 7 + i]);
+ }
+
+ for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
+ state[i] = _mm_xor_si128(state[i], block_XY[i]);
+ _mm_storeu_si128((__m128i *)next_block->v + i, state[i]);
+ }
+}
+#endif
+
+static void next_addresses(block *address_block, block *input_block) {
+ /*Temporary zero-initialized blocks*/
+#if defined(__AVX512F__)
+ __m512i zero_block[ARGON2_512BIT_WORDS_IN_BLOCK];
+ __m512i zero2_block[ARGON2_512BIT_WORDS_IN_BLOCK];
+#elif defined(__AVX2__)
+ __m256i zero_block[ARGON2_HWORDS_IN_BLOCK];
+ __m256i zero2_block[ARGON2_HWORDS_IN_BLOCK];
+#else
+ __m128i zero_block[ARGON2_OWORDS_IN_BLOCK];
+ __m128i zero2_block[ARGON2_OWORDS_IN_BLOCK];
+#endif
+
+ memset(zero_block, 0, sizeof(zero_block));
+ memset(zero2_block, 0, sizeof(zero2_block));
+
+ /*Increasing index counter*/
+ input_block->v[6]++;
+
+ /*First iteration of G*/
+ fill_block(zero_block, input_block, address_block, 0);
+
+ /*Second iteration of G*/
+ fill_block(zero2_block, address_block, address_block, 0);
+}
+
+void fill_segment(const argon2_instance_t *instance,
+ argon2_position_t position) {
+ block *ref_block = NULL, *curr_block = NULL;
+ block address_block, input_block;
+ uint64_t pseudo_rand, ref_index, ref_lane;
+ uint32_t prev_offset, curr_offset;
+ uint32_t starting_index, i;
+#if defined(__AVX512F__)
+ __m512i state[ARGON2_512BIT_WORDS_IN_BLOCK];
+#elif defined(__AVX2__)
+ __m256i state[ARGON2_HWORDS_IN_BLOCK];
+#else
+ __m128i state[ARGON2_OWORDS_IN_BLOCK];
+#endif
+ int data_independent_addressing;
+
+ if (instance == NULL) {
+ return;
+ }
+
+ data_independent_addressing =
+ (instance->type == Argon2_i) ||
+ (instance->type == Argon2_id && (position.pass == 0) &&
+ (position.slice < ARGON2_SYNC_POINTS / 2));
+
+ if (data_independent_addressing) {
+ init_block_value(&input_block, 0);
+
+ input_block.v[0] = position.pass;
+ input_block.v[1] = position.lane;
+ input_block.v[2] = position.slice;
+ input_block.v[3] = instance->memory_blocks;
+ input_block.v[4] = instance->passes;
+ input_block.v[5] = instance->type;
+ }
+
+ starting_index = 0;
+
+ if ((0 == position.pass) && (0 == position.slice)) {
+ starting_index = 2; /* we have already generated the first two blocks */
+
+ /* Don't forget to generate the first block of addresses: */
+ if (data_independent_addressing) {
+ next_addresses(&address_block, &input_block);
+ }
+ }
+
+ /* Offset of the current block */
+ curr_offset = position.lane * instance->lane_length +
+ position.slice * instance->segment_length + starting_index;
+
+ if (0 == curr_offset % instance->lane_length) {
+ /* Last block in this lane */
+ prev_offset = curr_offset + instance->lane_length - 1;
+ } else {
+ /* Previous block */
+ prev_offset = curr_offset - 1;
+ }
+
+ memcpy(state, ((instance->memory + prev_offset)->v), ARGON2_BLOCK_SIZE);
+
+ for (i = starting_index; i < instance->segment_length;
+ ++i, ++curr_offset, ++prev_offset) {
+ /*1.1 Rotating prev_offset if needed */
+ if (curr_offset % instance->lane_length == 1) {
+ prev_offset = curr_offset - 1;
+ }
+
+ /* 1.2 Computing the index of the reference block */
+ /* 1.2.1 Taking pseudo-random value from the previous block */
+ if (data_independent_addressing) {
+ if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
+ next_addresses(&address_block, &input_block);
+ }
+ pseudo_rand = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
+ } else {
+ pseudo_rand = instance->memory[prev_offset].v[0];
+ }
+
+ /* 1.2.2 Computing the lane of the reference block */
+ ref_lane = ((pseudo_rand >> 32)) % instance->lanes;
+
+ if ((position.pass == 0) && (position.slice == 0)) {
+ /* Can not reference other lanes yet */
+ ref_lane = position.lane;
+ }
+
+        /* 1.2.3 Computing the number of possible reference blocks within the
+ * lane.
+ */
+ position.index = i;
+ ref_index = index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
+ ref_lane == position.lane);
+
+ /* 2 Creating a new block */
+ ref_block =
+ instance->memory + instance->lane_length * ref_lane + ref_index;
+ curr_block = instance->memory + curr_offset;
+ if (ARGON2_VERSION_10 == instance->version) {
+ /* version 1.2.1 and earlier: overwrite, not XOR */
+ fill_block(state, ref_block, curr_block, 0);
+ } else {
+            if (0 == position.pass) {
+ fill_block(state, ref_block, curr_block, 0);
+ } else {
+ fill_block(state, ref_block, curr_block, 1);
+ }
+ }
+ }
+}
diff --git a/lib/crypto_backend/argon2/ref.c b/lib/crypto_backend/argon2/ref.c
new file mode 100644
index 0000000..10e45eb
--- /dev/null
+++ b/lib/crypto_backend/argon2/ref.c
@@ -0,0 +1,194 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "argon2.h"
+#include "core.h"
+
+#include "blake2/blamka-round-ref.h"
+#include "blake2/blake2-impl.h"
+#include "blake2/blake2.h"
+
+
+/*
+ * Function fills a new memory block and optionally XORs the old block over the new one.
+ * @next_block must be initialized.
+ * @param prev_block Pointer to the previous block
+ * @param ref_block Pointer to the reference block
+ * @param next_block Pointer to the block to be constructed
+ * @param with_xor Whether to XOR into the new block (1) or just overwrite (0)
+ * @pre all block pointers must be valid
+ */
+static void fill_block(const block *prev_block, const block *ref_block,
+ block *next_block, int with_xor) {
+ block blockR, block_tmp;
+ unsigned i;
+
+ copy_block(&blockR, ref_block);
+ xor_block(&blockR, prev_block);
+ copy_block(&block_tmp, &blockR);
+ /* Now blockR = ref_block + prev_block and block_tmp = ref_block + prev_block */
+ if (with_xor) {
+ /* Saving the next block contents for XOR over: */
+ xor_block(&block_tmp, next_block);
+ /* Now blockR = ref_block + prev_block and
+ block_tmp = ref_block + prev_block + next_block */
+ }
+
+ /* Apply Blake2 on columns of 64-bit words: (0,1,...,15) , then
+ (16,17,..31)... finally (112,113,...127) */
+ for (i = 0; i < 8; ++i) {
+ BLAKE2_ROUND_NOMSG(
+ blockR.v[16 * i], blockR.v[16 * i + 1], blockR.v[16 * i + 2],
+ blockR.v[16 * i + 3], blockR.v[16 * i + 4], blockR.v[16 * i + 5],
+ blockR.v[16 * i + 6], blockR.v[16 * i + 7], blockR.v[16 * i + 8],
+ blockR.v[16 * i + 9], blockR.v[16 * i + 10], blockR.v[16 * i + 11],
+ blockR.v[16 * i + 12], blockR.v[16 * i + 13], blockR.v[16 * i + 14],
+ blockR.v[16 * i + 15]);
+ }
+
+ /* Apply Blake2 on rows of 64-bit words: (0,1,16,17,...112,113), then
+ (2,3,18,19,...,114,115).. finally (14,15,30,31,...,126,127) */
+ for (i = 0; i < 8; i++) {
+ BLAKE2_ROUND_NOMSG(
+ blockR.v[2 * i], blockR.v[2 * i + 1], blockR.v[2 * i + 16],
+ blockR.v[2 * i + 17], blockR.v[2 * i + 32], blockR.v[2 * i + 33],
+ blockR.v[2 * i + 48], blockR.v[2 * i + 49], blockR.v[2 * i + 64],
+ blockR.v[2 * i + 65], blockR.v[2 * i + 80], blockR.v[2 * i + 81],
+ blockR.v[2 * i + 96], blockR.v[2 * i + 97], blockR.v[2 * i + 112],
+ blockR.v[2 * i + 113]);
+ }
+
+ copy_block(next_block, &block_tmp);
+ xor_block(next_block, &blockR);
+}
+
+static void next_addresses(block *address_block, block *input_block,
+ const block *zero_block) {
+ input_block->v[6]++;
+ fill_block(zero_block, input_block, address_block, 0);
+ fill_block(zero_block, address_block, address_block, 0);
+}
+
+void fill_segment(const argon2_instance_t *instance,
+ argon2_position_t position) {
+ block *ref_block = NULL, *curr_block = NULL;
+ block address_block, input_block, zero_block;
+ uint64_t pseudo_rand, ref_index, ref_lane;
+ uint32_t prev_offset, curr_offset;
+ uint32_t starting_index;
+ uint32_t i;
+ int data_independent_addressing;
+
+ if (instance == NULL) {
+ return;
+ }
+
+ data_independent_addressing =
+ (instance->type == Argon2_i) ||
+ (instance->type == Argon2_id && (position.pass == 0) &&
+ (position.slice < ARGON2_SYNC_POINTS / 2));
+
+ if (data_independent_addressing) {
+ init_block_value(&zero_block, 0);
+ init_block_value(&input_block, 0);
+
+ input_block.v[0] = position.pass;
+ input_block.v[1] = position.lane;
+ input_block.v[2] = position.slice;
+ input_block.v[3] = instance->memory_blocks;
+ input_block.v[4] = instance->passes;
+ input_block.v[5] = instance->type;
+ }
+
+ starting_index = 0;
+
+ if ((0 == position.pass) && (0 == position.slice)) {
+ starting_index = 2; /* we have already generated the first two blocks */
+
+ /* Don't forget to generate the first block of addresses: */
+ if (data_independent_addressing) {
+ next_addresses(&address_block, &input_block, &zero_block);
+ }
+ }
+
+ /* Offset of the current block */
+ curr_offset = position.lane * instance->lane_length +
+ position.slice * instance->segment_length + starting_index;
+
+ if (0 == curr_offset % instance->lane_length) {
+ /* Last block in this lane */
+ prev_offset = curr_offset + instance->lane_length - 1;
+ } else {
+ /* Previous block */
+ prev_offset = curr_offset - 1;
+ }
+
+ for (i = starting_index; i < instance->segment_length;
+ ++i, ++curr_offset, ++prev_offset) {
+ /*1.1 Rotating prev_offset if needed */
+ if (curr_offset % instance->lane_length == 1) {
+ prev_offset = curr_offset - 1;
+ }
+
+ /* 1.2 Computing the index of the reference block */
+ /* 1.2.1 Taking pseudo-random value from the previous block */
+ if (data_independent_addressing) {
+ if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
+ next_addresses(&address_block, &input_block, &zero_block);
+ }
+ pseudo_rand = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
+ } else {
+ pseudo_rand = instance->memory[prev_offset].v[0];
+ }
+
+ /* 1.2.2 Computing the lane of the reference block */
+ ref_lane = ((pseudo_rand >> 32)) % instance->lanes;
+
+ if ((position.pass == 0) && (position.slice == 0)) {
+ /* Can not reference other lanes yet */
+ ref_lane = position.lane;
+ }
+
+        /* 1.2.3 Computing the number of possible reference blocks within the
+ * lane.
+ */
+ position.index = i;
+ ref_index = index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
+ ref_lane == position.lane);
+
+ /* 2 Creating a new block */
+ ref_block =
+ instance->memory + instance->lane_length * ref_lane + ref_index;
+ curr_block = instance->memory + curr_offset;
+ if (ARGON2_VERSION_10 == instance->version) {
+ /* version 1.2.1 and earlier: overwrite, not XOR */
+ fill_block(instance->memory + prev_offset, ref_block, curr_block, 0);
+ } else {
+            if (0 == position.pass) {
+ fill_block(instance->memory + prev_offset, ref_block,
+ curr_block, 0);
+ } else {
+ fill_block(instance->memory + prev_offset, ref_block,
+ curr_block, 1);
+ }
+ }
+ }
+}
diff --git a/lib/crypto_backend/argon2/thread.c b/lib/crypto_backend/argon2/thread.c
new file mode 100644
index 0000000..9fd15ed
--- /dev/null
+++ b/lib/crypto_backend/argon2/thread.c
@@ -0,0 +1,49 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#if !defined(ARGON2_NO_THREADS)
+
+#include "thread.h"
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
+int argon2_thread_create(argon2_thread_handle_t *handle,
+ argon2_thread_func_t func, void *args) {
+ if (NULL == handle || func == NULL) {
+ return -1;
+ }
+#if defined(_WIN32)
+ *handle = _beginthreadex(NULL, 0, func, args, 0, NULL);
+ return *handle != 0 ? 0 : -1;
+#else
+ return pthread_create(handle, NULL, func, args);
+#endif
+}
+
+int argon2_thread_join(argon2_thread_handle_t handle) {
+#if defined(_WIN32)
+ if (WaitForSingleObject((HANDLE)handle, INFINITE) == WAIT_OBJECT_0) {
+ return CloseHandle((HANDLE)handle) != 0 ? 0 : -1;
+ }
+ return -1;
+#else
+ return pthread_join(handle, NULL);
+#endif
+}
+
+#endif /* ARGON2_NO_THREADS */
diff --git a/lib/crypto_backend/argon2/thread.h b/lib/crypto_backend/argon2/thread.h
new file mode 100644
index 0000000..478e260
--- /dev/null
+++ b/lib/crypto_backend/argon2/thread.h
@@ -0,0 +1,62 @@
+/*
+ * Argon2 reference source code package - reference C implementations
+ *
+ * Copyright 2015
+ * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
+ *
+ * You may use this work under the terms of a Creative Commons CC0 1.0
+ * License/Waiver or the Apache Public License 2.0, at your option. The terms of
+ * these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * You should have received a copy of both of these licenses along with this
+ * software. If not, they may be obtained at the above URLs.
+ */
+
+#ifndef ARGON2_THREAD_H
+#define ARGON2_THREAD_H
+
+#if !defined(ARGON2_NO_THREADS)
+
+/*
+ Here we implement an abstraction layer for the simple requirements
+ of the Argon2 code. We only require two primitives---thread creation
+ and joining---so full emulation of the pthreads API is unwarranted.
+ Currently we wrap pthreads and Win32 threads.
+
+ The API defines 2 types: the function pointer type,
+ argon2_thread_func_t,
+ and the type of the thread handle---argon2_thread_handle_t.
+*/
+#if defined(_WIN32)
+#include <process.h>
+typedef unsigned(__stdcall *argon2_thread_func_t)(void *);
+typedef uintptr_t argon2_thread_handle_t;
+#else
+#include <pthread.h>
+typedef void *(*argon2_thread_func_t)(void *);
+typedef pthread_t argon2_thread_handle_t;
+#endif
+
+/* Creates a thread
+ * @param handle pointer to a thread handle, which is the output of this
+ * function. Must not be NULL.
+ * @param func A function pointer for the thread's entry point. Must not be
+ * NULL.
+ * @param args Pointer that is passed as an argument to @func. May be NULL.
+ * @return 0 if @handle and @func are valid pointers and a thread is successfully
+ * created.
+ */
+int argon2_thread_create(argon2_thread_handle_t *handle,
+ argon2_thread_func_t func, void *args);
+
+/* Waits for a thread to terminate
+ * @param handle Handle to a thread created with argon2_thread_create.
+ * @return 0 if @handle is a valid handle, and joining completed successfully.
+*/
+int argon2_thread_join(argon2_thread_handle_t handle);
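+
+/*
+ * Usage sketch (illustrative, pthreads-style entry point; on Win32 the
+ * function must use the __stdcall signature above instead):
+ *
+ *   static void *worker(void *arg) { return arg; }
+ *   ...
+ *   argon2_thread_handle_t h;
+ *   if (argon2_thread_create(&h, worker, NULL) == 0)
+ *       (void)argon2_thread_join(h);
+ */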
+
+#endif /* ARGON2_NO_THREADS */
+#endif
diff --git a/lib/crypto_backend/argon2_generic.c b/lib/crypto_backend/argon2_generic.c
new file mode 100644
index 0000000..0ce67da
--- /dev/null
+++ b/lib/crypto_backend/argon2_generic.c
@@ -0,0 +1,79 @@
+/*
+ * Argon2 PBKDF2 library wrapper
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include "crypto_backend_internal.h"
+#if HAVE_ARGON2_H
+#include <argon2.h>
+#else
+#include "argon2/argon2.h"
+#endif
+
+#define CONST_CAST(x) (x)(uintptr_t)
+
+int argon2(const char *type, const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel)
+{
+#if !USE_INTERNAL_ARGON2 && !HAVE_ARGON2_H
+ return -EINVAL;
+#else
+ argon2_type atype;
+ argon2_context context = {
+ .flags = ARGON2_DEFAULT_FLAGS,
+ .version = ARGON2_VERSION_NUMBER,
+ .t_cost = (uint32_t)iterations,
+ .m_cost = (uint32_t)memory,
+ .lanes = (uint32_t)parallel,
+ .threads = (uint32_t)parallel,
+ .out = (uint8_t *)key,
+ .outlen = (uint32_t)key_length,
+ .pwd = CONST_CAST(uint8_t *)password,
+ .pwdlen = (uint32_t)password_length,
+ .salt = CONST_CAST(uint8_t *)salt,
+ .saltlen = (uint32_t)salt_length,
+ };
+ int r;
+
+ if (!strcmp(type, "argon2i"))
+ atype = Argon2_i;
+	else if (!strcmp(type, "argon2id"))
+ atype = Argon2_id;
+ else
+ return -EINVAL;
+
+ switch (argon2_ctx(&context, atype)) {
+ case ARGON2_OK:
+ r = 0;
+ break;
+ case ARGON2_MEMORY_ALLOCATION_ERROR:
+ case ARGON2_FREE_MEMORY_CBK_NULL:
+ case ARGON2_ALLOCATE_MEMORY_CBK_NULL:
+ r = -ENOMEM;
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+#endif
+}
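+
+/*
+ * Usage sketch (illustrative, hypothetical cost values):
+ *
+ *   char key[32];
+ *   int r = argon2("argon2id", "password", 8, "saltsalt", 8,
+ *                  key, sizeof(key), 4, 65536, 2);
+ *   // r == 0 on success, -EINVAL or -ENOMEM on failure
+ */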
diff --git a/lib/crypto_backend/base64.c b/lib/crypto_backend/base64.c
new file mode 100644
index 0000000..42f70cb
--- /dev/null
+++ b/lib/crypto_backend/base64.c
@@ -0,0 +1,276 @@
+/*
+ * Base64 "Not encryption" helpers, copied and adapted from systemd project.
+ *
+ * Copyright (C) 2010 Lennart Poettering
+ *
+ * cryptsetup related changes
+ * Copyright (C) 2021-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <limits.h>
+
+#include "crypto_backend.h"
+
+#define WHITESPACE " \t\n\r"
+
+/* https://tools.ietf.org/html/rfc4648#section-4 */
+static char base64char(int x)
+{
+ static const char table[64] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/";
+ return table[x & 63];
+}
+
+static int unbase64char(char c)
+{
+ unsigned offset;
+
+ if (c >= 'A' && c <= 'Z')
+ return c - 'A';
+
+ offset = 'Z' - 'A' + 1;
+
+ if (c >= 'a' && c <= 'z')
+ return c - 'a' + offset;
+
+ offset += 'z' - 'a' + 1;
+
+ if (c >= '0' && c <= '9')
+ return c - '0' + offset;
+
+ offset += '9' - '0' + 1;
+
+ if (c == '+')
+ return offset;
+
+ offset++;
+
+ if (c == '/')
+ return offset;
+
+ return -EINVAL;
+}
+
+int crypt_base64_encode(char **out, size_t *out_length, const char *in, size_t in_length)
+{
+ char *r, *z;
+ const uint8_t *x;
+
+ assert(in || in_length == 0);
+ assert(out);
+
+	/* Three input bytes make four output bytes; padding is added, so we must round up */
+ z = r = malloc(4 * (in_length + 2) / 3 + 1);
+ if (!r)
+ return -ENOMEM;
+
+ for (x = (const uint8_t *)in; x < (const uint8_t*)in + (in_length / 3) * 3; x += 3) {
+ /* x[0] == XXXXXXXX; x[1] == YYYYYYYY; x[2] == ZZZZZZZZ */
+ *(z++) = base64char(x[0] >> 2); /* 00XXXXXX */
+ *(z++) = base64char((x[0] & 3) << 4 | x[1] >> 4); /* 00XXYYYY */
+ *(z++) = base64char((x[1] & 15) << 2 | x[2] >> 6); /* 00YYYYZZ */
+ *(z++) = base64char(x[2] & 63); /* 00ZZZZZZ */
+ }
+
+ switch (in_length % 3) {
+ case 2:
+ *(z++) = base64char(x[0] >> 2); /* 00XXXXXX */
+ *(z++) = base64char((x[0] & 3) << 4 | x[1] >> 4); /* 00XXYYYY */
+ *(z++) = base64char((x[1] & 15) << 2); /* 00YYYY00 */
+ *(z++) = '=';
+
+ break;
+ case 1:
+ *(z++) = base64char(x[0] >> 2); /* 00XXXXXX */
+ *(z++) = base64char((x[0] & 3) << 4); /* 00XX0000 */
+ *(z++) = '=';
+ *(z++) = '=';
+
+ break;
+ }
+
+ *z = 0;
+ *out = r;
+ if (out_length)
+ *out_length = z - r;
+ return 0;
+}
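+
+/*
+ * Usage sketch (illustrative): the caller frees the returned buffer.
+ *
+ *   char *b64 = NULL;
+ *   size_t b64_len;
+ *   if (!crypt_base64_encode(&b64, &b64_len, "ab", 2)) {
+ *           // b64 == "YWI=", b64_len == 4
+ *           free(b64);
+ *   }
+ */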
+
+static int unbase64_next(const char **p, size_t *l)
+{
+ int ret;
+
+ assert(p);
+ assert(l);
+
+ /* Find the next non-whitespace character, and decode it. If we find padding, we return it as INT_MAX. We
+ * greedily skip all preceding and all following whitespace. */
+
+ for (;;) {
+ if (*l == 0)
+ return -EPIPE;
+
+ if (!strchr(WHITESPACE, **p))
+ break;
+
+ /* Skip leading whitespace */
+ (*p)++, (*l)--;
+ }
+
+ if (**p == '=')
+ ret = INT_MAX; /* return padding as INT_MAX */
+ else {
+ ret = unbase64char(**p);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (;;) {
+ (*p)++, (*l)--;
+
+ if (*l == 0)
+ break;
+ if (!strchr(WHITESPACE, **p))
+ break;
+
+ /* Skip following whitespace */
+ }
+
+ return ret;
+}
+
+int crypt_base64_decode(char **out, size_t *out_length, const char *in, size_t in_length)
+{
+ uint8_t *buf = NULL;
+ const char *x;
+ uint8_t *z;
+ size_t len;
+ int r;
+
+ assert(in || in_length == 0);
+ assert(out);
+ assert(out_length);
+
+ if (in_length == (size_t) -1)
+ in_length = strlen(in);
+
+	/* A group of four input bytes needs three output bytes; an incomplete trailing group adds one or two extra
+ * bytes. Note that this calculation is an upper boundary, as we ignore whitespace while decoding */
+ len = (in_length / 4) * 3 + (in_length % 4 != 0 ? (in_length % 4) - 1 : 0);
+
+ buf = malloc(len + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ for (x = in, z = buf;;) {
+ int a, b, c, d; /* a == 00XXXXXX; b == 00YYYYYY; c == 00ZZZZZZ; d == 00WWWWWW */
+
+ a = unbase64_next(&x, &in_length);
+ if (a == -EPIPE) /* End of string */
+ break;
+ if (a < 0) {
+ r = a;
+ goto err;
+ }
+ if (a == INT_MAX) { /* Padding is not allowed at the beginning of a 4ch block */
+ r = -EINVAL;
+ goto err;
+ }
+
+ b = unbase64_next(&x, &in_length);
+ if (b < 0) {
+ r = b;
+ goto err;
+ }
+ if (b == INT_MAX) { /* Padding is not allowed at the second character of a 4ch block either */
+ r = -EINVAL;
+ goto err;
+ }
+
+ c = unbase64_next(&x, &in_length);
+ if (c < 0) {
+ r = c;
+ goto err;
+ }
+
+ d = unbase64_next(&x, &in_length);
+ if (d < 0) {
+ r = d;
+ goto err;
+ }
+
+ if (c == INT_MAX) { /* Padding at the third character */
+
+ if (d != INT_MAX) { /* If the third character is padding, the fourth must be too */
+ r = -EINVAL;
+ goto err;
+ }
+
+ /* b == 00YY0000 */
+ if (b & 15) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ if (in_length > 0) { /* Trailing rubbish? */
+ r = -ENAMETOOLONG;
+ goto err;
+ }
+
+ *(z++) = (uint8_t) a << 2 | (uint8_t) (b >> 4); /* XXXXXXYY */
+ break;
+ }
+
+ if (d == INT_MAX) {
+ /* c == 00ZZZZ00 */
+ if (c & 3) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ if (in_length > 0) { /* Trailing rubbish? */
+ r = -ENAMETOOLONG;
+ goto err;
+ }
+
+ *(z++) = (uint8_t) a << 2 | (uint8_t) b >> 4; /* XXXXXXYY */
+ *(z++) = (uint8_t) b << 4 | (uint8_t) c >> 2; /* YYYYZZZZ */
+ break;
+ }
+
+ *(z++) = (uint8_t) a << 2 | (uint8_t) b >> 4; /* XXXXXXYY */
+ *(z++) = (uint8_t) b << 4 | (uint8_t) c >> 2; /* YYYYZZZZ */
+ *(z++) = (uint8_t) c << 6 | (uint8_t) d; /* ZZWWWWWW */
+ }
+
+ *z = 0;
+
+ *out_length = (size_t) (z - buf);
+ *out = (char *)buf;
+ return 0;
+err:
+ free(buf);
+
+ /* Ignore other errors in crypt_backend */
+ if (r != -ENOMEM)
+ r = -EINVAL;
+
+ return r;
+}
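+
+/*
+ * Usage sketch (illustrative): embedded whitespace is skipped and the
+ * '=' padding is validated.
+ *
+ *   char *bin = NULL;
+ *   size_t bin_len = 0;
+ *   if (!crypt_base64_decode(&bin, &bin_len, "YWI=\n", (size_t)-1)) {
+ *           // bin_len == 2, bin[0] == 'a', bin[1] == 'b'
+ *           free(bin);
+ *   }
+ */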
diff --git a/lib/crypto_backend/cipher_check.c b/lib/crypto_backend/cipher_check.c
new file mode 100644
index 0000000..98ec1a5
--- /dev/null
+++ b/lib/crypto_backend/cipher_check.c
@@ -0,0 +1,161 @@
+/*
+ * Cipher performance check
+ *
+ * Copyright (C) 2018-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2018-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <time.h>
+#include "crypto_backend_internal.h"
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
+#endif
+
+/*
+ * This is not simulating storage, so using the disk block size would cause
+ * extreme overhead. Use a fixed block size for which results are more reliable.
+ */
+#define CIPHER_BLOCK_BYTES 65536
+
+/*
+ * If the measured value is lower, encrypted buffer is probably too small
+ * and calculated values are not reliable.
+ */
+#define CIPHER_TIME_MIN_MS 0.001
+
+/*
+ * The whole test depends on the Linux kernel usermode crypto API for now.
+ * (The same implementations are used in dm-crypt though.)
+ */
+
+static int time_ms(struct timespec *start, struct timespec *end, double *ms)
+{
+ double start_ms, end_ms;
+
+ start_ms = start->tv_sec * 1000.0 + start->tv_nsec / (1000.0 * 1000);
+ end_ms = end->tv_sec * 1000.0 + end->tv_nsec / (1000.0 * 1000);
+
+ *ms = end_ms - start_ms;
+ return 0;
+}
+
+static int cipher_perf_one(const char *name, const char *mode, char *buffer, size_t buffer_size,
+ const char *key, size_t key_size, const char *iv, size_t iv_size, int enc)
+{
+ struct crypt_cipher_kernel cipher;
+ size_t done = 0, block = CIPHER_BLOCK_BYTES;
+ int r;
+
+ if (buffer_size < block)
+ block = buffer_size;
+
+ r = crypt_cipher_init_kernel(&cipher, name, mode, key, key_size);
+ if (r < 0)
+ return r;
+
+ while (done < buffer_size) {
+ if ((done + block) > buffer_size)
+ block = buffer_size - done;
+
+ if (enc)
+ r = crypt_cipher_encrypt_kernel(&cipher, &buffer[done], &buffer[done],
+ block, iv, iv_size);
+ else
+ r = crypt_cipher_decrypt_kernel(&cipher, &buffer[done], &buffer[done],
+ block, iv, iv_size);
+ if (r < 0)
+ break;
+
+ done += block;
+ }
+
+ crypt_cipher_destroy_kernel(&cipher);
+
+ return r;
+}
+
+static int cipher_measure(const char *name, const char *mode, char *buffer, size_t buffer_size,
+ const char *key, size_t key_size, const char *iv, size_t iv_size,
+ int encrypt, double *ms)
+{
+ struct timespec start, end;
+ int r;
+
+ /*
+ * Using getrusage would be better here but the precision
+	 * is not adequate, so we stick with CLOCK_MONOTONIC.
+ */
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &start) < 0)
+ return -EINVAL;
+
+ r = cipher_perf_one(name, mode, buffer, buffer_size, key, key_size, iv, iv_size, encrypt);
+ if (r < 0)
+ return r;
+
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &end) < 0)
+ return -EINVAL;
+
+ r = time_ms(&start, &end, ms);
+ if (r < 0)
+ return r;
+
+ if (*ms < CIPHER_TIME_MIN_MS)
+ return -ERANGE;
+
+ return 0;
+}
+
+static double speed_mbs(unsigned long bytes, double ms)
+{
+ double speed = bytes, s = ms / 1000.;
+
+ return speed / (1024 * 1024) / s;
+}
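+
+/*
+ * Illustrative check: 512 MiB processed in 500 ms gives
+ * speed_mbs(512UL * 1024 * 1024, 500.0) == 1024.0 (MiB/s).
+ */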
+
+int crypt_cipher_perf_kernel(const char *name, const char *mode, char *buffer, size_t buffer_size,
+ const char *key, size_t key_size, const char *iv, size_t iv_size,
+ double *encryption_mbs, double *decryption_mbs)
+{
+ double ms_enc, ms_dec, ms;
+ int r, repeat_enc, repeat_dec;
+
+ ms_enc = 0.0;
+	repeat_enc = 0; /* counts completed encryption runs */
+ while (ms_enc < 1000.0) {
+ r = cipher_measure(name, mode, buffer, buffer_size, key, key_size, iv, iv_size, 1, &ms);
+ if (r < 0)
+ return r;
+ ms_enc += ms;
+ repeat_enc++;
+ }
+
+ ms_dec = 0.0;
+	repeat_dec = 0; /* counts completed decryption runs */
+ while (ms_dec < 1000.0) {
+ r = cipher_measure(name, mode, buffer, buffer_size, key, key_size, iv, iv_size, 0, &ms);
+ if (r < 0)
+ return r;
+ ms_dec += ms;
+ repeat_dec++;
+ }
+
+ *encryption_mbs = speed_mbs(buffer_size * repeat_enc, ms_enc);
+ *decryption_mbs = speed_mbs(buffer_size * repeat_dec, ms_dec);
+
+ return 0;
+}
diff --git a/lib/crypto_backend/cipher_generic.c b/lib/crypto_backend/cipher_generic.c
new file mode 100644
index 0000000..b3a4407
--- /dev/null
+++ b/lib/crypto_backend/cipher_generic.c
@@ -0,0 +1,89 @@
+/*
+ * Linux kernel cipher generic utilities
+ *
+ * Copyright (C) 2018-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2018-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdbool.h>
+#include <errno.h>
+#include "crypto_backend.h"
+
+struct cipher_alg {
+ const char *name;
+ const char *mode;
+ int blocksize;
+ bool wrapped_key;
+};
+
+static const struct cipher_alg cipher_algs[] = {
+ { "cipher_null", NULL, 16, false },
+ { "aes", NULL, 16, false },
+ { "serpent", NULL, 16, false },
+ { "twofish", NULL, 16, false },
+ { "anubis", NULL, 16, false },
+ { "blowfish", NULL, 8, false },
+ { "camellia", NULL, 16, false },
+ { "cast5", NULL, 8, false },
+ { "cast6", NULL, 16, false },
+ { "des", NULL, 8, false },
+ { "des3_ede", NULL, 8, false },
+ { "khazad", NULL, 8, false },
+ { "seed", NULL, 16, false },
+ { "tea", NULL, 8, false },
+ { "xtea", NULL, 8, false },
+ { "paes", NULL, 16, true }, /* protected AES, s390 wrapped key scheme */
+ { "xchacha12,aes", "adiantum", 32, false },
+ { "xchacha20,aes", "adiantum", 32, false },
+ { "sm4", NULL, 16, false },
+ { NULL, NULL, 0, false }
+};
+
+static const struct cipher_alg *_get_alg(const char *name, const char *mode)
+{
+ int i = 0;
+
+ while (name && cipher_algs[i].name) {
+ if (!strcasecmp(name, cipher_algs[i].name))
+ if (!mode || !cipher_algs[i].mode ||
+ !strncasecmp(mode, cipher_algs[i].mode, strlen(cipher_algs[i].mode)))
+ return &cipher_algs[i];
+ i++;
+ }
+ return NULL;
+}
+
+int crypt_cipher_ivsize(const char *name, const char *mode)
+{
+ const struct cipher_alg *ca = _get_alg(name, mode);
+
+ if (!ca)
+ return -EINVAL;
+
+ if (mode && !strcasecmp(mode, "ecb"))
+ return 0;
+
+ return ca->blocksize;
+}
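+
+/*
+ * Usage sketch (illustrative): crypt_cipher_ivsize("aes", "xts-plain64")
+ * returns 16, crypt_cipher_ivsize("aes", "ecb") returns 0 (ECB uses no
+ * IV), and an unknown cipher name returns -EINVAL.
+ */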
+
+int crypt_cipher_wrapped_key(const char *name, const char *mode)
+{
+ const struct cipher_alg *ca = _get_alg(name, mode);
+
+ return ca ? (int)ca->wrapped_key : 0;
+}
diff --git a/lib/crypto_backend/crc32.c b/lib/crypto_backend/crc32.c
new file mode 100644
index 0000000..9009b02
--- /dev/null
+++ b/lib/crypto_backend/crc32.c
@@ -0,0 +1,183 @@
+/*
+ * COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
+ * code or tables extracted from it, as desired without restriction.
+ *
+ * First, the polynomial itself and its table of feedback terms. The
+ * polynomial is
+ * X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
+ *
+ * Note that we take it "backwards" and put the highest-order term in
+ * the lowest-order bit. The X^32 term is "implied"; the LSB is the
+ * X^31 term, etc. The X^0 term (usually shown as "+1") results in
+ * the MSB being 1.
+ *
+ * Note that the usual hardware shift register implementation, which
+ * is what we're using (we're merely optimizing it by doing eight-bit
+ * chunks at a time) shifts bits into the lowest-order term. In our
+ * implementation, that means shifting towards the right. Why do we
+ * do it this way? Because the calculated CRC must be transmitted in
+ * order from highest-order term to lowest-order term. UARTs transmit
+ * characters in order from LSB to MSB. By storing the CRC this way,
+ * we hand it to the UART in the order low-byte to high-byte; the UART
+ * sends each low-bit to high-bit; and the result is transmission bit
+ * by bit from highest- to lowest-order term without requiring any bit
+ * shuffling on our part. Reception works similarly.
+ *
+ * The feedback terms table consists of 256 32-bit entries. Notes:
+ *
+ * The table can be generated at runtime if desired; code to do so
+ * is shown later. It might not be obvious, but the feedback
+ * terms simply represent the results of eight shift/xor opera-
+ * tions for all combinations of data and CRC register values.
+ *
+ * The values must be right-shifted by eight bits by the "updcrc"
+ * logic; the shift must be unsigned (bring in zeroes). On some
+ * hardware you could probably optimize the shift in assembler by
+ * using byte-swap instructions.
+ * polynomial $edb88320
+ *
+ */
+
+#include <stdio.h>
+
+#include "crypto_backend.h"
+
+static const uint32_t crc32_tab[] = {
+ 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
+ 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
+ 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+ 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+ 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+ 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+ 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+ 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+ 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+ 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+ 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+ 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+ 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+ 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
+ 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
+ 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
+ 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+ 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+ 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+ 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+ 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+ 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+ 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+ 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+ 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+ 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+ 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
+ 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
+ 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
+ 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+ 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+ 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+ 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+ 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+ 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+ 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+ 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+ 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+ 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+ 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
+ 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
+ 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+ 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+ 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+ 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+ 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+ 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+ 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+ 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+ 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+ 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+ 0x2d02ef8dL
+};
+
+static const uint32_t crc32c_tab[] = {
+ 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, 0xC79A971FL,
+ 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, 0x8AD958CFL, 0x78B2DBCCL,
+ 0x6BE22838L, 0x9989AB3BL, 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L,
+ 0x5E133C24L, 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
+ 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, 0x9A879FA0L,
+ 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, 0x5D1D08BFL, 0xAF768BBCL,
+ 0xBC267848L, 0x4E4DFB4BL, 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L,
+ 0x33ED7D2AL, 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
+ 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, 0x6DFE410EL,
+ 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, 0x30E349B1L, 0xC288CAB2L,
+ 0xD1D83946L, 0x23B3BA45L, 0xF779DEAEL, 0x05125DADL, 0x1642AE59L,
+ 0xE4292D5AL, 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
+ 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, 0x417B1DBCL,
+ 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, 0x86E18AA3L, 0x748A09A0L,
+ 0x67DAFA54L, 0x95B17957L, 0xCBA24573L, 0x39C9C670L, 0x2A993584L,
+ 0xD8F2B687L, 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
+ 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, 0x96BF4DCCL,
+ 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, 0xDBFC821CL, 0x2997011FL,
+ 0x3AC7F2EBL, 0xC8AC71E8L, 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L,
+ 0x0F36E6F7L, 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
+ 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, 0xEB1FCBADL,
+ 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, 0x2C855CB2L, 0xDEEEDFB1L,
+ 0xCDBE2C45L, 0x3FD5AF46L, 0x7198540DL, 0x83F3D70EL, 0x90A324FAL,
+ 0x62C8A7F9L, 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
+ 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, 0x3CDB9BDDL,
+ 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, 0x82F63B78L, 0x709DB87BL,
+ 0x63CD4B8FL, 0x91A6C88CL, 0x456CAC67L, 0xB7072F64L, 0xA457DC90L,
+ 0x563C5F93L, 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
+ 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, 0x92A8FC17L,
+ 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, 0x55326B08L, 0xA759E80BL,
+ 0xB4091BFFL, 0x466298FCL, 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL,
+ 0x0B21572CL, 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
+ 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, 0x65D122B9L,
+ 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, 0x2892ED69L, 0xDAF96E6AL,
+ 0xC9A99D9EL, 0x3BC21E9DL, 0xEF087A76L, 0x1D63F975L, 0x0E330A81L,
+ 0xFC588982L, 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
+ 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, 0x38CC2A06L,
+ 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, 0xFF56BD19L, 0x0D3D3E1AL,
+ 0x1E6DCDEEL, 0xEC064EEDL, 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L,
+ 0xD0DDD530L, 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
+ 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, 0x8ECEE914L,
+ 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, 0xD3D3E1ABL, 0x21B862A8L,
+ 0x32E8915CL, 0xC083125FL, 0x144976B4L, 0xE622F5B7L, 0xF5720643L,
+ 0x07198540L, 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
+ 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, 0xE330A81AL,
+ 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, 0x24AA3F05L, 0xD6C1BC06L,
+ 0xC5914FF2L, 0x37FACCF1L, 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L,
+ 0x7AB90321L, 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
+ 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, 0x34F4F86AL,
+ 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, 0x79B737BAL, 0x8BDCB4B9L,
+ 0x988C474DL, 0x6AE7C44EL, 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L,
+ 0xAD7D5351L
+};
+
+/*
+ * This is a generic crc32() function; it takes a seed as an argument,
+ * and does __not__ xor at the end. Then individual users can do
+ * whatever they need.
+ */
+static uint32_t compute_crc32(
+ const uint32_t *crc32_tab,
+ uint32_t seed,
+ const unsigned char *buf,
+ size_t len)
+{
+ uint32_t crc = seed;
+ const unsigned char *p = buf;
+
+ while(len-- > 0)
+ crc = crc32_tab[(crc ^ *p++) & 0xff] ^ (crc >> 8);
+
+ return crc;
+}
+
+uint32_t crypt_crc32(uint32_t seed, const unsigned char *buf, size_t len)
+{
+ return compute_crc32(crc32_tab, seed, buf, len);
+}
+
+uint32_t crypt_crc32c(uint32_t seed, const unsigned char *buf, size_t len)
+{
+ return compute_crc32(crc32c_tab, seed, buf, len);
+}
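+
+/*
+ * Usage sketch (illustrative): a caller wanting the common zlib-style
+ * CRC-32 seeds with all ones and inverts the result:
+ *
+ *   uint32_t crc = crypt_crc32(0xFFFFFFFF, buf, len) ^ 0xFFFFFFFF;
+ */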
diff --git a/lib/crypto_backend/crypto_backend.h b/lib/crypto_backend/crypto_backend.h
new file mode 100644
index 0000000..88562e9
--- /dev/null
+++ b/lib/crypto_backend/crypto_backend.h
@@ -0,0 +1,161 @@
+/*
+ * crypto backend implementation
+ *
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef _CRYPTO_BACKEND_H
+#define _CRYPTO_BACKEND_H
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#ifdef HAVE_UCHAR_H
+#include <uchar.h>
+#else
+#define char32_t uint32_t
+#define char16_t uint16_t
+#endif
+
+struct crypt_hash;
+struct crypt_hmac;
+struct crypt_cipher;
+struct crypt_storage;
+
+int crypt_backend_init(bool fips);
+void crypt_backend_destroy(void);
+
+#define CRYPT_BACKEND_KERNEL (1 << 0) /* Crypto uses kernel part, for benchmark */
+#define CRYPT_BACKEND_PBKDF2_INT (1 << 1) /* Iteration in PBKDF2 is signed int and can overflow */
+
+uint32_t crypt_backend_flags(void);
+const char *crypt_backend_version(void);
+
+/* HASH */
+int crypt_hash_size(const char *name);
+int crypt_hash_init(struct crypt_hash **ctx, const char *name);
+int crypt_hash_write(struct crypt_hash *ctx, const char *buffer, size_t length);
+int crypt_hash_final(struct crypt_hash *ctx, char *buffer, size_t length);
+void crypt_hash_destroy(struct crypt_hash *ctx);
+
+/* HMAC */
+int crypt_hmac_size(const char *name);
+int crypt_hmac_init(struct crypt_hmac **ctx, const char *name,
+ const void *key, size_t key_length);
+int crypt_hmac_write(struct crypt_hmac *ctx, const char *buffer, size_t length);
+int crypt_hmac_final(struct crypt_hmac *ctx, char *buffer, size_t length);
+void crypt_hmac_destroy(struct crypt_hmac *ctx);
+
+/* RNG (if fips parameter set, must provide FIPS compliance) */
+enum { CRYPT_RND_NORMAL = 0, CRYPT_RND_KEY = 1, CRYPT_RND_SALT = 2 };
+int crypt_backend_rng(char *buffer, size_t length, int quality, int fips);
+
+/* PBKDF */
+struct crypt_pbkdf_limits {
+ uint32_t min_iterations, max_iterations;
+ uint32_t min_memory, max_memory, min_bench_memory;
+ uint32_t min_parallel, max_parallel;
+};
+
+int crypt_pbkdf_get_limits(const char *kdf, struct crypt_pbkdf_limits *l);
+int crypt_pbkdf(const char *kdf, const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel);
+int crypt_pbkdf_perf(const char *kdf, const char *hash,
+ const char *password, size_t password_size,
+ const char *salt, size_t salt_size,
+ size_t volume_key_size, uint32_t time_ms,
+ uint32_t max_memory_kb, uint32_t parallel_threads,
+ uint32_t *iterations_out, uint32_t *memory_out,
+ int (*progress)(uint32_t time_ms, void *usrptr), void *usrptr);
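+
+/*
+ * Usage sketch (illustrative, hypothetical cost values): derive a 32-byte
+ * key with PBKDF2-SHA256 (memory and parallel are ignored for "pbkdf2"):
+ *
+ *   char key[32];
+ *   int r = crypt_pbkdf("pbkdf2", "sha256", "pass", 4, "salt", 4,
+ *                       key, sizeof(key), 1000, 0, 0);
+ */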
+
+/* CRC32 */
+uint32_t crypt_crc32(uint32_t seed, const unsigned char *buf, size_t len);
+uint32_t crypt_crc32c(uint32_t seed, const unsigned char *buf, size_t len);
+
+/* Base64 */
+int crypt_base64_encode(char **out, size_t *out_length, const char *in, size_t in_length);
+int crypt_base64_decode(char **out, size_t *out_length, const char *in, size_t in_length);
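+
+/*
+ * Editor's sketch (not part of upstream): round-tripping a buffer through
+ * the Base64 helpers. Judging by the double-pointer signatures, both
+ * allocate *out, which the caller must free() (needs <stdlib.h>).
+ */
+static inline int example_base64_roundtrip(const char *in, size_t in_len)
+{
+	char *b64 = NULL, *raw = NULL;
+	size_t b64_len = 0, raw_len = 0;
+	int r;
+
+	r = crypt_base64_encode(&b64, &b64_len, in, in_len);
+	if (!r)
+		r = crypt_base64_decode(&raw, &raw_len, b64, b64_len);
+
+	free(b64);
+	free(raw);
+	return r;
+}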
+
+/* UTF8/16 */
+int crypt_utf16_to_utf8(char **out, const char16_t *s, size_t length /* bytes! */);
+int crypt_utf8_to_utf16(char16_t **out, const char *s, size_t length);
+
+/* Block ciphers */
+int crypt_cipher_ivsize(const char *name, const char *mode);
+int crypt_cipher_wrapped_key(const char *name, const char *mode);
+int crypt_cipher_init(struct crypt_cipher **ctx, const char *name,
+ const char *mode, const void *key, size_t key_length);
+void crypt_cipher_destroy(struct crypt_cipher *ctx);
+int crypt_cipher_encrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length);
+int crypt_cipher_decrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length);
+bool crypt_cipher_kernel_only(struct crypt_cipher *ctx);
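+
+/*
+ * Editor's sketch (not part of upstream): one-shot AES-XTS encryption of a
+ * 512-byte sector. XTS takes a split key, so 64 bytes gives AES-256-XTS.
+ */
+static inline int example_xts_encrypt(const void *key, /* 64 bytes */
+				      const char *in, char *out, /* 512 bytes each */
+				      const char iv[16])
+{
+	struct crypt_cipher *c;
+	int r;
+
+	r = crypt_cipher_init(&c, "aes", "xts", key, 64);
+	if (r < 0)
+		return r;
+
+	r = crypt_cipher_encrypt(c, in, out, 512, iv, 16);
+	crypt_cipher_destroy(c);
+	return r;
+}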
+
+/* Benchmark of kernel cipher performance */
+int crypt_cipher_perf_kernel(const char *name, const char *mode, char *buffer, size_t buffer_size,
+ const char *key, size_t key_size, const char *iv, size_t iv_size,
+ double *encryption_mbs, double *decryption_mbs);
+
+/* Check availability of a cipher (in kernel only) */
+int crypt_cipher_check_kernel(const char *name, const char *mode,
+ const char *integrity, size_t key_length);
+
+/* Storage encryption wrappers */
+int crypt_storage_init(struct crypt_storage **ctx, size_t sector_size,
+ const char *cipher, const char *cipher_mode,
+ const void *key, size_t key_length, bool large_iv);
+void crypt_storage_destroy(struct crypt_storage *ctx);
+int crypt_storage_decrypt(struct crypt_storage *ctx, uint64_t iv_offset,
+ uint64_t length, char *buffer);
+int crypt_storage_encrypt(struct crypt_storage *ctx, uint64_t iv_offset,
+ uint64_t length, char *buffer);
+
+bool crypt_storage_kernel_only(struct crypt_storage *ctx);
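+
+/*
+ * Editor's sketch (not part of upstream): encrypting eight 512-byte sectors
+ * through the storage wrapper. Based on its use elsewhere in the tree, the
+ * assumption here is that iv_offset is counted in sectors while length is
+ * in bytes (a multiple of the configured sector size).
+ */
+static inline int example_storage_encrypt(const void *key, size_t key_len, char *buf)
+{
+	struct crypt_storage *s;
+	int r;
+
+	r = crypt_storage_init(&s, 512, "aes", "xts-plain64", key, key_len, false);
+	if (r < 0)
+		return r;
+
+	r = crypt_storage_encrypt(s, 0, 8 * 512, buf);
+	crypt_storage_destroy(s);
+	return r;
+}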
+
+/* Temporary Bitlk helper */
+int crypt_bitlk_decrypt_key(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length);
+
+/* Memzero helper (memset on stack can be optimized out) */
+static inline void crypt_backend_memzero(void *s, size_t n)
+{
+#ifdef HAVE_EXPLICIT_BZERO
+ explicit_bzero(s, n);
+#else
+ volatile uint8_t *p = (volatile uint8_t *)s;
+ while(n--) *p++ = 0;
+#endif
+}
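+
+/*
+ * Editor's note: a plain memset() of a buffer that is never read again is a
+ * dead store the compiler may elide, e.g.
+ *
+ *	char key[32];
+ *	... use key ...
+ *	memset(key, 0, sizeof(key)); // may be optimized out
+ *
+ * explicit_bzero() and the volatile byte walk above cannot be removed that way.
+ */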
+
+/* Memcmp helper (constant-time comparison, returns 0 when equal) */
+int crypt_backend_memeq(const void *m1, const void *m2, size_t n);
+
+/* crypto backend running in FIPS mode */
+bool crypt_fips_mode(void);
+
+#endif /* _CRYPTO_BACKEND_H */
diff --git a/lib/crypto_backend/crypto_backend_internal.h b/lib/crypto_backend/crypto_backend_internal.h
new file mode 100644
index 0000000..9b1cc69
--- /dev/null
+++ b/lib/crypto_backend/crypto_backend_internal.h
@@ -0,0 +1,75 @@
+/*
+ * crypto backend implementation
+ *
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef _CRYPTO_BACKEND_INTERNAL_H
+#define _CRYPTO_BACKEND_INTERNAL_H
+
+#include "crypto_backend.h"
+
+/* internal PBKDF2 implementation */
+int pkcs5_pbkdf2(const char *hash,
+ const char *P, size_t Plen,
+ const char *S, size_t Slen,
+ unsigned int c,
+ unsigned int dkLen, char *DK,
+ unsigned int hash_block_size);
+
+/* Argon2 implementation wrapper */
+int argon2(const char *type, const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel);
+
+/* Block ciphers: fallback to kernel crypto API */
+
+struct crypt_cipher_kernel {
+ int tfmfd;
+ int opfd;
+};
+
+int crypt_cipher_init_kernel(struct crypt_cipher_kernel *ctx, const char *name,
+ const char *mode, const void *key, size_t key_length);
+int crypt_cipher_encrypt_kernel(struct crypt_cipher_kernel *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length);
+int crypt_cipher_decrypt_kernel(struct crypt_cipher_kernel *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length);
+void crypt_cipher_destroy_kernel(struct crypt_cipher_kernel *ctx);
+int crypt_bitlk_decrypt_key_kernel(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length);
+
+/* Internal implementation for constant time memory comparison */
+static inline int crypt_internal_memeq(const void *m1, const void *m2, size_t n)
+{
+ const unsigned char *_m1 = (const unsigned char *) m1;
+ const unsigned char *_m2 = (const unsigned char *) m2;
+ unsigned char result = 0;
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ result |= _m1[i] ^ _m2[i];
+
+ return result;
+}
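+
+/*
+ * Editor's note: unlike memcmp(), the loop above reads every byte, so the
+ * run time does not depend on where the first difference occurs; only the
+ * "0 means equal" part of the memcmp() convention is preserved. Sketch:
+ *
+ *	if (crypt_internal_memeq(mac, expected, 32))
+ *		return -EPERM; // mismatch, rejected in constant time
+ */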
+
+#endif /* _CRYPTO_BACKEND_INTERNAL_H */
diff --git a/lib/crypto_backend/crypto_cipher_kernel.c b/lib/crypto_backend/crypto_cipher_kernel.c
new file mode 100644
index 0000000..3460717
--- /dev/null
+++ b/lib/crypto_backend/crypto_cipher_kernel.c
@@ -0,0 +1,351 @@
+/*
+ * Linux kernel userspace API crypto backend implementation (skcipher)
+ *
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2012-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include "crypto_backend_internal.h"
+
+#ifdef ENABLE_AF_ALG
+
+#include <linux/if_alg.h>
+
+#ifndef AF_ALG
+#define AF_ALG 38
+#endif
+#ifndef SOL_ALG
+#define SOL_ALG 279
+#endif
+
+#ifndef ALG_SET_AEAD_AUTHSIZE
+#define ALG_SET_AEAD_AUTHSIZE 5
+#endif
+
+/*
+ * ciphers
+ *
+ * ENOENT - algorithm not available
+ * ENOTSUP - AF_ALG family not available
+ * (but cannot check specifically for skcipher API)
+ */
+static int _crypt_cipher_init(struct crypt_cipher_kernel *ctx,
+ const void *key, size_t key_length,
+ size_t tag_length, struct sockaddr_alg *sa)
+{
+ if (!ctx)
+ return -EINVAL;
+
+ ctx->opfd = -1;
+ ctx->tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ if (ctx->tfmfd < 0) {
+ crypt_cipher_destroy_kernel(ctx);
+ return -ENOTSUP;
+ }
+
+ if (bind(ctx->tfmfd, (struct sockaddr *)sa, sizeof(*sa)) < 0) {
+ crypt_cipher_destroy_kernel(ctx);
+ return -ENOENT;
+ }
+
+ if (setsockopt(ctx->tfmfd, SOL_ALG, ALG_SET_KEY, key, key_length) < 0) {
+ crypt_cipher_destroy_kernel(ctx);
+ return -EINVAL;
+ }
+
+ if (tag_length && setsockopt(ctx->tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, tag_length) < 0) {
+ crypt_cipher_destroy_kernel(ctx);
+ return -EINVAL;
+ }
+
+ ctx->opfd = accept(ctx->tfmfd, NULL, 0);
+ if (ctx->opfd < 0) {
+ crypt_cipher_destroy_kernel(ctx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int crypt_cipher_init_kernel(struct crypt_cipher_kernel *ctx, const char *name,
+ const char *mode, const void *key, size_t key_length)
+{
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "skcipher",
+ };
+ int r;
+
+ if (!strcmp(name, "cipher_null"))
+ key_length = 0;
+
+ r = snprintf((char *)sa.salg_name, sizeof(sa.salg_name), "%s(%s)", mode, name);
+ if (r < 0 || (size_t)r >= sizeof(sa.salg_name))
+ return -EINVAL;
+
+ return _crypt_cipher_init(ctx, key, key_length, 0, &sa);
+}
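+
+/*
+ * Editor's note: the kernel algorithm string is composed as "mode(cipher)",
+ * so name "aes" with mode "xts" binds the socket to "xts(aes)", and name
+ * "serpent" with mode "cbc" to "cbc(serpent)".
+ */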
+
+/* The in/out buffers should be aligned to a page boundary */
+static int _crypt_cipher_crypt(struct crypt_cipher_kernel *ctx,
+ const char *in, size_t in_length,
+ char *out, size_t out_length,
+ const char *iv, size_t iv_length,
+ uint32_t direction)
+{
+ int r = 0;
+ ssize_t len;
+ struct af_alg_iv *alg_iv;
+ struct cmsghdr *header;
+ uint32_t *type;
+ struct iovec iov = {
+ .iov_base = (void*)(uintptr_t)in,
+ .iov_len = in_length,
+ };
+ int iv_msg_size = iv ? CMSG_SPACE(sizeof(*alg_iv) + iv_length) : 0;
+ char buffer[CMSG_SPACE(sizeof(*type)) + iv_msg_size];
+ struct msghdr msg = {
+ .msg_control = buffer,
+ .msg_controllen = sizeof(buffer),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+
+ if (!in || !out || !in_length)
+ return -EINVAL;
+
+ if ((!iv && iv_length) || (iv && !iv_length))
+ return -EINVAL;
+
+ memset(buffer, 0, sizeof(buffer));
+
+ /* Set encrypt/decrypt operation */
+ header = CMSG_FIRSTHDR(&msg);
+ if (!header)
+ return -EINVAL;
+
+ header->cmsg_level = SOL_ALG;
+ header->cmsg_type = ALG_SET_OP;
+ header->cmsg_len = CMSG_LEN(sizeof(*type));
+ type = (void*)CMSG_DATA(header);
+ *type = direction;
+
+ /* Set IV */
+ if (iv) {
+ header = CMSG_NXTHDR(&msg, header);
+ if (!header)
+ return -EINVAL;
+
+ header->cmsg_level = SOL_ALG;
+ header->cmsg_type = ALG_SET_IV;
+ header->cmsg_len = iv_msg_size;
+ alg_iv = (void*)CMSG_DATA(header);
+ alg_iv->ivlen = iv_length;
+ memcpy(alg_iv->iv, iv, iv_length);
+ }
+
+ len = sendmsg(ctx->opfd, &msg, 0);
+ if (len != (ssize_t)(in_length))
+ r = -EIO;
+ else {
+ len = read(ctx->opfd, out, out_length);
+ if (len != (ssize_t)out_length)
+ r = -EIO;
+ }
+
+ crypt_backend_memzero(buffer, sizeof(buffer));
+ return r;
+}
+
+int crypt_cipher_encrypt_kernel(struct crypt_cipher_kernel *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return _crypt_cipher_crypt(ctx, in, length, out, length,
+ iv, iv_length, ALG_OP_ENCRYPT);
+}
+
+int crypt_cipher_decrypt_kernel(struct crypt_cipher_kernel *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return _crypt_cipher_crypt(ctx, in, length, out, length,
+ iv, iv_length, ALG_OP_DECRYPT);
+}
+
+void crypt_cipher_destroy_kernel(struct crypt_cipher_kernel *ctx)
+{
+ if (ctx->tfmfd >= 0)
+ close(ctx->tfmfd);
+ if (ctx->opfd >= 0)
+ close(ctx->opfd);
+
+ ctx->tfmfd = -1;
+ ctx->opfd = -1;
+}
+
+int crypt_cipher_check_kernel(const char *name, const char *mode,
+ const char *integrity, size_t key_length)
+{
+ struct crypt_cipher_kernel c;
+ char mode_name[64], tmp_salg_name[180], *real_mode = NULL, *cipher_iv = NULL, *key;
+ const char *salg_type;
+ bool aead;
+ int r;
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ };
+
+ aead = integrity && strcmp(integrity, "none");
+
+	/* Strip the IV generator part (e.g. "-plain64"), if present */
+ if (mode) {
+ strncpy(mode_name, mode, sizeof(mode_name));
+ mode_name[sizeof(mode_name) - 1] = 0;
+ cipher_iv = strchr(mode_name, '-');
+ if (cipher_iv) {
+ *cipher_iv = '\0';
+ real_mode = mode_name;
+ }
+ }
+
+ salg_type = aead ? "aead" : "skcipher";
+ r = snprintf((char *)sa.salg_type, sizeof(sa.salg_type), "%s", salg_type);
+	if (r < 0 || (size_t)r >= sizeof(sa.salg_type))
+ return -EINVAL;
+
+ memset(tmp_salg_name, 0, sizeof(tmp_salg_name));
+
+ /* FIXME: this is duplicating a part of devmapper backend */
+ if (aead && !strcmp(integrity, "poly1305"))
+ r = snprintf(tmp_salg_name, sizeof(tmp_salg_name), "rfc7539(%s,%s)", name, integrity);
+ else if (!real_mode)
+ r = snprintf(tmp_salg_name, sizeof(tmp_salg_name), "%s", name);
+ else if (aead && !strcmp(real_mode, "ccm"))
+ r = snprintf(tmp_salg_name, sizeof(tmp_salg_name), "rfc4309(%s(%s))", real_mode, name);
+ else
+ r = snprintf(tmp_salg_name, sizeof(tmp_salg_name), "%s(%s)", real_mode, name);
+
+ if (r < 0 || (size_t)r >= sizeof(tmp_salg_name))
+ return -EINVAL;
+
+ memcpy(sa.salg_name, tmp_salg_name, sizeof(sa.salg_name));
+
+ key = malloc(key_length);
+ if (!key)
+ return -ENOMEM;
+
+	/* RNG is not available yet, but any key works here; tweak the first byte so the halves differ for a split key (XTS). */
+ memset(key, 0xab, key_length);
+ *key = 0xef;
+
+ r = _crypt_cipher_init(&c, key, key_length, 0, &sa);
+ crypt_cipher_destroy_kernel(&c);
+ free(key);
+
+ return r;
+}
+
+int crypt_bitlk_decrypt_key_kernel(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length)
+{
+ struct crypt_cipher_kernel c;
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "aead",
+ .salg_name = "ccm(aes)",
+ };
+ int r;
+ char buffer[128], ccm_iv[16];
+
+ if (length + tag_length > sizeof(buffer))
+ return -EINVAL;
+
+ if (iv_length > sizeof(ccm_iv) - 2)
+ return -EINVAL;
+
+ r = _crypt_cipher_init(&c, key, key_length, tag_length, &sa);
+ if (r < 0)
+ return r;
+
+ memcpy(buffer, in, length);
+ memcpy(buffer + length, tag, tag_length);
+
+ /* CCM IV - RFC3610 */
+ memset(ccm_iv, 0, sizeof(ccm_iv));
+ ccm_iv[0] = 15 - iv_length - 1;
+ memcpy(ccm_iv + 1, iv, iv_length);
+ memset(ccm_iv + 1 + iv_length, 0, ccm_iv[0] + 1);
+ iv_length = sizeof(ccm_iv);
+
+ r = _crypt_cipher_crypt(&c, buffer, length + tag_length, out, length,
+ ccm_iv, iv_length, ALG_OP_DECRYPT);
+
+ crypt_cipher_destroy_kernel(&c);
+ crypt_backend_memzero(buffer, sizeof(buffer));
+
+ return r;
+}
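+
+/*
+ * Editor's note: a worked example of the RFC 3610 IV formatting above for a
+ * 12-byte nonce: the flags octet is 15 - 12 - 1 = 2 (encoding L' = L - 1,
+ * with L = 3 length octets), so the 16-byte IV is laid out as
+ *
+ *	02 | n0 .. n11 | 00 00 00
+ */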
+
+#else /* ENABLE_AF_ALG */
+int crypt_cipher_init_kernel(struct crypt_cipher_kernel *ctx, const char *name,
+ const char *mode, const void *key, size_t key_length)
+{
+ return -ENOTSUP;
+}
+
+void crypt_cipher_destroy_kernel(struct crypt_cipher_kernel *ctx)
+{
+ return;
+}
+
+int crypt_cipher_encrypt_kernel(struct crypt_cipher_kernel *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return -EINVAL;
+}
+int crypt_cipher_decrypt_kernel(struct crypt_cipher_kernel *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return -EINVAL;
+}
+int crypt_cipher_check_kernel(const char *name, const char *mode,
+ const char *integrity, size_t key_length)
+{
+ /* Cannot check, expect success. */
+ return 0;
+}
+int crypt_bitlk_decrypt_key_kernel(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length)
+{
+ return -ENOTSUP;
+}
+#endif
diff --git a/lib/crypto_backend/crypto_gcrypt.c b/lib/crypto_backend/crypto_gcrypt.c
new file mode 100644
index 0000000..e974aa8
--- /dev/null
+++ b/lib/crypto_backend/crypto_gcrypt.c
@@ -0,0 +1,573 @@
+/*
+ * GCRYPT crypto backend implementation
+ *
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <gcrypt.h>
+#include "crypto_backend_internal.h"
+
+static int crypto_backend_initialised = 0;
+static int crypto_backend_secmem = 1;
+static int crypto_backend_whirlpool_bug = -1;
+static char version[64];
+
+struct crypt_hash {
+ gcry_md_hd_t hd;
+ int hash_id;
+ int hash_len;
+};
+
+struct crypt_hmac {
+ gcry_md_hd_t hd;
+ int hash_id;
+ int hash_len;
+};
+
+struct crypt_cipher {
+ bool use_kernel;
+ union {
+ struct crypt_cipher_kernel kernel;
+ gcry_cipher_hd_t hd;
+ } u;
+};
+
+struct hash_alg {
+ const char *name;
+ const char *gcrypt_name;
+};
+
+/*
+ * Test for wrong Whirlpool variant,
+ * Ref: https://lists.gnupg.org/pipermail/gcrypt-devel/2014-January/002889.html
+ */
+static void crypt_hash_test_whirlpool_bug(void)
+{
+ struct crypt_hash *h;
+ char buf[2] = "\0\0", hash_out1[64], hash_out2[64];
+ int r;
+
+ if (crypto_backend_whirlpool_bug >= 0)
+ return;
+
+ crypto_backend_whirlpool_bug = 0;
+ if (crypt_hash_init(&h, "whirlpool"))
+ return;
+
+ /* One shot */
+ if ((r = crypt_hash_write(h, &buf[0], 2)) ||
+ (r = crypt_hash_final(h, hash_out1, 64))) {
+ crypt_hash_destroy(h);
+ return;
+ }
+
+ /* Split buf (crypt_hash_final resets hash state) */
+ if ((r = crypt_hash_write(h, &buf[0], 1)) ||
+ (r = crypt_hash_write(h, &buf[1], 1)) ||
+ (r = crypt_hash_final(h, hash_out2, 64))) {
+ crypt_hash_destroy(h);
+ return;
+ }
+
+ crypt_hash_destroy(h);
+
+ if (memcmp(hash_out1, hash_out2, 64))
+ crypto_backend_whirlpool_bug = 1;
+}
+
+int crypt_backend_init(bool fips __attribute__((unused)))
+{
+ int r;
+
+ if (crypto_backend_initialised)
+ return 0;
+
+ if (!gcry_control (GCRYCTL_INITIALIZATION_FINISHED_P)) {
+ if (!gcry_check_version (GCRYPT_REQ_VERSION)) {
+ return -ENOSYS;
+ }
+
+/* If gcrypt is compiled with support for POSIX 1003.1e capabilities,
+ * it drops all privileges during secure memory initialisation.
+ * For now, the only workaround is to disable secure memory in gcrypt.
+ * cryptsetup always needs at least the cap_sys_admin privilege for dm-ioctl
+ * and it locks its memory space anyway.
+ */
+#if 0
+ gcry_control (GCRYCTL_DISABLE_SECMEM);
+ crypto_backend_secmem = 0;
+#else
+
+ gcry_control (GCRYCTL_SUSPEND_SECMEM_WARN);
+ gcry_control (GCRYCTL_INIT_SECMEM, 16384, 0);
+ gcry_control (GCRYCTL_RESUME_SECMEM_WARN);
+#endif
+ gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
+ }
+
+ crypto_backend_initialised = 1;
+ crypt_hash_test_whirlpool_bug();
+
+ r = snprintf(version, sizeof(version), "gcrypt %s%s%s",
+ gcry_check_version(NULL),
+ crypto_backend_secmem ? "" : ", secmem disabled",
+ crypto_backend_whirlpool_bug > 0 ? ", flawed whirlpool" : "");
+ if (r < 0 || (size_t)r >= sizeof(version))
+ return -EINVAL;
+
+ return 0;
+}
+
+void crypt_backend_destroy(void)
+{
+ if (crypto_backend_initialised)
+ gcry_control(GCRYCTL_TERM_SECMEM);
+
+ crypto_backend_initialised = 0;
+}
+
+const char *crypt_backend_version(void)
+{
+ return crypto_backend_initialised ? version : "";
+}
+
+uint32_t crypt_backend_flags(void)
+{
+ return 0;
+}
+
+static const char *crypt_hash_compat_name(const char *name, unsigned int *flags)
+{
+ const char *hash_name = name;
+ int i;
+ static struct hash_alg hash_algs[] = {
+ { "blake2b-160", "blake2b_160" },
+ { "blake2b-256", "blake2b_256" },
+ { "blake2b-384", "blake2b_384" },
+ { "blake2b-512", "blake2b_512" },
+ { "blake2s-128", "blake2s_128" },
+ { "blake2s-160", "blake2s_160" },
+ { "blake2s-224", "blake2s_224" },
+ { "blake2s-256", "blake2s_256" },
+ { NULL, NULL, }};
+
+ if (!name)
+ return NULL;
+
+	/* "whirlpool_gcryptbug" is our shortcut to the flawed whirlpool
+	 * in libgcrypt < 1.6.0 */
+ if (!strcasecmp(name, "whirlpool_gcryptbug")) {
+#if GCRYPT_VERSION_NUMBER >= 0x010601
+ if (flags)
+ *flags |= GCRY_MD_FLAG_BUGEMU1;
+#endif
+ hash_name = "whirlpool";
+ }
+
+ i = 0;
+ while (hash_algs[i].name) {
+ if (!strcasecmp(name, hash_algs[i].name)) {
+ hash_name = hash_algs[i].gcrypt_name;
+ break;
+ }
+ i++;
+ }
+
+ return hash_name;
+}
+
+/* HASH */
+int crypt_hash_size(const char *name)
+{
+ int hash_id;
+
+ assert(crypto_backend_initialised);
+
+ hash_id = gcry_md_map_name(crypt_hash_compat_name(name, NULL));
+ if (!hash_id)
+ return -EINVAL;
+
+ return gcry_md_get_algo_dlen(hash_id);
+}
+
+int crypt_hash_init(struct crypt_hash **ctx, const char *name)
+{
+ struct crypt_hash *h;
+ unsigned int flags = 0;
+
+ assert(crypto_backend_initialised);
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ h->hash_id = gcry_md_map_name(crypt_hash_compat_name(name, &flags));
+ if (!h->hash_id) {
+ free(h);
+ return -EINVAL;
+ }
+
+ if (gcry_md_open(&h->hd, h->hash_id, flags)) {
+ free(h);
+ return -EINVAL;
+ }
+
+ h->hash_len = gcry_md_get_algo_dlen(h->hash_id);
+ *ctx = h;
+ return 0;
+}
+
+static void crypt_hash_restart(struct crypt_hash *ctx)
+{
+ gcry_md_reset(ctx->hd);
+}
+
+int crypt_hash_write(struct crypt_hash *ctx, const char *buffer, size_t length)
+{
+ gcry_md_write(ctx->hd, buffer, length);
+ return 0;
+}
+
+int crypt_hash_final(struct crypt_hash *ctx, char *buffer, size_t length)
+{
+ unsigned char *hash;
+
+ if (length > (size_t)ctx->hash_len)
+ return -EINVAL;
+
+ hash = gcry_md_read(ctx->hd, ctx->hash_id);
+ if (!hash)
+ return -EINVAL;
+
+ memcpy(buffer, hash, length);
+ crypt_hash_restart(ctx);
+
+ return 0;
+}
+
+void crypt_hash_destroy(struct crypt_hash *ctx)
+{
+ gcry_md_close(ctx->hd);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* HMAC */
+int crypt_hmac_size(const char *name)
+{
+ return crypt_hash_size(name);
+}
+
+int crypt_hmac_init(struct crypt_hmac **ctx, const char *name,
+ const void *key, size_t key_length)
+{
+ struct crypt_hmac *h;
+ unsigned int flags = GCRY_MD_FLAG_HMAC;
+
+ assert(crypto_backend_initialised);
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ h->hash_id = gcry_md_map_name(crypt_hash_compat_name(name, &flags));
+ if (!h->hash_id) {
+ free(h);
+ return -EINVAL;
+ }
+
+ if (gcry_md_open(&h->hd, h->hash_id, flags)) {
+ free(h);
+ return -EINVAL;
+ }
+
+ if (gcry_md_setkey(h->hd, key, key_length)) {
+ gcry_md_close(h->hd);
+ free(h);
+ return -EINVAL;
+ }
+
+ h->hash_len = gcry_md_get_algo_dlen(h->hash_id);
+ *ctx = h;
+ return 0;
+}
+
+static void crypt_hmac_restart(struct crypt_hmac *ctx)
+{
+ gcry_md_reset(ctx->hd);
+}
+
+int crypt_hmac_write(struct crypt_hmac *ctx, const char *buffer, size_t length)
+{
+ gcry_md_write(ctx->hd, buffer, length);
+ return 0;
+}
+
+int crypt_hmac_final(struct crypt_hmac *ctx, char *buffer, size_t length)
+{
+ unsigned char *hash;
+
+ if (length > (size_t)ctx->hash_len)
+ return -EINVAL;
+
+ hash = gcry_md_read(ctx->hd, ctx->hash_id);
+ if (!hash)
+ return -EINVAL;
+
+ memcpy(buffer, hash, length);
+ crypt_hmac_restart(ctx);
+
+ return 0;
+}
+
+void crypt_hmac_destroy(struct crypt_hmac *ctx)
+{
+ gcry_md_close(ctx->hd);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* RNG */
+int crypt_backend_rng(char *buffer, size_t length, int quality, int fips __attribute__((unused)))
+{
+ switch(quality) {
+ case CRYPT_RND_NORMAL:
+ gcry_randomize(buffer, length, GCRY_STRONG_RANDOM);
+ break;
+ case CRYPT_RND_SALT:
+ case CRYPT_RND_KEY:
+ default:
+ gcry_randomize(buffer, length, GCRY_VERY_STRONG_RANDOM);
+ break;
+ }
+ return 0;
+}
+
+static int pbkdf2(const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations)
+{
+ const char *hash_name = crypt_hash_compat_name(hash, NULL);
+
+#if USE_INTERNAL_PBKDF2
+ return pkcs5_pbkdf2(hash_name, password, password_length, salt, salt_length,
+ iterations, key_length, key, 0);
+#else /* USE_INTERNAL_PBKDF2 */
+ int hash_id = gcry_md_map_name(hash_name);
+
+ if (!hash_id)
+ return -EINVAL;
+
+ if (gcry_kdf_derive(password, password_length, GCRY_KDF_PBKDF2, hash_id,
+ salt, salt_length, iterations, key_length, key))
+ return -EINVAL;
+
+ return 0;
+#endif /* USE_INTERNAL_PBKDF2 */
+}
+
+/* PBKDF */
+int crypt_pbkdf(const char *kdf, const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel)
+{
+ if (!kdf)
+ return -EINVAL;
+
+ if (!strcmp(kdf, "pbkdf2"))
+ return pbkdf2(hash, password, password_length, salt, salt_length,
+ key, key_length, iterations);
+ else if (!strncmp(kdf, "argon2", 6))
+ return argon2(kdf, password, password_length, salt, salt_length,
+ key, key_length, iterations, memory, parallel);
+ return -EINVAL;
+}
+
+/* Block ciphers */
+static int _cipher_init(gcry_cipher_hd_t *hd, const char *name,
+ const char *mode, const void *buffer, size_t length)
+{
+ int cipher_id, mode_id;
+
+ cipher_id = gcry_cipher_map_name(name);
+	if (cipher_id == GCRY_CIPHER_NONE)
+ return -ENOENT;
+
+ if (!strcmp(mode, "ecb"))
+ mode_id = GCRY_CIPHER_MODE_ECB;
+ else if (!strcmp(mode, "cbc"))
+ mode_id = GCRY_CIPHER_MODE_CBC;
+#if HAVE_DECL_GCRY_CIPHER_MODE_XTS
+ else if (!strcmp(mode, "xts"))
+ mode_id = GCRY_CIPHER_MODE_XTS;
+#endif
+ else
+ return -ENOENT;
+
+ if (gcry_cipher_open(hd, cipher_id, mode_id, 0))
+ return -EINVAL;
+
+ if (gcry_cipher_setkey(*hd, buffer, length)) {
+ gcry_cipher_close(*hd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int crypt_cipher_init(struct crypt_cipher **ctx, const char *name,
+ const char *mode, const void *key, size_t key_length)
+{
+ struct crypt_cipher *h;
+ int r;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ if (!_cipher_init(&h->u.hd, name, mode, key, key_length)) {
+ h->use_kernel = false;
+ *ctx = h;
+ return 0;
+ }
+
+ r = crypt_cipher_init_kernel(&h->u.kernel, name, mode, key, key_length);
+ if (r < 0) {
+ free(h);
+ return r;
+ }
+
+ h->use_kernel = true;
+ *ctx = h;
+ return 0;
+}
+
+void crypt_cipher_destroy(struct crypt_cipher *ctx)
+{
+ if (ctx->use_kernel)
+ crypt_cipher_destroy_kernel(&ctx->u.kernel);
+ else
+ gcry_cipher_close(ctx->u.hd);
+ free(ctx);
+}
+
+int crypt_cipher_encrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ if (ctx->use_kernel)
+ return crypt_cipher_encrypt_kernel(&ctx->u.kernel, in, out, length, iv, iv_length);
+
+ if (iv && gcry_cipher_setiv(ctx->u.hd, iv, iv_length))
+ return -EINVAL;
+
+ if (gcry_cipher_encrypt(ctx->u.hd, out, length, in, length))
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_cipher_decrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ if (ctx->use_kernel)
+ return crypt_cipher_decrypt_kernel(&ctx->u.kernel, in, out, length, iv, iv_length);
+
+ if (iv && gcry_cipher_setiv(ctx->u.hd, iv, iv_length))
+ return -EINVAL;
+
+ if (gcry_cipher_decrypt(ctx->u.hd, out, length, in, length))
+ return -EINVAL;
+
+ return 0;
+}
+
+bool crypt_cipher_kernel_only(struct crypt_cipher *ctx)
+{
+ return ctx->use_kernel;
+}
+
+int crypt_bitlk_decrypt_key(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length)
+{
+#ifdef GCRY_CCM_BLOCK_LEN
+ gcry_cipher_hd_t hd;
+ uint64_t l[3];
+ int r = -EINVAL;
+
+ if (gcry_cipher_open(&hd, GCRY_CIPHER_AES256, GCRY_CIPHER_MODE_CCM, 0))
+ return -EINVAL;
+
+ if (gcry_cipher_setkey(hd, key, key_length))
+ goto out;
+
+ if (gcry_cipher_setiv(hd, iv, iv_length))
+ goto out;
+
+ l[0] = length;
+ l[1] = 0;
+ l[2] = tag_length;
+ if (gcry_cipher_ctl(hd, GCRYCTL_SET_CCM_LENGTHS, l, sizeof(l)))
+ goto out;
+
+ if (gcry_cipher_decrypt(hd, out, length, in, length))
+ goto out;
+
+ if (gcry_cipher_checktag(hd, tag, tag_length))
+ goto out;
+
+ r = 0;
+out:
+ gcry_cipher_close(hd);
+ return r;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+int crypt_backend_memeq(const void *m1, const void *m2, size_t n)
+{
+ return crypt_internal_memeq(m1, m2, n);
+}
+
+#if !ENABLE_FIPS
+bool crypt_fips_mode(void) { return false; }
+#else
+bool crypt_fips_mode(void)
+{
+ static bool fips_mode = false, fips_checked = false;
+
+ if (fips_checked)
+ return fips_mode;
+
+ fips_mode = gcry_fips_mode_active();
+ fips_checked = true;
+
+ return fips_mode;
+}
+#endif /* ENABLE FIPS */
diff --git a/lib/crypto_backend/crypto_kernel.c b/lib/crypto_backend/crypto_kernel.c
new file mode 100644
index 0000000..8493c0a
--- /dev/null
+++ b/lib/crypto_backend/crypto_kernel.c
@@ -0,0 +1,428 @@
+/*
+ * Linux kernel userspace API crypto backend implementation
+ *
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/utsname.h>
+#include <linux/if_alg.h>
+#include "crypto_backend_internal.h"
+
+#ifndef AF_ALG
+#define AF_ALG 38
+#endif
+#ifndef SOL_ALG
+#define SOL_ALG 279
+#endif
+
+static int crypto_backend_initialised = 0;
+static char version[256];
+
+struct hash_alg {
+ const char *name;
+ const char *kernel_name;
+ int length;
+ unsigned int block_length;
+};
+
+static struct hash_alg hash_algs[] = {
+ { "sha1", "sha1", 20, 64 },
+ { "sha224", "sha224", 28, 64 },
+ { "sha256", "sha256", 32, 64 },
+ { "sha384", "sha384", 48, 128 },
+ { "sha512", "sha512", 64, 128 },
+ { "ripemd160", "rmd160", 20, 64 },
+ { "whirlpool", "wp512", 64, 64 },
+ { "sha3-224", "sha3-224", 28, 144 },
+ { "sha3-256", "sha3-256", 32, 136 },
+ { "sha3-384", "sha3-384", 48, 104 },
+ { "sha3-512", "sha3-512", 64, 72 },
+ { "stribog256","streebog256", 32, 64 },
+ { "stribog512","streebog512", 64, 64 },
+ { "sm3", "sm3", 32, 64 },
+ { "blake2b-160","blake2b-160",20, 128 },
+ { "blake2b-256","blake2b-256",32, 128 },
+ { "blake2b-384","blake2b-384",48, 128 },
+ { "blake2b-512","blake2b-512",64, 128 },
+ { "blake2s-128","blake2s-128",16, 64 },
+ { "blake2s-160","blake2s-160",20, 64 },
+ { "blake2s-224","blake2s-224",28, 64 },
+ { "blake2s-256","blake2s-256",32, 64 },
+ { NULL, NULL, 0, 0 }
+};
+
+struct crypt_hash {
+ int tfmfd;
+ int opfd;
+ int hash_len;
+};
+
+struct crypt_hmac {
+ int tfmfd;
+ int opfd;
+ int hash_len;
+};
+
+struct crypt_cipher {
+ struct crypt_cipher_kernel ck;
+};
+
+static int crypt_kernel_socket_init(struct sockaddr_alg *sa, int *tfmfd, int *opfd,
+ const void *key, size_t key_length)
+{
+ *tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ if (*tfmfd < 0)
+ return -ENOTSUP;
+
+ if (bind(*tfmfd, (struct sockaddr *)sa, sizeof(*sa)) < 0) {
+ close(*tfmfd);
+ *tfmfd = -1;
+ return -ENOENT;
+ }
+
+ if (key && setsockopt(*tfmfd, SOL_ALG, ALG_SET_KEY, key, key_length) < 0) {
+ close(*tfmfd);
+ *tfmfd = -1;
+ return -EINVAL;
+ }
+
+ *opfd = accept(*tfmfd, NULL, 0);
+ if (*opfd < 0) {
+ close(*tfmfd);
+ *tfmfd = -1;
+ return -EINVAL;
+ }
+
+ return 0;
+}
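+
+/*
+ * Editor's sketch (not part of upstream): the same AF_ALG handshake as a
+ * self-contained fragment, hashing one buffer with sha256.
+ */
+static int example_af_alg_sha256(const void *data, size_t len, unsigned char out[32])
+{
+	struct sockaddr_alg sa = {
+		.salg_family = AF_ALG,
+		.salg_type = "hash",
+		.salg_name = "sha256",
+	};
+	int tfmfd, opfd, r = -EIO;
+
+	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+	if (tfmfd < 0)
+		return -ENOTSUP;
+
+	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+		close(tfmfd);
+		return -ENOENT;
+	}
+
+	opfd = accept(tfmfd, NULL, 0);
+	if (opfd >= 0) {
+		if (send(opfd, data, len, 0) == (ssize_t)len &&
+		    read(opfd, out, 32) == 32)
+			r = 0;
+		close(opfd);
+	}
+
+	close(tfmfd);
+	return r;
+}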
+
+int crypt_backend_init(bool fips __attribute__((unused)))
+{
+ struct utsname uts;
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash",
+ .salg_name = "sha256",
+ };
+ int r, tfmfd = -1, opfd = -1;
+
+ if (crypto_backend_initialised)
+ return 0;
+
+ if (uname(&uts) == -1 || strcmp(uts.sysname, "Linux"))
+ return -EINVAL;
+
+ r = snprintf(version, sizeof(version), "%s %s kernel cryptoAPI",
+ uts.sysname, uts.release);
+ if (r < 0 || (size_t)r >= sizeof(version))
+ return -EINVAL;
+
+ if (crypt_kernel_socket_init(&sa, &tfmfd, &opfd, NULL, 0) < 0)
+ return -EINVAL;
+
+ close(tfmfd);
+ close(opfd);
+
+ crypto_backend_initialised = 1;
+ return 0;
+}
+
+void crypt_backend_destroy(void)
+{
+ crypto_backend_initialised = 0;
+}
+
+uint32_t crypt_backend_flags(void)
+{
+ return CRYPT_BACKEND_KERNEL;
+}
+
+const char *crypt_backend_version(void)
+{
+ return crypto_backend_initialised ? version : "";
+}
+
+static struct hash_alg *_get_alg(const char *name)
+{
+ int i = 0;
+
+ while (name && hash_algs[i].name) {
+ if (!strcmp(name, hash_algs[i].name))
+ return &hash_algs[i];
+ i++;
+ }
+ return NULL;
+}
+
+/* HASH */
+int crypt_hash_size(const char *name)
+{
+ struct hash_alg *ha = _get_alg(name);
+
+ return ha ? ha->length : -EINVAL;
+}
+
+int crypt_hash_init(struct crypt_hash **ctx, const char *name)
+{
+ struct crypt_hash *h;
+ struct hash_alg *ha;
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash",
+ };
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ ha = _get_alg(name);
+ if (!ha) {
+ free(h);
+ return -EINVAL;
+ }
+ h->hash_len = ha->length;
+
+ strncpy((char *)sa.salg_name, ha->kernel_name, sizeof(sa.salg_name)-1);
+
+ if (crypt_kernel_socket_init(&sa, &h->tfmfd, &h->opfd, NULL, 0) < 0) {
+ free(h);
+ return -EINVAL;
+ }
+
+ *ctx = h;
+ return 0;
+}
+
+int crypt_hash_write(struct crypt_hash *ctx, const char *buffer, size_t length)
+{
+ ssize_t r;
+
+ r = send(ctx->opfd, buffer, length, MSG_MORE);
+ if (r < 0 || (size_t)r < length)
+ return -EIO;
+
+ return 0;
+}
+
+int crypt_hash_final(struct crypt_hash *ctx, char *buffer, size_t length)
+{
+ ssize_t r;
+
+ if (length > (size_t)ctx->hash_len)
+ return -EINVAL;
+
+ r = read(ctx->opfd, buffer, length);
+ if (r < 0)
+ return -EIO;
+
+ return 0;
+}
+
+void crypt_hash_destroy(struct crypt_hash *ctx)
+{
+ if (ctx->tfmfd >= 0)
+ close(ctx->tfmfd);
+ if (ctx->opfd >= 0)
+ close(ctx->opfd);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* HMAC */
+int crypt_hmac_size(const char *name)
+{
+ return crypt_hash_size(name);
+}
+
+int crypt_hmac_init(struct crypt_hmac **ctx, const char *name,
+ const void *key, size_t key_length)
+{
+ struct crypt_hmac *h;
+ struct hash_alg *ha;
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash",
+ };
+ int r;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ ha = _get_alg(name);
+ if (!ha) {
+ free(h);
+ return -EINVAL;
+ }
+ h->hash_len = ha->length;
+
+ r = snprintf((char *)sa.salg_name, sizeof(sa.salg_name),
+ "hmac(%s)", ha->kernel_name);
+ if (r < 0 || (size_t)r >= sizeof(sa.salg_name)) {
+ free(h);
+ return -EINVAL;
+ }
+
+ if (crypt_kernel_socket_init(&sa, &h->tfmfd, &h->opfd, key, key_length) < 0) {
+ free(h);
+ return -EINVAL;
+ }
+
+ *ctx = h;
+ return 0;
+}
+
+int crypt_hmac_write(struct crypt_hmac *ctx, const char *buffer, size_t length)
+{
+ ssize_t r;
+
+ r = send(ctx->opfd, buffer, length, MSG_MORE);
+ if (r < 0 || (size_t)r < length)
+ return -EIO;
+
+ return 0;
+}
+
+int crypt_hmac_final(struct crypt_hmac *ctx, char *buffer, size_t length)
+{
+ ssize_t r;
+
+ if (length > (size_t)ctx->hash_len)
+ return -EINVAL;
+
+ r = read(ctx->opfd, buffer, length);
+ if (r < 0)
+ return -EIO;
+
+ return 0;
+}
+
+void crypt_hmac_destroy(struct crypt_hmac *ctx)
+{
+ if (ctx->tfmfd >= 0)
+ close(ctx->tfmfd);
+ if (ctx->opfd >= 0)
+ close(ctx->opfd);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* RNG - N/A */
+int crypt_backend_rng(char *buffer __attribute__((unused)), size_t length __attribute__((unused)),
+ int quality __attribute__((unused)), int fips __attribute__((unused)))
+{
+ return -EINVAL;
+}
+
+/* PBKDF */
+int crypt_pbkdf(const char *kdf, const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel)
+{
+ struct hash_alg *ha;
+
+ if (!kdf)
+ return -EINVAL;
+
+ if (!strcmp(kdf, "pbkdf2")) {
+ ha = _get_alg(hash);
+ if (!ha)
+ return -EINVAL;
+
+ return pkcs5_pbkdf2(hash, password, password_length, salt, salt_length,
+ iterations, key_length, key, ha->block_length);
+ } else if (!strncmp(kdf, "argon2", 6)) {
+ return argon2(kdf, password, password_length, salt, salt_length,
+ key, key_length, iterations, memory, parallel);
+ }
+
+ return -EINVAL;
+}
+
+/* Block ciphers */
+int crypt_cipher_init(struct crypt_cipher **ctx, const char *name,
+ const char *mode, const void *key, size_t key_length)
+{
+ struct crypt_cipher *h;
+ int r;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ r = crypt_cipher_init_kernel(&h->ck, name, mode, key, key_length);
+ if (r < 0) {
+ free(h);
+ return r;
+ }
+
+ *ctx = h;
+ return 0;
+}
+
+void crypt_cipher_destroy(struct crypt_cipher *ctx)
+{
+ crypt_cipher_destroy_kernel(&ctx->ck);
+ free(ctx);
+}
+
+int crypt_cipher_encrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return crypt_cipher_encrypt_kernel(&ctx->ck, in, out, length, iv, iv_length);
+}
+
+int crypt_cipher_decrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return crypt_cipher_decrypt_kernel(&ctx->ck, in, out, length, iv, iv_length);
+}
+
+bool crypt_cipher_kernel_only(struct crypt_cipher *ctx __attribute__((unused)))
+{
+ return true;
+}
+
+int crypt_bitlk_decrypt_key(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length)
+{
+ return crypt_bitlk_decrypt_key_kernel(key, key_length, in, out, length,
+ iv, iv_length, tag, tag_length);
+}
+
+int crypt_backend_memeq(const void *m1, const void *m2, size_t n)
+{
+ return crypt_internal_memeq(m1, m2, n);
+}
+
+bool crypt_fips_mode(void)
+{
+ return false;
+}
diff --git a/lib/crypto_backend/crypto_nettle.c b/lib/crypto_backend/crypto_nettle.c
new file mode 100644
index 0000000..086e4fc
--- /dev/null
+++ b/lib/crypto_backend/crypto_nettle.c
@@ -0,0 +1,460 @@
+/*
+ * Nettle crypto backend implementation
+ *
+ * Copyright (C) 2011-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <nettle/sha.h>
+#include <nettle/sha3.h>
+#include <nettle/hmac.h>
+#include <nettle/pbkdf2.h>
+#include <nettle/memops.h>
+#include "crypto_backend_internal.h"
+
+#if HAVE_NETTLE_VERSION_H
+#include <nettle/version.h>
+#define VSTR(s) STR(s)
+#define STR(s) #s
+static const char *version = "Nettle "VSTR(NETTLE_VERSION_MAJOR)"."VSTR(NETTLE_VERSION_MINOR);
+#else
+static const char *version = "Nettle";
+#endif
+
+typedef void (*init_func) (void *);
+typedef void (*update_func) (void *, size_t, const uint8_t *);
+typedef void (*digest_func) (void *, size_t, uint8_t *);
+typedef void (*set_key_func) (void *, size_t, const uint8_t *);
+
+struct hash_alg {
+ const char *name;
+ int length;
+ init_func init;
+ update_func update;
+ digest_func digest;
+ update_func hmac_update;
+ digest_func hmac_digest;
+ set_key_func hmac_set_key;
+};
+
+/* Missing HMAC wrappers in Nettle */
+#define HMAC_FCE(xxx) \
+struct xhmac_##xxx##_ctx HMAC_CTX(struct xxx##_ctx); \
+static void xhmac_##xxx##_set_key(struct xhmac_##xxx##_ctx *ctx, \
+size_t key_length, const uint8_t *key) \
+{HMAC_SET_KEY(ctx, &nettle_##xxx, key_length, key);} \
+static void xhmac_##xxx##_update(struct xhmac_##xxx##_ctx *ctx, \
+size_t length, const uint8_t *data) \
+{xxx##_update(&ctx->state, length, data);} \
+static void xhmac_##xxx##_digest(struct xhmac_##xxx##_ctx *ctx, \
+size_t length, uint8_t *digest) \
+{HMAC_DIGEST(ctx, &nettle_##xxx, length, digest);}
+
+HMAC_FCE(sha3_224);
+HMAC_FCE(sha3_256);
+HMAC_FCE(sha3_384);
+HMAC_FCE(sha3_512);
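+
+/*
+ * Editor's note: for reference, HMAC_FCE(sha3_256) expands to roughly this
+ * (whitespace added):
+ *
+ *	struct xhmac_sha3_256_ctx HMAC_CTX(struct sha3_256_ctx);
+ *	static void xhmac_sha3_256_set_key(struct xhmac_sha3_256_ctx *ctx,
+ *					   size_t key_length, const uint8_t *key)
+ *	{ HMAC_SET_KEY(ctx, &nettle_sha3_256, key_length, key); }
+ *	... likewise for update and digest ...
+ */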
+
+static struct hash_alg hash_algs[] = {
+ { "sha1", SHA1_DIGEST_SIZE,
+ (init_func) sha1_init,
+ (update_func) sha1_update,
+ (digest_func) sha1_digest,
+ (update_func) hmac_sha1_update,
+ (digest_func) hmac_sha1_digest,
+ (set_key_func) hmac_sha1_set_key,
+ },
+ { "sha224", SHA224_DIGEST_SIZE,
+ (init_func) sha224_init,
+ (update_func) sha224_update,
+ (digest_func) sha224_digest,
+ (update_func) hmac_sha224_update,
+ (digest_func) hmac_sha224_digest,
+ (set_key_func) hmac_sha224_set_key,
+ },
+ { "sha256", SHA256_DIGEST_SIZE,
+ (init_func) sha256_init,
+ (update_func) sha256_update,
+ (digest_func) sha256_digest,
+ (update_func) hmac_sha256_update,
+ (digest_func) hmac_sha256_digest,
+ (set_key_func) hmac_sha256_set_key,
+ },
+ { "sha384", SHA384_DIGEST_SIZE,
+ (init_func) sha384_init,
+ (update_func) sha384_update,
+ (digest_func) sha384_digest,
+ (update_func) hmac_sha384_update,
+ (digest_func) hmac_sha384_digest,
+ (set_key_func) hmac_sha384_set_key,
+ },
+ { "sha512", SHA512_DIGEST_SIZE,
+ (init_func) sha512_init,
+ (update_func) sha512_update,
+ (digest_func) sha512_digest,
+ (update_func) hmac_sha512_update,
+ (digest_func) hmac_sha512_digest,
+ (set_key_func) hmac_sha512_set_key,
+ },
+ { "ripemd160", RIPEMD160_DIGEST_SIZE,
+ (init_func) ripemd160_init,
+ (update_func) ripemd160_update,
+ (digest_func) ripemd160_digest,
+ (update_func) hmac_ripemd160_update,
+ (digest_func) hmac_ripemd160_digest,
+ (set_key_func) hmac_ripemd160_set_key,
+ },
+/* Nettle prior to version 3.2 has an incompatible SHA3 implementation */
+#if NETTLE_SHA3_FIPS202
+ { "sha3-224", SHA3_224_DIGEST_SIZE,
+ (init_func) sha3_224_init,
+ (update_func) sha3_224_update,
+ (digest_func) sha3_224_digest,
+ (update_func) xhmac_sha3_224_update,
+ (digest_func) xhmac_sha3_224_digest,
+ (set_key_func) xhmac_sha3_224_set_key,
+ },
+ { "sha3-256", SHA3_256_DIGEST_SIZE,
+ (init_func) sha3_256_init,
+ (update_func) sha3_256_update,
+ (digest_func) sha3_256_digest,
+ (update_func) xhmac_sha3_256_update,
+ (digest_func) xhmac_sha3_256_digest,
+ (set_key_func) xhmac_sha3_256_set_key,
+ },
+ { "sha3-384", SHA3_384_DIGEST_SIZE,
+ (init_func) sha3_384_init,
+ (update_func) sha3_384_update,
+ (digest_func) sha3_384_digest,
+ (update_func) xhmac_sha3_384_update,
+ (digest_func) xhmac_sha3_384_digest,
+ (set_key_func) xhmac_sha3_384_set_key,
+ },
+ { "sha3-512", SHA3_512_DIGEST_SIZE,
+ (init_func) sha3_512_init,
+ (update_func) sha3_512_update,
+ (digest_func) sha3_512_digest,
+ (update_func) xhmac_sha3_512_update,
+ (digest_func) xhmac_sha3_512_digest,
+ (set_key_func) xhmac_sha3_512_set_key,
+ },
+#endif
+ { NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL, }
+};
+
+struct crypt_hash {
+ const struct hash_alg *hash;
+ union {
+ struct sha1_ctx sha1;
+ struct sha224_ctx sha224;
+ struct sha256_ctx sha256;
+ struct sha384_ctx sha384;
+ struct sha512_ctx sha512;
+ struct ripemd160_ctx ripemd160;
+ struct sha3_224_ctx sha3_224;
+ struct sha3_256_ctx sha3_256;
+ struct sha3_384_ctx sha3_384;
+ struct sha3_512_ctx sha3_512;
+ } nettle_ctx;
+};
+
+struct crypt_hmac {
+ const struct hash_alg *hash;
+ union {
+ struct hmac_sha1_ctx sha1;
+ struct hmac_sha224_ctx sha224;
+ struct hmac_sha256_ctx sha256;
+ struct hmac_sha384_ctx sha384;
+ struct hmac_sha512_ctx sha512;
+ struct hmac_ripemd160_ctx ripemd160;
+ struct xhmac_sha3_224_ctx sha3_224;
+ struct xhmac_sha3_256_ctx sha3_256;
+ struct xhmac_sha3_384_ctx sha3_384;
+ struct xhmac_sha3_512_ctx sha3_512;
+ } nettle_ctx;
+ size_t key_length;
+ uint8_t *key;
+};
+
+struct crypt_cipher {
+ struct crypt_cipher_kernel ck;
+};
+
+uint32_t crypt_backend_flags(void)
+{
+ return 0;
+}
+
+static struct hash_alg *_get_alg(const char *name)
+{
+ int i = 0;
+
+ while (name && hash_algs[i].name) {
+ if (!strcmp(name, hash_algs[i].name))
+ return &hash_algs[i];
+ i++;
+ }
+ return NULL;
+}
+
+int crypt_backend_init(bool fips __attribute__((unused)))
+{
+ return 0;
+}
+
+void crypt_backend_destroy(void)
+{
+ return;
+}
+
+const char *crypt_backend_version(void)
+{
+ return version;
+}
+
+/* HASH */
+int crypt_hash_size(const char *name)
+{
+ struct hash_alg *ha = _get_alg(name);
+
+ return ha ? ha->length : -EINVAL;
+}
+
+int crypt_hash_init(struct crypt_hash **ctx, const char *name)
+{
+ struct crypt_hash *h;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ h->hash = _get_alg(name);
+ if (!h->hash) {
+ free(h);
+ return -EINVAL;
+ }
+
+ h->hash->init(&h->nettle_ctx);
+
+ *ctx = h;
+ return 0;
+}
+
+static void crypt_hash_restart(struct crypt_hash *ctx)
+{
+ ctx->hash->init(&ctx->nettle_ctx);
+}
+
+int crypt_hash_write(struct crypt_hash *ctx, const char *buffer, size_t length)
+{
+ ctx->hash->update(&ctx->nettle_ctx, length, (const uint8_t*)buffer);
+ return 0;
+}
+
+int crypt_hash_final(struct crypt_hash *ctx, char *buffer, size_t length)
+{
+ if (length > (size_t)ctx->hash->length)
+ return -EINVAL;
+
+ ctx->hash->digest(&ctx->nettle_ctx, length, (uint8_t *)buffer);
+ crypt_hash_restart(ctx);
+ return 0;
+}
+
+void crypt_hash_destroy(struct crypt_hash *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* HMAC */
+int crypt_hmac_size(const char *name)
+{
+ return crypt_hash_size(name);
+}
+
+int crypt_hmac_init(struct crypt_hmac **ctx, const char *name,
+ const void *key, size_t key_length)
+{
+ struct crypt_hmac *h;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+	memset(h, 0, sizeof(*h));
+
+ h->hash = _get_alg(name);
+ if (!h->hash) {
+ free(h);
+ return -EINVAL;
+ }
+
+ h->key = malloc(key_length);
+ if (!h->key) {
+ free(h);
+ return -ENOMEM;
+ }
+
+ memcpy(h->key, key, key_length);
+ h->key_length = key_length;
+
+ h->hash->init(&h->nettle_ctx);
+ h->hash->hmac_set_key(&h->nettle_ctx, h->key_length, h->key);
+
+ *ctx = h;
+ return 0;
+}
+
+static void crypt_hmac_restart(struct crypt_hmac *ctx)
+{
+ ctx->hash->hmac_set_key(&ctx->nettle_ctx, ctx->key_length, ctx->key);
+}
+
+int crypt_hmac_write(struct crypt_hmac *ctx, const char *buffer, size_t length)
+{
+ ctx->hash->hmac_update(&ctx->nettle_ctx, length, (const uint8_t *)buffer);
+ return 0;
+}
+
+int crypt_hmac_final(struct crypt_hmac *ctx, char *buffer, size_t length)
+{
+ if (length > (size_t)ctx->hash->length)
+ return -EINVAL;
+
+ ctx->hash->hmac_digest(&ctx->nettle_ctx, length, (uint8_t *)buffer);
+ crypt_hmac_restart(ctx);
+ return 0;
+}
+
+void crypt_hmac_destroy(struct crypt_hmac *ctx)
+{
+ memset(ctx->key, 0, ctx->key_length);
+ free(ctx->key);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* RNG - N/A */
+int crypt_backend_rng(char *buffer __attribute__((unused)),
+ size_t length __attribute__((unused)),
+ int quality __attribute__((unused)),
+ int fips __attribute__((unused)))
+{
+ return -EINVAL;
+}
+
+/* PBKDF */
+int crypt_pbkdf(const char *kdf, const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel)
+{
+ struct crypt_hmac *h;
+ int r;
+
+ if (!kdf)
+ return -EINVAL;
+
+ if (!strcmp(kdf, "pbkdf2")) {
+ r = crypt_hmac_init(&h, hash, password, password_length);
+ if (r < 0)
+ return r;
+
+ nettle_pbkdf2(&h->nettle_ctx, h->hash->hmac_update,
+ h->hash->hmac_digest, h->hash->length, iterations,
+ salt_length, (const uint8_t *)salt, key_length,
+ (uint8_t *)key);
+ crypt_hmac_destroy(h);
+ return 0;
+ } else if (!strncmp(kdf, "argon2", 6)) {
+ return argon2(kdf, password, password_length, salt, salt_length,
+ key, key_length, iterations, memory, parallel);
+ }
+
+ return -EINVAL;
+}
+
+/* Block ciphers */
+int crypt_cipher_init(struct crypt_cipher **ctx, const char *name,
+ const char *mode, const void *key, size_t key_length)
+{
+ struct crypt_cipher *h;
+ int r;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ r = crypt_cipher_init_kernel(&h->ck, name, mode, key, key_length);
+ if (r < 0) {
+ free(h);
+ return r;
+ }
+
+ *ctx = h;
+ return 0;
+}
+
+void crypt_cipher_destroy(struct crypt_cipher *ctx)
+{
+ crypt_cipher_destroy_kernel(&ctx->ck);
+ free(ctx);
+}
+
+int crypt_cipher_encrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return crypt_cipher_encrypt_kernel(&ctx->ck, in, out, length, iv, iv_length);
+}
+
+int crypt_cipher_decrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return crypt_cipher_decrypt_kernel(&ctx->ck, in, out, length, iv, iv_length);
+}
+
+bool crypt_cipher_kernel_only(struct crypt_cipher *ctx __attribute__((unused)))
+{
+ return true;
+}
+
+int crypt_bitlk_decrypt_key(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length)
+{
+ return crypt_bitlk_decrypt_key_kernel(key, key_length, in, out, length,
+ iv, iv_length, tag, tag_length);
+}
+
+int crypt_backend_memeq(const void *m1, const void *m2, size_t n)
+{
+	/* The return convention is inverse to memcmp (memeql_sec returns nonzero when equal) */
+ return !memeql_sec(m1, m2, n);
+}
+
+bool crypt_fips_mode(void)
+{
+ return false;
+}
diff --git a/lib/crypto_backend/crypto_nss.c b/lib/crypto_backend/crypto_nss.c
new file mode 100644
index 0000000..c154812
--- /dev/null
+++ b/lib/crypto_backend/crypto_nss.c
@@ -0,0 +1,407 @@
+/*
+ * NSS crypto backend implementation
+ *
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <errno.h>
+#include <nss.h>
+#include <pk11pub.h>
+#include "crypto_backend_internal.h"
+
+#define CONST_CAST(x) (x)(uintptr_t)
+
+static int crypto_backend_initialised = 0;
+static char version[64];
+
+struct hash_alg {
+ const char *name;
+ SECOidTag oid;
+ CK_MECHANISM_TYPE ck_type;
+ int length;
+ unsigned int block_length;
+};
+
+static struct hash_alg hash_algs[] = {
+ { "sha1", SEC_OID_SHA1, CKM_SHA_1_HMAC, 20, 64 },
+ { "sha256", SEC_OID_SHA256, CKM_SHA256_HMAC, 32, 64 },
+ { "sha384", SEC_OID_SHA384, CKM_SHA384_HMAC, 48, 128 },
+ { "sha512", SEC_OID_SHA512, CKM_SHA512_HMAC, 64, 128 },
+// { "ripemd160", SEC_OID_RIPEMD160, CKM_RIPEMD160_HMAC, 20, 64 },
+ { NULL, 0, 0, 0 }
+};
+
+struct crypt_hash {
+ PK11Context *md;
+ const struct hash_alg *hash;
+};
+
+struct crypt_hmac {
+ PK11Context *md;
+ PK11SymKey *key;
+ PK11SlotInfo *slot;
+ const struct hash_alg *hash;
+};
+
+struct crypt_cipher {
+ struct crypt_cipher_kernel ck;
+};
+
+static struct hash_alg *_get_alg(const char *name)
+{
+ int i = 0;
+
+ while (name && hash_algs[i].name) {
+ if (!strcmp(name, hash_algs[i].name))
+ return &hash_algs[i];
+ i++;
+ }
+ return NULL;
+}
+
+int crypt_backend_init(bool fips __attribute__((unused)))
+{
+ int r;
+
+ if (crypto_backend_initialised)
+ return 0;
+
+ if (NSS_NoDB_Init(".") != SECSuccess)
+ return -EINVAL;
+
+#if HAVE_DECL_NSS_GETVERSION
+ r = snprintf(version, sizeof(version), "NSS %s", NSS_GetVersion());
+#else
+ r = snprintf(version, sizeof(version), "NSS");
+#endif
+ if (r < 0 || (size_t)r >= sizeof(version))
+ return -EINVAL;
+
+ crypto_backend_initialised = 1;
+ return 0;
+}
+
+void crypt_backend_destroy(void)
+{
+ crypto_backend_initialised = 0;
+}
+
+uint32_t crypt_backend_flags(void)
+{
+ return 0;
+}
+
+const char *crypt_backend_version(void)
+{
+ return crypto_backend_initialised ? version : "";
+}
+
+/* HASH */
+int crypt_hash_size(const char *name)
+{
+ struct hash_alg *ha = _get_alg(name);
+
+ return ha ? ha->length : -EINVAL;
+}
+
+int crypt_hash_init(struct crypt_hash **ctx, const char *name)
+{
+ struct crypt_hash *h;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ h->hash = _get_alg(name);
+ if (!h->hash) {
+ free(h);
+ return -EINVAL;
+ }
+
+ h->md = PK11_CreateDigestContext(h->hash->oid);
+ if (!h->md) {
+ free(h);
+ return -EINVAL;
+ }
+
+ if (PK11_DigestBegin(h->md) != SECSuccess) {
+ PK11_DestroyContext(h->md, PR_TRUE);
+ free(h);
+ return -EINVAL;
+ }
+
+ *ctx = h;
+ return 0;
+}
+
+static int crypt_hash_restart(struct crypt_hash *ctx)
+{
+ if (PK11_DigestBegin(ctx->md) != SECSuccess)
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_hash_write(struct crypt_hash *ctx, const char *buffer, size_t length)
+{
+ if (PK11_DigestOp(ctx->md, CONST_CAST(unsigned char *)buffer, length) != SECSuccess)
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_hash_final(struct crypt_hash *ctx, char *buffer, size_t length)
+{
+ unsigned char tmp[64];
+ unsigned int tmp_len;
+
+ if (length > (size_t)ctx->hash->length)
+ return -EINVAL;
+
+ if (PK11_DigestFinal(ctx->md, tmp, &tmp_len, length) != SECSuccess)
+ return -EINVAL;
+
+ memcpy(buffer, tmp, length);
+ crypt_backend_memzero(tmp, sizeof(tmp));
+
+ if (tmp_len < length)
+ return -EINVAL;
+
+ if (crypt_hash_restart(ctx))
+ return -EINVAL;
+
+ return 0;
+}
+
+void crypt_hash_destroy(struct crypt_hash *ctx)
+{
+ PK11_DestroyContext(ctx->md, PR_TRUE);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* HMAC */
+int crypt_hmac_size(const char *name)
+{
+ return crypt_hash_size(name);
+}
+
+int crypt_hmac_init(struct crypt_hmac **ctx, const char *name,
+ const void *key, size_t key_length)
+{
+ struct crypt_hmac *h;
+ SECItem keyItem;
+ SECItem noParams;
+
+ keyItem.type = siBuffer;
+ keyItem.data = CONST_CAST(unsigned char *)key;
+ keyItem.len = (int)key_length;
+
+ noParams.type = siBuffer;
+ noParams.data = 0;
+ noParams.len = 0;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+	memset(h, 0, sizeof(*h));
+
+ h->hash = _get_alg(name);
+ if (!h->hash)
+ goto err;
+
+ h->slot = PK11_GetInternalKeySlot();
+ if (!h->slot)
+ goto err;
+
+ h->key = PK11_ImportSymKey(h->slot, h->hash->ck_type, PK11_OriginUnwrap,
+ CKA_SIGN, &keyItem, NULL);
+ if (!h->key)
+ goto err;
+
+ h->md = PK11_CreateContextBySymKey(h->hash->ck_type, CKA_SIGN, h->key,
+ &noParams);
+ if (!h->md)
+ goto err;
+
+ if (PK11_DigestBegin(h->md) != SECSuccess)
+ goto err;
+
+ *ctx = h;
+ return 0;
+err:
+ crypt_hmac_destroy(h);
+ return -EINVAL;
+}
+
+static int crypt_hmac_restart(struct crypt_hmac *ctx)
+{
+ if (PK11_DigestBegin(ctx->md) != SECSuccess)
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_hmac_write(struct crypt_hmac *ctx, const char *buffer, size_t length)
+{
+ if (PK11_DigestOp(ctx->md, CONST_CAST(unsigned char *)buffer, length) != SECSuccess)
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_hmac_final(struct crypt_hmac *ctx, char *buffer, size_t length)
+{
+ unsigned char tmp[64];
+ unsigned int tmp_len;
+
+ if (length > (size_t)ctx->hash->length)
+ return -EINVAL;
+
+ if (PK11_DigestFinal(ctx->md, tmp, &tmp_len, length) != SECSuccess)
+ return -EINVAL;
+
+ memcpy(buffer, tmp, length);
+ crypt_backend_memzero(tmp, sizeof(tmp));
+
+ if (tmp_len < length)
+ return -EINVAL;
+
+ if (crypt_hmac_restart(ctx))
+ return -EINVAL;
+
+ return 0;
+}
+
+void crypt_hmac_destroy(struct crypt_hmac *ctx)
+{
+ if (ctx->key)
+ PK11_FreeSymKey(ctx->key);
+ if (ctx->slot)
+ PK11_FreeSlot(ctx->slot);
+ if (ctx->md)
+ PK11_DestroyContext(ctx->md, PR_TRUE);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* RNG */
+int crypt_backend_rng(char *buffer, size_t length, int quality __attribute__((unused)), int fips)
+{
+ if (fips)
+ return -EINVAL;
+
+ if (PK11_GenerateRandom((unsigned char *)buffer, length) != SECSuccess)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* PBKDF */
+int crypt_pbkdf(const char *kdf, const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel)
+{
+ struct hash_alg *ha;
+
+ if (!kdf)
+ return -EINVAL;
+
+ if (!strcmp(kdf, "pbkdf2")) {
+ ha = _get_alg(hash);
+ if (!ha)
+ return -EINVAL;
+
+ return pkcs5_pbkdf2(hash, password, password_length, salt, salt_length,
+ iterations, key_length, key, ha->block_length);
+ } else if (!strncmp(kdf, "argon2", 6)) {
+ return argon2(kdf, password, password_length, salt, salt_length,
+ key, key_length, iterations, memory, parallel);
+ }
+
+ return -EINVAL;
+}
+
+/* Block ciphers */
+int crypt_cipher_init(struct crypt_cipher **ctx, const char *name,
+ const char *mode, const void *key, size_t key_length)
+{
+ struct crypt_cipher *h;
+ int r;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ r = crypt_cipher_init_kernel(&h->ck, name, mode, key, key_length);
+ if (r < 0) {
+ free(h);
+ return r;
+ }
+
+ *ctx = h;
+ return 0;
+}
+
+void crypt_cipher_destroy(struct crypt_cipher *ctx)
+{
+ crypt_cipher_destroy_kernel(&ctx->ck);
+ free(ctx);
+}
+
+int crypt_cipher_encrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return crypt_cipher_encrypt_kernel(&ctx->ck, in, out, length, iv, iv_length);
+}
+
+int crypt_cipher_decrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ return crypt_cipher_decrypt_kernel(&ctx->ck, in, out, length, iv, iv_length);
+}
+
+bool crypt_cipher_kernel_only(struct crypt_cipher *ctx __attribute__((unused)))
+{
+ return true;
+}
+
+int crypt_bitlk_decrypt_key(const void *key, size_t key_length,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length)
+{
+ return crypt_bitlk_decrypt_key_kernel(key, key_length, in, out, length,
+ iv, iv_length, tag, tag_length);
+}
+
+int crypt_backend_memeq(const void *m1, const void *m2, size_t n)
+{
+ return NSS_SecureMemcmp(m1, m2, n);
+}
+
+bool crypt_fips_mode(void)
+{
+ return false;
+}
diff --git a/lib/crypto_backend/crypto_openssl.c b/lib/crypto_backend/crypto_openssl.c
new file mode 100644
index 0000000..607ec38
--- /dev/null
+++ b/lib/crypto_backend/crypto_openssl.c
@@ -0,0 +1,849 @@
+/*
+ * OPENSSL crypto backend implementation
+ *
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link the code of portions of this program with the
+ * OpenSSL library under certain conditions as described in each
+ * individual source file, and distribute linked combinations
+ * including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects
+ * for all of the code used other than OpenSSL.
+ */
+
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <openssl/crypto.h>
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/rand.h>
+#include "crypto_backend_internal.h"
+#if OPENSSL_VERSION_MAJOR >= 3
+#include <openssl/provider.h>
+#include <openssl/kdf.h>
+#include <openssl/core_names.h>
+static OSSL_PROVIDER *ossl_legacy = NULL;
+static OSSL_PROVIDER *ossl_default = NULL;
+static OSSL_LIB_CTX *ossl_ctx = NULL;
+static char backend_version[256] = "OpenSSL";
+#endif
+
+#define CONST_CAST(x) (x)(uintptr_t)
+
+static int crypto_backend_initialised = 0;
+
+struct crypt_hash {
+ EVP_MD_CTX *md;
+ const EVP_MD *hash_id;
+ int hash_len;
+};
+
+struct crypt_hmac {
+#if OPENSSL_VERSION_MAJOR >= 3
+ EVP_MAC *mac;
+ EVP_MAC_CTX *md;
+ EVP_MAC_CTX *md_org;
+#else
+ HMAC_CTX *md;
+ const EVP_MD *hash_id;
+#endif
+ int hash_len;
+};
+
+struct crypt_cipher {
+ bool use_kernel;
+ union {
+ struct crypt_cipher_kernel kernel;
+ struct {
+ EVP_CIPHER_CTX *hd_enc;
+ EVP_CIPHER_CTX *hd_dec;
+ const EVP_CIPHER *cipher_type;
+ size_t iv_length;
+ } lib;
+ } u;
+};
+
+struct hash_alg {
+ const char *name;
+ const char *openssl_name;
+};
+
+/*
+ * Compatible wrappers for OpenSSL < 1.1.0 and LibreSSL < 2.7.0
+ */
+#if OPENSSL_VERSION_NUMBER < 0x10100000L || \
+ (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2070000fL)
+
+static int openssl_backend_init(bool fips __attribute__((unused)))
+{
+ OpenSSL_add_all_algorithms();
+ return 0;
+}
+
+static void openssl_backend_exit(void)
+{
+}
+
+static const char *openssl_backend_version(void)
+{
+ return SSLeay_version(SSLEAY_VERSION);
+}
+
+static EVP_MD_CTX *EVP_MD_CTX_new(void)
+{
+ EVP_MD_CTX *md = malloc(sizeof(*md));
+
+ if (md)
+ EVP_MD_CTX_init(md);
+
+ return md;
+}
+
+static void EVP_MD_CTX_free(EVP_MD_CTX *md)
+{
+ EVP_MD_CTX_cleanup(md);
+ free(md);
+}
+
+static HMAC_CTX *HMAC_CTX_new(void)
+{
+ HMAC_CTX *md = malloc(sizeof(*md));
+
+ if (md)
+ HMAC_CTX_init(md);
+
+ return md;
+}
+
+static void HMAC_CTX_free(HMAC_CTX *md)
+{
+ HMAC_CTX_cleanup(md);
+ free(md);
+}
+#else
+static void openssl_backend_exit(void)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ if (ossl_legacy)
+ OSSL_PROVIDER_unload(ossl_legacy);
+ if (ossl_default)
+ OSSL_PROVIDER_unload(ossl_default);
+ if (ossl_ctx)
+ OSSL_LIB_CTX_free(ossl_ctx);
+
+ ossl_legacy = NULL;
+ ossl_default = NULL;
+ ossl_ctx = NULL;
+#endif
+}
+
+static int openssl_backend_init(bool fips)
+{
+/*
+ * OpenSSL >= 3.0.0 provides some algorithms in the legacy provider
+ */
+#if OPENSSL_VERSION_MAJOR >= 3
+ int r;
+
+ /*
+ * In FIPS mode we keep default OpenSSL context & global config
+ */
+ if (!fips) {
+ ossl_ctx = OSSL_LIB_CTX_new();
+ if (!ossl_ctx)
+ return -EINVAL;
+
+ ossl_default = OSSL_PROVIDER_try_load(ossl_ctx, "default", 0);
+ if (!ossl_default) {
+ OSSL_LIB_CTX_free(ossl_ctx);
+ return -EINVAL;
+ }
+
+ /* Optional */
+ ossl_legacy = OSSL_PROVIDER_try_load(ossl_ctx, "legacy", 0);
+ }
+
+ r = snprintf(backend_version, sizeof(backend_version), "%s %s%s%s",
+ OpenSSL_version(OPENSSL_VERSION),
+ ossl_default ? "[default]" : "",
+ ossl_legacy ? "[legacy]" : "",
+ fips ? "[fips]" : "");
+
+ if (r < 0 || (size_t)r >= sizeof(backend_version)) {
+ openssl_backend_exit();
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
+static const char *openssl_backend_version(void)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ return backend_version;
+#else
+ return OpenSSL_version(OPENSSL_VERSION);
+#endif
+}
+#endif
+
+int crypt_backend_init(bool fips)
+{
+ if (crypto_backend_initialised)
+ return 0;
+
+ if (openssl_backend_init(fips))
+ return -EINVAL;
+
+ crypto_backend_initialised = 1;
+ return 0;
+}
+
+void crypt_backend_destroy(void)
+{
+ /*
+ * If the destructor was already called, we must not call it again.
+ */
+ if (!crypto_backend_initialised)
+ return;
+
+ crypto_backend_initialised = 0;
+
+ openssl_backend_exit();
+}
+
+uint32_t crypt_backend_flags(void)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ return 0;
+#else
+ return CRYPT_BACKEND_PBKDF2_INT;
+#endif
+}
+
+const char *crypt_backend_version(void)
+{
+ return openssl_backend_version();
+}
+
+static const char *crypt_hash_compat_name(const char *name)
+{
+ const char *hash_name = name;
+ int i;
+ static struct hash_alg hash_algs[] = {
+ { "blake2b-512", "blake2b512" },
+ { "blake2s-256", "blake2s256" },
+ { NULL, NULL, }};
+
+ if (!name)
+ return NULL;
+
+ i = 0;
+ while (hash_algs[i].name) {
+ if (!strcasecmp(name, hash_algs[i].name)) {
+ hash_name = hash_algs[i].openssl_name;
+ break;
+ }
+ i++;
+ }
+
+ return hash_name;
+}
+
+static const EVP_MD *hash_id_get(const char *name)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ return EVP_MD_fetch(ossl_ctx, crypt_hash_compat_name(name), NULL);
+#else
+ return EVP_get_digestbyname(crypt_hash_compat_name(name));
+#endif
+}
+
+static void hash_id_free(const EVP_MD *hash_id)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ EVP_MD_free(CONST_CAST(EVP_MD*)hash_id);
+#endif
+}
+
+static const EVP_CIPHER *cipher_type_get(const char *name)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ return EVP_CIPHER_fetch(ossl_ctx, name, NULL);
+#else
+ return EVP_get_cipherbyname(name);
+#endif
+}
+
+static void cipher_type_free(const EVP_CIPHER *cipher_type)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ EVP_CIPHER_free(CONST_CAST(EVP_CIPHER*)cipher_type);
+#endif
+}
+
+/* HASH */
+int crypt_hash_size(const char *name)
+{
+ int size;
+ const EVP_MD *hash_id;
+
+ hash_id = hash_id_get(name);
+ if (!hash_id)
+ return -EINVAL;
+
+ size = EVP_MD_size(hash_id);
+ hash_id_free(hash_id);
+ return size;
+}
+
+int crypt_hash_init(struct crypt_hash **ctx, const char *name)
+{
+ struct crypt_hash *h;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ h->md = EVP_MD_CTX_new();
+ if (!h->md) {
+ free(h);
+ return -ENOMEM;
+ }
+
+ h->hash_id = hash_id_get(name);
+ if (!h->hash_id) {
+ EVP_MD_CTX_free(h->md);
+ free(h);
+ return -EINVAL;
+ }
+
+ if (EVP_DigestInit_ex(h->md, h->hash_id, NULL) != 1) {
+ hash_id_free(h->hash_id);
+ EVP_MD_CTX_free(h->md);
+ free(h);
+ return -EINVAL;
+ }
+
+ h->hash_len = EVP_MD_size(h->hash_id);
+ *ctx = h;
+ return 0;
+}
+
+static int crypt_hash_restart(struct crypt_hash *ctx)
+{
+ if (EVP_DigestInit_ex(ctx->md, ctx->hash_id, NULL) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_hash_write(struct crypt_hash *ctx, const char *buffer, size_t length)
+{
+ if (EVP_DigestUpdate(ctx->md, buffer, length) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_hash_final(struct crypt_hash *ctx, char *buffer, size_t length)
+{
+ unsigned char tmp[EVP_MAX_MD_SIZE];
+ unsigned int tmp_len = 0;
+
+ if (length > (size_t)ctx->hash_len)
+ return -EINVAL;
+
+ if (EVP_DigestFinal_ex(ctx->md, tmp, &tmp_len) != 1)
+ return -EINVAL;
+
+ memcpy(buffer, tmp, length);
+ crypt_backend_memzero(tmp, sizeof(tmp));
+
+ if (tmp_len < length)
+ return -EINVAL;
+
+ if (crypt_hash_restart(ctx))
+ return -EINVAL;
+
+ return 0;
+}
+
+void crypt_hash_destroy(struct crypt_hash *ctx)
+{
+ hash_id_free(ctx->hash_id);
+ EVP_MD_CTX_free(ctx->md);
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* HMAC */
+int crypt_hmac_size(const char *name)
+{
+ return crypt_hash_size(name);
+}
+
+int crypt_hmac_init(struct crypt_hmac **ctx, const char *name,
+ const void *key, size_t key_length)
+{
+ struct crypt_hmac *h;
+#if OPENSSL_VERSION_MAJOR >= 3
+ OSSL_PARAM params[] = {
+ OSSL_PARAM_utf8_string(OSSL_MAC_PARAM_DIGEST, CONST_CAST(void*)name, 0),
+ OSSL_PARAM_END
+ };
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ h->mac = EVP_MAC_fetch(ossl_ctx, OSSL_MAC_NAME_HMAC, NULL);
+ if (!h->mac) {
+ free(h);
+ return -EINVAL;
+ }
+
+ h->md = EVP_MAC_CTX_new(h->mac);
+ if (!h->md) {
+ EVP_MAC_free(h->mac);
+ free(h);
+ return -ENOMEM;
+ }
+
+ if (EVP_MAC_init(h->md, key, key_length, params) != 1) {
+ EVP_MAC_CTX_free(h->md);
+ EVP_MAC_free(h->mac);
+ free(h);
+ return -EINVAL;
+ }
+
+ h->hash_len = EVP_MAC_CTX_get_mac_size(h->md);
+ h->md_org = EVP_MAC_CTX_dup(h->md);
+#else
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ h->md = HMAC_CTX_new();
+ if (!h->md) {
+ free(h);
+ return -ENOMEM;
+ }
+
+ h->hash_id = hash_id_get(name);
+ if (!h->hash_id) {
+ HMAC_CTX_free(h->md);
+ free(h);
+ return -EINVAL;
+ }
+
+ HMAC_Init_ex(h->md, key, key_length, h->hash_id, NULL);
+
+ h->hash_len = EVP_MD_size(h->hash_id);
+#endif
+ *ctx = h;
+ return 0;
+}
+
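+/*
+ * Rather than re-keying the EVP_MAC context, crypt_hmac_init() above keeps
+ * a pristine keyed duplicate (md_org); the restart below frees the working
+ * context and clones md_org again, restoring the initial keyed state.
+ */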
+static int crypt_hmac_restart(struct crypt_hmac *ctx)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ EVP_MAC_CTX_free(ctx->md);
+ ctx->md = EVP_MAC_CTX_dup(ctx->md_org);
+ if (!ctx->md)
+ return -EINVAL;
+#else
+ HMAC_Init_ex(ctx->md, NULL, 0, ctx->hash_id, NULL);
+#endif
+ return 0;
+}
+
+int crypt_hmac_write(struct crypt_hmac *ctx, const char *buffer, size_t length)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ return EVP_MAC_update(ctx->md, (const unsigned char *)buffer, length) == 1 ? 0 : -EINVAL;
+#else
+ HMAC_Update(ctx->md, (const unsigned char *)buffer, length);
+ return 0;
+#endif
+}
+
+int crypt_hmac_final(struct crypt_hmac *ctx, char *buffer, size_t length)
+{
+ unsigned char tmp[EVP_MAX_MD_SIZE];
+#if OPENSSL_VERSION_MAJOR >= 3
+ size_t tmp_len = 0;
+
+ if (length > (size_t)ctx->hash_len)
+ return -EINVAL;
+
+ if (EVP_MAC_final(ctx->md, tmp, &tmp_len, sizeof(tmp)) != 1)
+ return -EINVAL;
+#else
+ unsigned int tmp_len = 0;
+
+ if (length > (size_t)ctx->hash_len)
+ return -EINVAL;
+
+ HMAC_Final(ctx->md, tmp, &tmp_len);
+#endif
+ memcpy(buffer, tmp, length);
+ crypt_backend_memzero(tmp, sizeof(tmp));
+
+ if (tmp_len < length)
+ return -EINVAL;
+
+ if (crypt_hmac_restart(ctx))
+ return -EINVAL;
+
+ return 0;
+}
+
+void crypt_hmac_destroy(struct crypt_hmac *ctx)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ EVP_MAC_CTX_free(ctx->md);
+ EVP_MAC_CTX_free(ctx->md_org);
+ EVP_MAC_free(ctx->mac);
+#else
+ hash_id_free(ctx->hash_id);
+ HMAC_CTX_free(ctx->md);
+#endif
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+/* RNG */
+int crypt_backend_rng(char *buffer, size_t length,
+ int quality __attribute__((unused)), int fips __attribute__((unused)))
+{
+ if (RAND_bytes((unsigned char *)buffer, length) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int openssl_pbkdf2(const char *password, size_t password_length,
+ const char *salt, size_t salt_length, uint32_t iterations,
+ const char *hash, char *key, size_t key_length)
+{
+ int r;
+#if OPENSSL_VERSION_MAJOR >= 3
+ EVP_KDF_CTX *ctx;
+ EVP_KDF *pbkdf2;
+ OSSL_PARAM params[] = {
+ OSSL_PARAM_octet_string(OSSL_KDF_PARAM_PASSWORD,
+ CONST_CAST(void*)password, password_length),
+ OSSL_PARAM_octet_string(OSSL_KDF_PARAM_SALT,
+ CONST_CAST(void*)salt, salt_length),
+ OSSL_PARAM_uint32(OSSL_KDF_PARAM_ITER, &iterations),
+ OSSL_PARAM_utf8_string(OSSL_KDF_PARAM_DIGEST,
+ CONST_CAST(void*)hash, 0),
+ OSSL_PARAM_END
+ };
+
+ pbkdf2 = EVP_KDF_fetch(ossl_ctx, "pbkdf2", NULL);
+ if (!pbkdf2)
+ return -EINVAL;
+
+ ctx = EVP_KDF_CTX_new(pbkdf2);
+ if (!ctx) {
+ EVP_KDF_free(pbkdf2);
+ return -EINVAL;
+ }
+
+ r = EVP_KDF_derive(ctx, (unsigned char*)key, key_length, params);
+
+ EVP_KDF_CTX_free(ctx);
+ EVP_KDF_free(pbkdf2);
+#else
+ const EVP_MD *hash_id = EVP_get_digestbyname(crypt_hash_compat_name(hash));
+ if (!hash_id)
+ return -EINVAL;
+
+ /* OpenSSL < 3.0 takes the iteration count as signed int; avoid overflow */
+ if (iterations > INT_MAX)
+ return -EINVAL;
+
+ r = PKCS5_PBKDF2_HMAC(password, (int)password_length, (const unsigned char *)salt,
+ (int)salt_length, iterations, hash_id, (int)key_length, (unsigned char*) key);
+#endif
+ return r == 1 ? 0 : -EINVAL;
+}
+
+static int openssl_argon2(const char *type, const char *password, size_t password_length,
+ const char *salt, size_t salt_length, char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel)
+{
+ return argon2(type, password, password_length, salt, salt_length,
+ key, key_length, iterations, memory, parallel);
+}
+
+/* PBKDF */
+int crypt_pbkdf(const char *kdf, const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t iterations, uint32_t memory, uint32_t parallel)
+{
+ if (!kdf)
+ return -EINVAL;
+
+ if (!strcmp(kdf, "pbkdf2"))
+ return openssl_pbkdf2(password, password_length, salt, salt_length,
+ iterations, hash, key, key_length);
+ if (!strncmp(kdf, "argon2", 6))
+ return openssl_argon2(kdf, password, password_length, salt, salt_length,
+ key, key_length, iterations, memory, parallel);
+ return -EINVAL;
+}
+
+/* Block ciphers */
+static void _cipher_destroy(EVP_CIPHER_CTX **hd_enc, EVP_CIPHER_CTX **hd_dec, const EVP_CIPHER **cipher_type)
+{
+ EVP_CIPHER_CTX_free(*hd_enc);
+ *hd_enc = NULL;
+
+ EVP_CIPHER_CTX_free(*hd_dec);
+ *hd_dec = NULL;
+
+ cipher_type_free(*cipher_type);
+ *cipher_type = NULL;
+}
+
+static int _cipher_init(EVP_CIPHER_CTX **hd_enc, EVP_CIPHER_CTX **hd_dec, const EVP_CIPHER **cipher_type, const char *name,
+ const char *mode, const void *key, size_t key_length, size_t *iv_length)
+{
+ char cipher_name[256];
+ const EVP_CIPHER *type;
+ int r, key_bits;
+
+ key_bits = key_length * 8;
+ if (!strcmp(mode, "xts"))
+ key_bits /= 2;
+
+ r = snprintf(cipher_name, sizeof(cipher_name), "%s-%d-%s", name, key_bits, mode);
+ if (r < 0 || (size_t)r >= sizeof(cipher_name))
+ return -EINVAL;
+
+ type = cipher_type_get(cipher_name);
+ if (!type)
+ return -ENOENT;
+
+ if (EVP_CIPHER_key_length(type) != (int)key_length) {
+ cipher_type_free(type);
+ return -EINVAL;
+ }
+
+ *hd_enc = EVP_CIPHER_CTX_new();
+ *hd_dec = EVP_CIPHER_CTX_new();
+ *iv_length = EVP_CIPHER_iv_length(type);
+
+ if (!*hd_enc || !*hd_dec) {
+ cipher_type_free(type);
+ return -EINVAL;
+ }
+
+ if (EVP_EncryptInit_ex(*hd_enc, type, NULL, key, NULL) != 1 ||
+ EVP_DecryptInit_ex(*hd_dec, type, NULL, key, NULL) != 1) {
+ _cipher_destroy(hd_enc, hd_dec, &type);
+ return -EINVAL;
+ }
+
+ if (EVP_CIPHER_CTX_set_padding(*hd_enc, 0) != 1 ||
+ EVP_CIPHER_CTX_set_padding(*hd_dec, 0) != 1) {
+ _cipher_destroy(hd_enc, hd_dec, &type);
+ return -EINVAL;
+ }
+
+ *cipher_type = type;
+
+ return 0;
+}
+
+int crypt_cipher_init(struct crypt_cipher **ctx, const char *name,
+ const char *mode, const void *key, size_t key_length)
+{
+ struct crypt_cipher *h;
+ int r;
+
+ h = malloc(sizeof(*h));
+ if (!h)
+ return -ENOMEM;
+
+ if (!_cipher_init(&h->u.lib.hd_enc, &h->u.lib.hd_dec, &h->u.lib.cipher_type, name, mode, key,
+ key_length, &h->u.lib.iv_length)) {
+ h->use_kernel = false;
+ *ctx = h;
+ return 0;
+ }
+
+ r = crypt_cipher_init_kernel(&h->u.kernel, name, mode, key, key_length);
+ if (r < 0) {
+ free(h);
+ return r;
+ }
+
+ h->use_kernel = true;
+ *ctx = h;
+ return 0;
+}
+
+void crypt_cipher_destroy(struct crypt_cipher *ctx)
+{
+ if (ctx->use_kernel)
+ crypt_cipher_destroy_kernel(&ctx->u.kernel);
+ else
+ _cipher_destroy(&ctx->u.lib.hd_enc, &ctx->u.lib.hd_dec, &ctx->u.lib.cipher_type);
+ free(ctx);
+}
+
+static int _cipher_encrypt(struct crypt_cipher *ctx, const unsigned char *in, unsigned char *out,
+ int length, const unsigned char *iv, size_t iv_length)
+{
+ int len;
+
+ if (ctx->u.lib.iv_length != iv_length)
+ return -EINVAL;
+
+ if (EVP_EncryptInit_ex(ctx->u.lib.hd_enc, NULL, NULL, NULL, iv) != 1)
+ return -EINVAL;
+
+ if (EVP_EncryptUpdate(ctx->u.lib.hd_enc, out, &len, in, length) != 1)
+ return -EINVAL;
+
+ if (EVP_EncryptFinal(ctx->u.lib.hd_enc, out + len, &len) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int _cipher_decrypt(struct crypt_cipher *ctx, const unsigned char *in, unsigned char *out,
+ int length, const unsigned char *iv, size_t iv_length)
+{
+ int len;
+
+ if (ctx->u.lib.iv_length != iv_length)
+ return -EINVAL;
+
+ if (EVP_DecryptInit_ex(ctx->u.lib.hd_dec, NULL, NULL, NULL, iv) != 1)
+ return -EINVAL;
+
+ if (EVP_DecryptUpdate(ctx->u.lib.hd_dec, out, &len, in, length) != 1)
+ return -EINVAL;
+
+ if (EVP_DecryptFinal(ctx->u.lib.hd_dec, out + len, &len) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int crypt_cipher_encrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ if (ctx->use_kernel)
+ return crypt_cipher_encrypt_kernel(&ctx->u.kernel, in, out, length, iv, iv_length);
+
+ return _cipher_encrypt(ctx, (const unsigned char*)in,
+ (unsigned char *)out, length, (const unsigned char*)iv, iv_length);
+}
+
+int crypt_cipher_decrypt(struct crypt_cipher *ctx,
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length)
+{
+ if (ctx->use_kernel)
+ return crypt_cipher_decrypt_kernel(&ctx->u.kernel, in, out, length, iv, iv_length);
+
+ return _cipher_decrypt(ctx, (const unsigned char*)in,
+ (unsigned char *)out, length, (const unsigned char*)iv, iv_length);
+}
+
+bool crypt_cipher_kernel_only(struct crypt_cipher *ctx)
+{
+ return ctx->use_kernel;
+}
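+
+/*
+ * Illustrative cipher usage (sketch only; key is a placeholder, a 64-byte
+ * key selects aes-256-xts, whose IV is 16 bytes):
+ *
+ *   struct crypt_cipher *c;
+ *   char iv[16] = { 0 }, sector[512];
+ *
+ *   if (!crypt_cipher_init(&c, "aes", "xts", key, 64)) {
+ *       crypt_cipher_encrypt(c, sector, sector, sizeof(sector), iv, sizeof(iv));
+ *       crypt_cipher_destroy(c);
+ *   }
+ */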
+
+int crypt_bitlk_decrypt_key(const void *key, size_t key_length __attribute__((unused)),
+ const char *in, char *out, size_t length,
+ const char *iv, size_t iv_length,
+ const char *tag, size_t tag_length)
+{
+#ifdef EVP_CTRL_CCM_SET_IVLEN
+ EVP_CIPHER_CTX *ctx;
+ int len = 0, r = -EINVAL;
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return -EINVAL;
+
+ if (EVP_DecryptInit_ex(ctx, EVP_aes_256_ccm(), NULL, NULL, NULL) != 1)
+ goto out;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN, iv_length, NULL) != 1)
+ goto out;
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, tag_length, CONST_CAST(void*)tag) != 1)
+ goto out;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, (const unsigned char*)iv) != 1)
+ goto out;
+
+ if (EVP_DecryptUpdate(ctx, (unsigned char*)out, &len, (const unsigned char*)in, length) == 1)
+ r = 0;
+out:
+ EVP_CIPHER_CTX_free(ctx);
+ return r;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+int crypt_backend_memeq(const void *m1, const void *m2, size_t n)
+{
+ return CRYPTO_memcmp(m1, m2, n);
+}
+
+#if !ENABLE_FIPS
+bool crypt_fips_mode(void) { return false; }
+#else
+static bool openssl_fips_mode(void)
+{
+#if OPENSSL_VERSION_MAJOR >= 3
+ return EVP_default_properties_is_fips_enabled(NULL);
+#else
+ return FIPS_mode();
+#endif
+}
+
+bool crypt_fips_mode(void)
+{
+ static bool fips_mode = false, fips_checked = false;
+
+ if (fips_checked)
+ return fips_mode;
+
+ fips_mode = openssl_fips_mode();
+ fips_checked = true;
+
+ return fips_mode;
+}
+#endif /* ENABLE FIPS */
diff --git a/lib/crypto_backend/crypto_storage.c b/lib/crypto_backend/crypto_storage.c
new file mode 100644
index 0000000..13479dd
--- /dev/null
+++ b/lib/crypto_backend/crypto_storage.c
@@ -0,0 +1,347 @@
+/*
+ * Generic wrapper for storage encryption modes and Initial Vectors
+ * (reimplementation of some functions from Linux dm-crypt kernel)
+ *
+ * Copyright (C) 2014-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <strings.h>
+#include "bitops.h"
+#include "crypto_backend.h"
+
+#define SECTOR_SHIFT 9
+
+/*
+ * Internal IV helper
+ * IV documentation: https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt
+ */
+struct crypt_sector_iv {
+ enum { IV_NONE, IV_NULL, IV_PLAIN, IV_PLAIN64, IV_ESSIV, IV_BENBI, IV_PLAIN64BE, IV_EBOIV } type;
+ int iv_size;
+ char *iv;
+ struct crypt_cipher *cipher;
+ int shift;
+};
+
+/* Block encryption storage context */
+struct crypt_storage {
+ size_t sector_size;
+ unsigned iv_shift;
+ struct crypt_cipher *cipher;
+ struct crypt_sector_iv cipher_iv;
+};
+
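+/* Integer floor(log2(x)); e.g. int_log2(512) == 9, int_log2(4096) == 12. */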
+static int int_log2(unsigned int x)
+{
+ int r = 0;
+ for (x >>= 1; x > 0; x >>= 1)
+ r++;
+ return r;
+}
+
+static int crypt_sector_iv_init(struct crypt_sector_iv *ctx,
+ const char *cipher_name, const char *mode_name,
+ const char *iv_name, const void *key, size_t key_length,
+ size_t sector_size)
+{
+ int r;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->iv_size = crypt_cipher_ivsize(cipher_name, mode_name);
+ if (ctx->iv_size < 0 || (strcmp(mode_name, "ecb") && ctx->iv_size < 8))
+ return -ENOENT;
+
+ if (!strcmp(cipher_name, "cipher_null") ||
+ !strcmp(mode_name, "ecb")) {
+ if (iv_name)
+ return -EINVAL;
+ ctx->type = IV_NONE;
+ ctx->iv_size = 0;
+ return 0;
+ } else if (!iv_name) {
+ return -EINVAL;
+ } else if (!strcasecmp(iv_name, "null")) {
+ ctx->type = IV_NULL;
+ } else if (!strcasecmp(iv_name, "plain64")) {
+ ctx->type = IV_PLAIN64;
+ } else if (!strcasecmp(iv_name, "plain64be")) {
+ ctx->type = IV_PLAIN64BE;
+ } else if (!strcasecmp(iv_name, "plain")) {
+ ctx->type = IV_PLAIN;
+ } else if (!strncasecmp(iv_name, "essiv:", 6)) {
+ struct crypt_hash *h = NULL;
+ char *hash_name = strchr(iv_name, ':');
+ int hash_size;
+ char tmp[256];
+
+ if (!hash_name)
+ return -EINVAL;
+
+ hash_size = crypt_hash_size(++hash_name);
+ if (hash_size < 0)
+ return -ENOENT;
+
+ if ((unsigned)hash_size > sizeof(tmp))
+ return -EINVAL;
+
+ if (crypt_hash_init(&h, hash_name))
+ return -EINVAL;
+
+ r = crypt_hash_write(h, key, key_length);
+ if (r) {
+ crypt_hash_destroy(h);
+ return r;
+ }
+
+ r = crypt_hash_final(h, tmp, hash_size);
+ crypt_hash_destroy(h);
+ if (r) {
+ crypt_backend_memzero(tmp, sizeof(tmp));
+ return r;
+ }
+
+ r = crypt_cipher_init(&ctx->cipher, cipher_name, "ecb",
+ tmp, hash_size);
+ crypt_backend_memzero(tmp, sizeof(tmp));
+ if (r)
+ return r;
+
+ ctx->type = IV_ESSIV;
+ } else if (!strncasecmp(iv_name, "benbi", 5)) {
+ int log = int_log2(ctx->iv_size);
+ if (log > SECTOR_SHIFT)
+ return -EINVAL;
+
+ ctx->type = IV_BENBI;
+ ctx->shift = SECTOR_SHIFT - log;
+ } else if (!strncasecmp(iv_name, "eboiv", 5)) {
+ r = crypt_cipher_init(&ctx->cipher, cipher_name, "ecb",
+ key, key_length);
+ if (r)
+ return r;
+
+ ctx->type = IV_EBOIV;
+ ctx->shift = int_log2(sector_size);
+ } else
+ return -ENOENT;
+
+ ctx->iv = malloc(ctx->iv_size);
+ if (!ctx->iv)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int crypt_sector_iv_generate(struct crypt_sector_iv *ctx, uint64_t sector)
+{
+ uint64_t val, *u64_iv;
+ uint32_t *u32_iv;
+
+ switch (ctx->type) {
+ case IV_NONE:
+ break;
+ case IV_NULL:
+ memset(ctx->iv, 0, ctx->iv_size);
+ break;
+ case IV_PLAIN:
+ memset(ctx->iv, 0, ctx->iv_size);
+ u32_iv = (void *)ctx->iv;
+ *u32_iv = cpu_to_le32(sector & 0xffffffff);
+ break;
+ case IV_PLAIN64:
+ memset(ctx->iv, 0, ctx->iv_size);
+ u64_iv = (void *)ctx->iv;
+ *u64_iv = cpu_to_le64(sector);
+ break;
+ case IV_PLAIN64BE:
+ memset(ctx->iv, 0, ctx->iv_size);
+ /* iv_size is at least of size u64; usually it is 16 bytes */
+ u64_iv = (void *)&ctx->iv[ctx->iv_size - sizeof(uint64_t)];
+ *u64_iv = cpu_to_be64(sector);
+ break;
+ case IV_ESSIV:
+ memset(ctx->iv, 0, ctx->iv_size);
+ u64_iv = (void *)ctx->iv;
+ *u64_iv = cpu_to_le64(sector);
+ return crypt_cipher_encrypt(ctx->cipher,
+ ctx->iv, ctx->iv, ctx->iv_size, NULL, 0);
+ break;
+ case IV_BENBI:
+ memset(ctx->iv, 0, ctx->iv_size);
+ val = cpu_to_be64((sector << ctx->shift) + 1);
+ memcpy(ctx->iv + ctx->iv_size - sizeof(val), &val, sizeof(val));
+ break;
+ case IV_EBOIV:
+ memset(ctx->iv, 0, ctx->iv_size);
+ u64_iv = (void *)ctx->iv;
+ *u64_iv = cpu_to_le64(sector << ctx->shift);
+ return crypt_cipher_encrypt(ctx->cipher,
+ ctx->iv, ctx->iv, ctx->iv_size, NULL, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
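+
+/*
+ * Worked examples (illustrative, assuming a 16-byte IV):
+ *   plain64, sector 5: bytes 05 00 00 00 00 00 00 00 followed by zeros
+ *       (little-endian 64-bit sector number);
+ *   benbi: shift = 9 - log2(16) = 5, so sector 3 stores big-endian
+ *       (3 << 5) + 1 = 97 in the last 8 bytes of the IV.
+ */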
+
+static void crypt_sector_iv_destroy(struct crypt_sector_iv *ctx)
+{
+ if (ctx->type == IV_ESSIV || ctx->type == IV_EBOIV)
+ crypt_cipher_destroy(ctx->cipher);
+
+ if (ctx->iv) {
+ memset(ctx->iv, 0, ctx->iv_size);
+ free(ctx->iv);
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+/* Block encryption storage wrappers */
+
+int crypt_storage_init(struct crypt_storage **ctx,
+ size_t sector_size,
+ const char *cipher,
+ const char *cipher_mode,
+ const void *key, size_t key_length,
+ bool large_iv)
+{
+ struct crypt_storage *s;
+ char mode_name[64];
+ char *cipher_iv = NULL;
+ int r = -EIO;
+
+ if (sector_size < (1 << SECTOR_SHIFT) ||
+ sector_size > (1 << (SECTOR_SHIFT + 3)) ||
+ sector_size & (sector_size - 1))
+ return -EINVAL;
+
+ s = malloc(sizeof(*s));
+ if (!s)
+ return -ENOMEM;
+ memset(s, 0, sizeof(*s));
+
+ /* Remove IV if present */
+ strncpy(mode_name, cipher_mode, sizeof(mode_name));
+ mode_name[sizeof(mode_name) - 1] = 0;
+ cipher_iv = strchr(mode_name, '-');
+ if (cipher_iv) {
+ *cipher_iv = '\0';
+ cipher_iv++;
+ }
+
+ r = crypt_cipher_init(&s->cipher, cipher, mode_name, key, key_length);
+ if (r) {
+ crypt_storage_destroy(s);
+ return r;
+ }
+
+ r = crypt_sector_iv_init(&s->cipher_iv, cipher, mode_name, cipher_iv, key, key_length, sector_size);
+ if (r) {
+ crypt_storage_destroy(s);
+ return r;
+ }
+
+ s->sector_size = sector_size;
+ s->iv_shift = large_iv ? int_log2(sector_size) - SECTOR_SHIFT : 0;
+
+ *ctx = s;
+ return 0;
+}
+
+int crypt_storage_decrypt(struct crypt_storage *ctx,
+ uint64_t iv_offset,
+ uint64_t length, char *buffer)
+{
+ uint64_t i;
+ int r = 0;
+
+ if (length & (ctx->sector_size - 1))
+ return -EINVAL;
+
+ if (iv_offset & ((ctx->sector_size >> SECTOR_SHIFT) - 1))
+ return -EINVAL;
+
+ for (i = 0; i < length; i += ctx->sector_size) {
+ r = crypt_sector_iv_generate(&ctx->cipher_iv, (iv_offset + (i >> SECTOR_SHIFT)) >> ctx->iv_shift);
+ if (r)
+ break;
+ r = crypt_cipher_decrypt(ctx->cipher,
+ &buffer[i],
+ &buffer[i],
+ ctx->sector_size,
+ ctx->cipher_iv.iv,
+ ctx->cipher_iv.iv_size);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int crypt_storage_encrypt(struct crypt_storage *ctx,
+ uint64_t iv_offset,
+ uint64_t length, char *buffer)
+{
+ uint64_t i;
+ int r = 0;
+
+ if (length & (ctx->sector_size - 1))
+ return -EINVAL;
+
+ if (iv_offset & ((ctx->sector_size >> SECTOR_SHIFT) - 1))
+ return -EINVAL;
+
+ for (i = 0; i < length; i += ctx->sector_size) {
+ r = crypt_sector_iv_generate(&ctx->cipher_iv, (iv_offset + (i >> SECTOR_SHIFT)) >> ctx->iv_shift);
+ if (r)
+ break;
+ r = crypt_cipher_encrypt(ctx->cipher,
+ &buffer[i],
+ &buffer[i],
+ ctx->sector_size,
+ ctx->cipher_iv.iv,
+ ctx->cipher_iv.iv_size);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+void crypt_storage_destroy(struct crypt_storage *ctx)
+{
+ if (!ctx)
+ return;
+
+ crypt_sector_iv_destroy(&ctx->cipher_iv);
+
+ if (ctx->cipher)
+ crypt_cipher_destroy(ctx->cipher);
+
+ memset(ctx, 0, sizeof(*ctx));
+ free(ctx);
+}
+
+bool crypt_storage_kernel_only(struct crypt_storage *ctx)
+{
+ return crypt_cipher_kernel_only(ctx->cipher);
+}
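+
+/*
+ * Illustrative use of the storage wrapper (sketch only; key is a
+ * placeholder, a 64-byte key selects aes-256-xts):
+ *
+ *   struct crypt_storage *s;
+ *   char sector[512];
+ *
+ *   if (!crypt_storage_init(&s, 512, "aes", "xts-plain64", key, 64, false)) {
+ *       crypt_storage_decrypt(s, 0, sizeof(sector), sector);
+ *       crypt_storage_destroy(s);
+ *   }
+ */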
diff --git a/lib/crypto_backend/pbkdf2_generic.c b/lib/crypto_backend/pbkdf2_generic.c
new file mode 100644
index 0000000..9e87e19
--- /dev/null
+++ b/lib/crypto_backend/pbkdf2_generic.c
@@ -0,0 +1,232 @@
+/*
+ * Implementation of Password-Based Cryptography as per PKCS#5
+ * Copyright (C) 2002,2003 Simon Josefsson
+ * Copyright (C) 2004 Free Software Foundation
+ *
+ * cryptsetup related changes
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2012-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <errno.h>
+#include <alloca.h>
+#include "crypto_backend_internal.h"
+
+static int hash_buf(const char *src, size_t src_len,
+ char *dst, size_t dst_len,
+ const char *hash_name)
+{
+ struct crypt_hash *hd = NULL;
+ int r;
+
+ if (crypt_hash_init(&hd, hash_name))
+ return -EINVAL;
+
+ r = crypt_hash_write(hd, src, src_len);
+
+ if (!r)
+ r = crypt_hash_final(hd, dst, dst_len);
+
+ crypt_hash_destroy(hd);
+ return r;
+}
+
+/*
+ * 5.2 PBKDF2
+ *
+ * PBKDF2 applies a pseudorandom function (see Appendix B.1 for an
+ * example) to derive keys. The length of the derived key is essentially
+ * unbounded. (However, the maximum effective search space for the
+ * derived key may be limited by the structure of the underlying
+ * pseudorandom function. See Appendix B.1 for further discussion.)
+ * PBKDF2 is recommended for new applications.
+ *
+ * PBKDF2 (P, S, c, dkLen)
+ *
+ * Options: PRF underlying pseudorandom function (hLen
+ * denotes the length in octets of the
+ * pseudorandom function output)
+ *
+ * Input: P password, an octet string (ASCII or UTF-8)
+ * S salt, an octet string
+ * c iteration count, a positive integer
+ * dkLen intended length in octets of the derived
+ * key, a positive integer, at most
+ * (2^32 - 1) * hLen
+ *
+ * Output: DK derived key, a dkLen-octet string
+ */
+
+/*
+ * If hash_block_size is not zero, the HMAC key is pre-hashed
+ * inside this function.
+ * This prevents a situation where the crypto backend doesn't support
+ * long HMAC keys, or hashes the long key again in every iteration
+ * (because crypt_hmac_final() cannot do a simple key reset).
+ */
+
+#define MAX_PRF_BLOCK_LEN 80
+
+int pkcs5_pbkdf2(const char *hash,
+ const char *P, size_t Plen,
+ const char *S, size_t Slen,
+ unsigned int c, unsigned int dkLen,
+ char *DK, unsigned int hash_block_size)
+{
+ struct crypt_hmac *hmac;
+ char U[MAX_PRF_BLOCK_LEN];
+ char T[MAX_PRF_BLOCK_LEN];
+ char P_hash[MAX_PRF_BLOCK_LEN];
+ int i, k, rc = -EINVAL;
+ unsigned int u, hLen, l, r;
+ size_t tmplen = Slen + 4;
+ char *tmp;
+
+ tmp = alloca(tmplen);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ hLen = crypt_hmac_size(hash);
+ if (hLen == 0 || hLen > MAX_PRF_BLOCK_LEN)
+ return -EINVAL;
+
+ if (c == 0)
+ return -EINVAL;
+
+ if (dkLen == 0)
+ return -EINVAL;
+
+ /*
+ *
+ * Steps:
+ *
+ * 1. If dkLen > (2^32 - 1) * hLen, output "derived key too long" and
+ * stop.
+ */
+
+ if (dkLen > 4294967295U)
+ return -EINVAL;
+
+ /*
+ * 2. Let l be the number of hLen-octet blocks in the derived key,
+ * rounding up, and let r be the number of octets in the last
+ * block:
+ *
+ * l = CEIL (dkLen / hLen) ,
+ * r = dkLen - (l - 1) * hLen .
+ *
+ * Here, CEIL (x) is the "ceiling" function, i.e. the smallest
+ * integer greater than, or equal to, x.
+ */
+
+ l = dkLen / hLen;
+ if (dkLen % hLen)
+ l++;
+ r = dkLen - (l - 1) * hLen;
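+
+ /* Worked example (illustrative): dkLen = 32 with HMAC-SHA-1
+ * (hLen = 20) gives l = CEIL(32 / 20) = 2 and r = 32 - 20 = 12. */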
+
+ /*
+ * 3. For each block of the derived key apply the function F defined
+ * below to the password P, the salt S, the iteration count c, and
+ * the block index to compute the block:
+ *
+ * T_1 = F (P, S, c, 1) ,
+ * T_2 = F (P, S, c, 2) ,
+ * ...
+ * T_l = F (P, S, c, l) ,
+ *
+ * where the function F is defined as the exclusive-or sum of the
+ * first c iterates of the underlying pseudorandom function PRF
+ * applied to the password P and the concatenation of the salt S
+ * and the block index i:
+ *
+ * F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
+ *
+ * where
+ *
+ * U_1 = PRF (P, S || INT (i)) ,
+ * U_2 = PRF (P, U_1) ,
+ * ...
+ * U_c = PRF (P, U_{c-1}) .
+ *
+ * Here, INT (i) is a four-octet encoding of the integer i, most
+ * significant octet first.
+ *
+ * 4. Concatenate the blocks and extract the first dkLen octets to
+ * produce a derived key DK:
+ *
+ * DK = T_1 || T_2 || ... || T_l<0..r-1>
+ *
+ * 5. Output the derived key DK.
+ *
+ * Note. The construction of the function F follows a "belt-and-
+ * suspenders" approach. The iterates U_i are computed recursively to
+ * remove a degree of parallelism from an opponent; they are exclusive-
+ * ored together to reduce concerns about the recursion degenerating
+ * into a small set of values.
+ *
+ */
+
+ /* If hash_block_size is provided, hash password in advance. */
+ if (hash_block_size > 0 && Plen > hash_block_size) {
+ if (hash_buf(P, Plen, P_hash, hLen, hash))
+ return -EINVAL;
+
+ if (crypt_hmac_init(&hmac, hash, P_hash, hLen))
+ return -EINVAL;
+ crypt_backend_memzero(P_hash, sizeof(P_hash));
+ } else {
+ if (crypt_hmac_init(&hmac, hash, P, Plen))
+ return -EINVAL;
+ }
+
+ for (i = 1; (unsigned int) i <= l; i++) {
+ memset(T, 0, hLen);
+
+ for (u = 1; u <= c ; u++) {
+ if (u == 1) {
+ memcpy(tmp, S, Slen);
+ tmp[Slen + 0] = (i & 0xff000000) >> 24;
+ tmp[Slen + 1] = (i & 0x00ff0000) >> 16;
+ tmp[Slen + 2] = (i & 0x0000ff00) >> 8;
+ tmp[Slen + 3] = (i & 0x000000ff) >> 0;
+
+ if (crypt_hmac_write(hmac, tmp, tmplen))
+ goto out;
+ } else {
+ if (crypt_hmac_write(hmac, U, hLen))
+ goto out;
+ }
+
+ if (crypt_hmac_final(hmac, U, hLen))
+ goto out;
+
+ for (k = 0; (unsigned int) k < hLen; k++)
+ T[k] ^= U[k];
+ }
+
+ memcpy(DK + (i - 1) * hLen, T, (unsigned int) i == l ? r : hLen);
+ }
+ rc = 0;
+out:
+ crypt_hmac_destroy(hmac);
+ crypt_backend_memzero(U, sizeof(U));
+ crypt_backend_memzero(T, sizeof(T));
+ crypt_backend_memzero(tmp, tmplen);
+
+ return rc;
+}
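+
+/*
+ * Known-answer check (RFC 6070, test vector 1; for validation only):
+ *   pkcs5_pbkdf2("sha1", "password", 8, "salt", 4, 1, 20, DK, 64)
+ * should produce
+ *   DK = 0c 60 c8 0f 96 1f 0e 71 f3 a9 b5 24 af 60 12 06 2f e0 37 a6
+ */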
diff --git a/lib/crypto_backend/pbkdf_check.c b/lib/crypto_backend/pbkdf_check.c
new file mode 100644
index 0000000..53a2da9
--- /dev/null
+++ b/lib/crypto_backend/pbkdf_check.c
@@ -0,0 +1,443 @@
+/*
+ * PBKDF performance check
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2012-2023 Milan Broz
+ * Copyright (C) 2016-2020 Ondrej Mosnacek
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include "crypto_backend.h"
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
+#endif
+
+#define BENCH_MIN_MS 250
+#define BENCH_MIN_MS_FAST 10
+#define BENCH_PERCENT_ATLEAST 95
+#define BENCH_PERCENT_ATMOST 110
+#define BENCH_SAMPLES_FAST 3
+#define BENCH_SAMPLES_SLOW 1
+
+/* These PBKDF limits must never be violated */
+int crypt_pbkdf_get_limits(const char *kdf, struct crypt_pbkdf_limits *limits)
+{
+ if (!kdf || !limits)
+ return -EINVAL;
+
+ if (!strcmp(kdf, "pbkdf2")) {
+ limits->min_iterations = 1000; /* recommendation in NIST SP 800-132 */
+ limits->max_iterations = UINT32_MAX;
+ limits->min_memory = 0; /* N/A */
+ limits->min_bench_memory = 0; /* N/A */
+ limits->max_memory = 0; /* N/A */
+ limits->min_parallel = 0; /* N/A */
+ limits->max_parallel = 0; /* N/A */
+ return 0;
+ } else if (!strcmp(kdf, "argon2i") || !strcmp(kdf, "argon2id")) {
+ limits->min_iterations = 4;
+ limits->max_iterations = UINT32_MAX;
+ limits->min_memory = 32; /* hard limit */
+ limits->min_bench_memory = 64 * 1024; /* 64 MiB minimum for benchmark */
+ limits->max_memory = 4 * 1024 * 1024; /* 4 GiB */
+ limits->min_parallel = 1;
+ limits->max_parallel = 4;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static long time_ms(struct rusage *start, struct rusage *end)
+{
+ int count_kernel_time = 0;
+ long ms;
+
+ if (crypt_backend_flags() & CRYPT_BACKEND_KERNEL)
+ count_kernel_time = 1;
+
+ /*
+ * If there is no self usage info, count system time.
+ * This seems like a getrusage() bug in some hypervisors...
+ */
+ if (!end->ru_utime.tv_sec && !start->ru_utime.tv_sec &&
+ !end->ru_utime.tv_usec && !start->ru_utime.tv_usec)
+ count_kernel_time = 1;
+
+ ms = (end->ru_utime.tv_sec - start->ru_utime.tv_sec) * 1000;
+ ms += (end->ru_utime.tv_usec - start->ru_utime.tv_usec) / 1000;
+
+ if (count_kernel_time) {
+ ms += (end->ru_stime.tv_sec - start->ru_stime.tv_sec) * 1000;
+ ms += (end->ru_stime.tv_usec - start->ru_stime.tv_usec) / 1000;
+ }
+
+ return ms;
+}
+
+static long timespec_ms(struct timespec *start, struct timespec *end)
+{
+ return (end->tv_sec - start->tv_sec) * 1000 +
+ (end->tv_nsec - start->tv_nsec) / (1000 * 1000);
+}
+
+static int measure_argon2(const char *kdf, const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ char *key, size_t key_length,
+ uint32_t t_cost, uint32_t m_cost, uint32_t parallel,
+ size_t samples, long ms_atleast, long *out_ms)
+{
+ long ms, ms_min = LONG_MAX;
+ int r;
+ size_t i;
+
+ for (i = 0; i < samples; i++) {
+ struct timespec tstart, tend;
+
+ /*
+ * NOTE: We must use clock_gettime here, because Argon2 can run over
+ * multiple threads, and thus we care about real time, not CPU time!
+ */
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &tstart) < 0)
+ return -EINVAL;
+
+ r = crypt_pbkdf(kdf, NULL, password, password_length, salt,
+ salt_length, key, key_length, t_cost, m_cost, parallel);
+ if (r < 0)
+ return r;
+
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &tend) < 0)
+ return -EINVAL;
+
+ ms = timespec_ms(&tstart, &tend);
+ if (ms < 0)
+ return -EINVAL;
+
+ if (ms < ms_atleast) {
+ /* early exit */
+ ms_min = ms;
+ break;
+ }
+ if (ms < ms_min) {
+ ms_min = ms;
+ }
+ }
+ *out_ms = ms_min;
+ return 0;
+}
+
+#define CONTINUE 0
+#define FINAL 1
+static int next_argon2_params(uint32_t *t_cost, uint32_t *m_cost,
+ uint32_t min_t_cost, uint32_t min_m_cost,
+ uint32_t max_m_cost, long ms, uint32_t target_ms)
+{
+ uint32_t old_t_cost, old_m_cost, new_t_cost, new_m_cost;
+ uint64_t num, denom;
+
+ old_t_cost = *t_cost;
+ old_m_cost = *m_cost;
+
+ if ((uint32_t)ms > target_ms) {
+ /* decreasing, first try to lower t_cost, then m_cost */
+ num = (uint64_t)*t_cost * (uint64_t)target_ms;
+ denom = (uint64_t)ms;
+ new_t_cost = (uint32_t)(num / denom);
+ if (new_t_cost < min_t_cost) {
+ num = (uint64_t)*t_cost * (uint64_t)*m_cost *
+ (uint64_t)target_ms;
+ denom = (uint64_t)min_t_cost * (uint64_t)ms;
+ *t_cost = min_t_cost;
+ *m_cost = (uint32_t)(num / denom);
+ if (*m_cost < min_m_cost) {
+ *m_cost = min_m_cost;
+ return FINAL;
+ }
+ } else {
+ *t_cost = new_t_cost;
+ }
+ } else {
+ /* increasing, first try to increase m_cost, then t_cost */
+ num = (uint64_t)*m_cost * (uint64_t)target_ms;
+ denom = (uint64_t)ms;
+ new_m_cost = (uint32_t)(num / denom);
+ if (new_m_cost > max_m_cost) {
+ num = (uint64_t)*t_cost * (uint64_t)*m_cost *
+ (uint64_t)target_ms;
+ denom = (uint64_t)max_m_cost * (uint64_t)ms;
+ *t_cost = (uint32_t)(num / denom);
+ *m_cost = max_m_cost;
+ if (*t_cost <= min_t_cost) {
+ *t_cost = min_t_cost;
+ return FINAL;
+ }
+ } else if (new_m_cost < min_m_cost) {
+ *m_cost = min_m_cost;
+ return FINAL;
+ } else {
+ *m_cost = new_m_cost;
+ }
+ }
+
+ /* do not continue if it is the same as in the previous run */
+ if (old_t_cost == *t_cost && old_m_cost == *m_cost)
+ return FINAL;
+
+ return CONTINUE;
+}
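+
+/*
+ * Scaling example (illustrative): with target_ms = 2000 and a measured
+ * ms = 500, the increasing branch yields
+ * new_m_cost = m_cost * 2000 / 500 = 4 * m_cost (clamped to max_m_cost),
+ * i.e. the cost is scaled linearly by the time ratio.
+ */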
+
+static int crypt_argon2_check(const char *kdf, const char *password,
+ size_t password_length, const char *salt,
+ size_t salt_length, size_t key_length,
+ uint32_t min_t_cost, uint32_t min_m_cost, uint32_t max_m_cost,
+ uint32_t parallel, uint32_t target_ms,
+ uint32_t *out_t_cost, uint32_t *out_m_cost,
+ int (*progress)(uint32_t time_ms, void *usrptr),
+ void *usrptr)
+{
+ int r = 0;
+ char *key = NULL;
+ uint32_t t_cost, m_cost;
+ long ms;
+ long ms_atleast = (long)target_ms * BENCH_PERCENT_ATLEAST / 100;
+ long ms_atmost = (long)target_ms * BENCH_PERCENT_ATMOST / 100;
+
+ if (key_length <= 0 || target_ms <= 0)
+ return -EINVAL;
+
+ if (min_m_cost < (parallel * 8))
+ min_m_cost = parallel * 8;
+
+ if (max_m_cost < min_m_cost)
+ return -EINVAL;
+
+ key = malloc(key_length);
+ if (!key)
+ return -ENOMEM;
+
+ t_cost = min_t_cost;
+ m_cost = min_m_cost;
+
+ /* 1. Find some small parameters, s. t. ms >= BENCH_MIN_MS: */
+ while (1) {
+ r = measure_argon2(kdf, password, password_length, salt, salt_length,
+ key, key_length, t_cost, m_cost, parallel,
+ BENCH_SAMPLES_FAST, BENCH_MIN_MS, &ms);
+ if (!r) {
+ /* Update parameters to actual measurement */
+ *out_t_cost = t_cost;
+ *out_m_cost = m_cost;
+ if (progress && progress((uint32_t)ms, usrptr))
+ r = -EINTR;
+ }
+
+ if (r < 0)
+ goto out;
+
+ if (ms >= BENCH_MIN_MS)
+ break;
+
+ if (m_cost == max_m_cost) {
+ if (ms < BENCH_MIN_MS_FAST)
+ t_cost *= 16;
+ else {
+ uint32_t new = (t_cost * BENCH_MIN_MS) / (uint32_t)ms;
+ if (new == t_cost)
+ break;
+
+ t_cost = new;
+ }
+ } else {
+ if (ms < BENCH_MIN_MS_FAST)
+ m_cost *= 16;
+ else {
+ uint32_t new = (m_cost * BENCH_MIN_MS) / (uint32_t)ms;
+ if (new == m_cost)
+ break;
+
+ m_cost = new;
+ }
+ if (m_cost > max_m_cost) {
+ m_cost = max_m_cost;
+ }
+ }
+ }
+ /*
+ * 2. Use the params obtained in (1.) to estimate the target params.
+ * 3. Then repeatedly measure the candidate params and if they fall out of
+ * the acceptance range (-5 %/+10 %), try to improve the estimate:
+ */
+ do {
+ if (next_argon2_params(&t_cost, &m_cost, min_t_cost, min_m_cost,
+ max_m_cost, ms, target_ms)) {
+ /* Update parameters to final computation */
+ *out_t_cost = t_cost;
+ *out_m_cost = m_cost;
+ break;
+ }
+
+ r = measure_argon2(kdf, password, password_length, salt, salt_length,
+ key, key_length, t_cost, m_cost, parallel,
+ BENCH_SAMPLES_SLOW, ms_atleast, &ms);
+
+ if (!r) {
+ /* Update parameters to actual measurement */
+ *out_t_cost = t_cost;
+ *out_m_cost = m_cost;
+ if (progress && progress((uint32_t)ms, usrptr))
+ r = -EINTR;
+ }
+
+ if (r < 0)
+ break;
+
+ } while (ms < ms_atleast || ms > ms_atmost);
+out:
+ if (key) {
+ crypt_backend_memzero(key, key_length);
+ free(key);
+ }
+ return r;
+}
+
+/* This code benchmarks PBKDF2 and returns iterations per second for the specified hash */
+static int crypt_pbkdf_check(const char *kdf, const char *hash,
+ const char *password, size_t password_length,
+ const char *salt, size_t salt_length,
+ size_t key_length, uint32_t *iter_secs, uint32_t target_ms,
+ int (*progress)(uint32_t time_ms, void *usrptr), void *usrptr)
+
+{
+ struct rusage rstart, rend;
+ int r = 0, step = 0;
+ long ms = 0;
+ char *key = NULL;
+ uint32_t iterations;
+ double PBKDF2_temp;
+
+ if (!kdf || !hash || key_length <= 0)
+ return -EINVAL;
+
+ key = malloc(key_length);
+ if (!key)
+ return -ENOMEM;
+
+ *iter_secs = 0;
+ iterations = 1 << 15;
+ while (1) {
+ if (getrusage(RUSAGE_SELF, &rstart) < 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = crypt_pbkdf(kdf, hash, password, password_length, salt,
+ salt_length, key, key_length, iterations, 0, 0);
+
+ if (r < 0)
+ goto out;
+
+ if (getrusage(RUSAGE_SELF, &rend) < 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ ms = time_ms(&rstart, &rend);
+ if (ms) {
+ PBKDF2_temp = (double)iterations * target_ms / ms;
+ if (PBKDF2_temp > UINT32_MAX) {
+ r = -EINVAL;
+ goto out;
+ }
+ *iter_secs = (uint32_t)PBKDF2_temp;
+ }
+
+ if (progress && progress((uint32_t)ms, usrptr)) {
+ r = -EINTR;
+ goto out;
+ }
+
+ if (ms > 500)
+ break;
+
+ if (ms <= 62)
+ iterations <<= 4;
+ else if (ms <= 125)
+ iterations <<= 3;
+ else if (ms <= 250)
+ iterations <<= 2;
+ else
+ iterations <<= 1;
+
+ if (++step > 10 || !iterations) {
+ r = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ if (key) {
+ crypt_backend_memzero(key, key_length);
+ free(key);
+ }
+ return r;
+}
+
+int crypt_pbkdf_perf(const char *kdf, const char *hash,
+ const char *password, size_t password_size,
+ const char *salt, size_t salt_size,
+ size_t volume_key_size, uint32_t time_ms,
+ uint32_t max_memory_kb, uint32_t parallel_threads,
+ uint32_t *iterations_out, uint32_t *memory_out,
+ int (*progress)(uint32_t time_ms, void *usrptr), void *usrptr)
+{
+ struct crypt_pbkdf_limits pbkdf_limits;
+ int r = -EINVAL;
+ uint32_t min_memory;
+
+ if (!kdf || !iterations_out || !memory_out)
+ return -EINVAL;
+
+ r = crypt_pbkdf_get_limits(kdf, &pbkdf_limits);
+ if (r < 0)
+ return r;
+
+ min_memory = pbkdf_limits.min_bench_memory;
+ if (min_memory > max_memory_kb)
+ min_memory = max_memory_kb;
+
+ *memory_out = 0;
+ *iterations_out = 0;
+
+ if (!strcmp(kdf, "pbkdf2"))
+ r = crypt_pbkdf_check(kdf, hash, password, password_size,
+ salt, salt_size, volume_key_size,
+ iterations_out, time_ms, progress, usrptr);
+
+ else if (!strncmp(kdf, "argon2", 6))
+ r = crypt_argon2_check(kdf, password, password_size,
+ salt, salt_size, volume_key_size,
+ pbkdf_limits.min_iterations,
+ min_memory,
+ max_memory_kb,
+ parallel_threads, time_ms, iterations_out,
+ memory_out, progress, usrptr);
+ return r;
+}
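+
+/*
+ * Benchmark call sketch (illustrative; pw/salt variables are placeholders):
+ *
+ *   uint32_t iters, memory;
+ *
+ *   r = crypt_pbkdf_perf("argon2id", NULL, pw, pw_len, salt, salt_len,
+ *                        64, 2000, 1024 * 1024, 4, &iters, &memory,
+ *                        NULL, NULL);
+ *
+ * On success, iters and memory hold costs tuned to roughly 2000 ms.
+ */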
diff --git a/lib/crypto_backend/utf8.c b/lib/crypto_backend/utf8.c
new file mode 100644
index 0000000..24e0d8d
--- /dev/null
+++ b/lib/crypto_backend/utf8.c
@@ -0,0 +1,288 @@
+/*
+ * UTF8/16 helpers, copied and adapted from systemd project.
+ *
+ * Copyright (C) 2010 Lennart Poettering
+ *
+ * cryptsetup related changes
+ * Copyright (C) 2021-2023 Vojtech Trefny
+ *
+ * Parts of the original systemd implementation are based on the GLIB utf8
+ * validation functions.
+ * gutf8.c - Operations on UTF-8 strings.
+ *
+ * Copyright (C) 1999 Tom Tromey
+ * Copyright (C) 2000 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <errno.h>
+#include <endian.h>
+
+#include "crypto_backend.h"
+
+static inline bool utf16_is_surrogate(char16_t c)
+{
+ return c >= 0xd800U && c <= 0xdfffU;
+}
+
+static inline bool utf16_is_trailing_surrogate(char16_t c)
+{
+ return c >= 0xdc00U && c <= 0xdfffU;
+}
+
+static inline char32_t utf16_surrogate_pair_to_unichar(char16_t lead, char16_t trail)
+{
+ return ((((char32_t) lead - 0xd800U) << 10) + ((char32_t) trail - 0xdc00U) + 0x10000U);
+}
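+
+/*
+ * Example (illustrative): U+1F600 is the UTF-16 surrogate pair
+ * 0xd83d 0xde00, and the helper above reverses it:
+ * ((0xd83d - 0xd800) << 10) + (0xde00 - 0xdc00) + 0x10000 = 0x1f600.
+ */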
+
+/**
+ * utf8_encode_unichar() - Encode single UCS-4 character as UTF-8
+ * @out_utf8: output buffer of at least 4 bytes or NULL
+ * @g: UCS-4 character to encode
+ *
+ * This encodes a single UCS-4 character as UTF-8 and writes it into @out_utf8.
+ * The length of the character is returned. It is not zero-terminated! If the
+ * output buffer is NULL, only the length is returned.
+ *
+ * Returns: The length in bytes that the UTF-8 representation does or would
+ * occupy.
+ */
+static size_t utf8_encode_unichar(char *out_utf8, char32_t g)
+{
+ if (g < (1 << 7)) {
+ if (out_utf8)
+ out_utf8[0] = g & 0x7f;
+ return 1;
+ } else if (g < (1 << 11)) {
+ if (out_utf8) {
+ out_utf8[0] = 0xc0 | ((g >> 6) & 0x1f);
+ out_utf8[1] = 0x80 | (g & 0x3f);
+ }
+ return 2;
+ } else if (g < (1 << 16)) {
+ if (out_utf8) {
+ out_utf8[0] = 0xe0 | ((g >> 12) & 0x0f);
+ out_utf8[1] = 0x80 | ((g >> 6) & 0x3f);
+ out_utf8[2] = 0x80 | (g & 0x3f);
+ }
+ return 3;
+ } else if (g < (1 << 21)) {
+ if (out_utf8) {
+ out_utf8[0] = 0xf0 | ((g >> 18) & 0x07);
+ out_utf8[1] = 0x80 | ((g >> 12) & 0x3f);
+ out_utf8[2] = 0x80 | ((g >> 6) & 0x3f);
+ out_utf8[3] = 0x80 | (g & 0x3f);
+ }
+ return 4;
+ }
+
+ return 0;
+}
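+
+/*
+ * Example (illustrative): U+00E9 takes the two-byte branch above and
+ * encodes as 0xc3 0xa9 (0xc0 | (0xe9 >> 6), 0x80 | (0xe9 & 0x3f)).
+ */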
+
+/**
+ * crypt_utf16_to_utf8()
+ * @out: output buffer, should be 2 * @length + 1 long
+ * @s: string to convert
+ * @length: length of @s in bytes
+ *
+ * Converts a UTF16LE encoded string to a UTF8 encoded string.
+ *
+ * Returns: 0 on success, negative errno otherwise
+ */
+int crypt_utf16_to_utf8(char **out, const char16_t *s, size_t length /* bytes! */)
+{
+ const uint8_t *f;
+ char *t;
+
+ assert(s);
+ assert(out);
+ assert(*out);
+
+ /* Input length is in bytes, i.e. the shortest possible character takes 2 bytes. Each unicode character may
+ * take up to 4 bytes in UTF-8. Let's also account for a trailing NUL byte. */
+ if (length * 2 < length)
+ return -EOVERFLOW; /* overflow */
+
+ f = (const uint8_t*) s;
+ t = *out;
+
+ while (f + 1 < (const uint8_t*) s + length) {
+ char16_t w1, w2;
+
+ /* see RFC 2781 section 2.2 */
+
+ w1 = f[1] << 8 | f[0];
+ f += 2;
+
+ if (!utf16_is_surrogate(w1)) {
+ t += utf8_encode_unichar(t, w1);
+ continue;
+ }
+
+ if (utf16_is_trailing_surrogate(w1))
+ continue; /* spurious trailing surrogate, ignore */
+
+ if (f + 1 >= (const uint8_t*) s + length)
+ break;
+
+ w2 = f[1] << 8 | f[0];
+ f += 2;
+
+ if (!utf16_is_trailing_surrogate(w2)) {
+ f -= 2;
+ continue; /* surrogate missing its trailing surrogate, ignore */
+ }
+
+ t += utf8_encode_unichar(t, utf16_surrogate_pair_to_unichar(w1, w2));
+ }
+
+ *t = 0;
+ return 0;
+}
+
+/* count of bytes used to encode one unicode char */
+static size_t utf8_encoded_expected_len(uint8_t c)
+{
+ if (c < 0x80)
+ return 1;
+ if ((c & 0xe0) == 0xc0)
+ return 2;
+ if ((c & 0xf0) == 0xe0)
+ return 3;
+ if ((c & 0xf8) == 0xf0)
+ return 4;
+ if ((c & 0xfc) == 0xf8)
+ return 5;
+ if ((c & 0xfe) == 0xfc)
+ return 6;
+
+ return 0;
+}
+
+/* decode one unicode char */
+static int utf8_encoded_to_unichar(const char *str, char32_t *ret_unichar)
+{
+ char32_t unichar;
+ size_t len, i;
+
+ assert(str);
+
+ len = utf8_encoded_expected_len(str[0]);
+
+ switch (len) {
+ case 1:
+ *ret_unichar = (char32_t)str[0];
+ return 0;
+ case 2:
+ unichar = str[0] & 0x1f;
+ break;
+ case 3:
+ unichar = (char32_t)str[0] & 0x0f;
+ break;
+ case 4:
+ unichar = (char32_t)str[0] & 0x07;
+ break;
+ case 5:
+ unichar = (char32_t)str[0] & 0x03;
+ break;
+ case 6:
+ unichar = (char32_t)str[0] & 0x01;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 1; i < len; i++) {
+ if (((char32_t)str[i] & 0xc0) != 0x80)
+ return -EINVAL;
+
+ unichar <<= 6;
+ unichar |= (char32_t)str[i] & 0x3f;
+ }
+
+ *ret_unichar = unichar;
+
+ return 0;
+}
+
+static size_t utf16_encode_unichar(char16_t *out, char32_t c)
+{
+ /* Note that this encodes as little-endian. */
+
+ switch (c) {
+
+ case 0 ... 0xd7ffU:
+ case 0xe000U ... 0xffffU:
+ out[0] = htole16(c);
+ return 1;
+
+ case 0x10000U ... 0x10ffffU:
+ c -= 0x10000U;
+ out[0] = htole16((c >> 10) + 0xd800U);
+ out[1] = htole16((c & 0x3ffU) + 0xdc00U);
+ return 2;
+
+ default: /* A surrogate (invalid) */
+ return 0;
+ }
+}
+
+/**
+ * crypt_utf8_to_utf16()
+ * @out: output buffer, should be @length + 1 long
+ * @s: string to convert
+ * @length: length of @s in bytes
+ *
+ * Converts a UTF8 encoded string to a UTF16LE encoded string.
+ *
+ * Returns: 0 on success, negative errno otherwise
+ */
+int crypt_utf8_to_utf16(char16_t **out, const char *s, size_t length)
+{
+ char16_t *p;
+ size_t i;
+ int r;
+
+ assert(s);
+
+ p = *out;
+
+ for (i = 0; i < length;) {
+ char32_t unichar;
+ size_t e;
+
+ e = utf8_encoded_expected_len(s[i]);
+ if (e <= 1) /* Invalid and single byte characters are copied as they are */
+ goto copy;
+
+ if (i + e > length) /* sequence longer than input buffer, then copy as-is */
+ goto copy;
+
+ r = utf8_encoded_to_unichar(s + i, &unichar);
+ if (r < 0) /* sequence invalid, then copy as-is */
+ goto copy;
+
+ p += utf16_encode_unichar(p, unichar);
+ i += e;
+ continue;
+
+ copy:
+ *(p++) = htole16(s[i++]);
+ }
+
+ *p = 0;
+ return 0;
+}
diff --git a/lib/fvault2/fvault2.c b/lib/fvault2/fvault2.c
new file mode 100644
index 0000000..0b0c9ce
--- /dev/null
+++ b/lib/fvault2/fvault2.c
@@ -0,0 +1,1057 @@
+/*
+ * FVAULT2 (FileVault2-compatible) volume handling
+ *
+ * Copyright (C) 2021-2022 Pavel Tobias
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <regex.h>
+#include <stdio.h>
+#include <uuid/uuid.h>
+
+#include "internal.h"
+#include "fvault2.h"
+
+/* Core Storage signature/magic; "CS" big-endian */
+#define FVAULT2_CORE_STORAGE_MAGIC 0x4353
+
+/* size of the physical volume header in bytes */
+#define FVAULT2_VOL_HEADER_SIZE 512
+
+/* size of a single metadata block in bytes */
+#define FVAULT2_MD_BLOCK_SIZE 8192
+
+/* maximum byte offset at which a metadata block may still be read */
+#define FVAULT2_MAX_OFF (1024 * 1024 * 1024)
+
+/* encrypted metadata parsing progress flags (see _read_encrypted_metadata) */
+#define FVAULT2_ENC_MD_PARSED_0x0019 0b001
+#define FVAULT2_ENC_MD_PARSED_0x001A 0b010
+#define FVAULT2_ENC_MD_PARSED_0x0305 0b100
+#define FVAULT2_ENC_MD_PARSED_NONE 0b000
+#define FVAULT2_ENC_MD_PARSED_ALL 0b111
+
+/* sizes of decoded PassphraseWrappedKEKStruct and KEKWrappedVolumeKeyStruct */
+#define FVAULT2_PWK_SIZE 284
+#define FVAULT2_KWVK_SIZE 256
+
+/* size of an AES-128 key */
+#define FVAULT2_AES_KEY_SIZE 16
+
+/* size of the volume key and the encrypted metadata decryption key */
+#define FVAULT2_XTS_KEY_SIZE (FVAULT2_AES_KEY_SIZE * 2)
+
+/* size of an XTS tweak value */
+#define FVAULT2_XTS_TWEAK_SIZE 16
+
+/* size of a binary representation of a UUID */
+#define FVAULT2_UUID_BIN_SIZE 16
+
+struct crc32_checksum {
+ uint32_t value;
+ uint32_t seed;
+} __attribute__((packed));
+
+struct volume_header {
+ struct crc32_checksum checksum;
+ uint16_t version;
+ uint16_t block_type;
+ uint8_t unknown1[52];
+ uint64_t ph_vol_size;
+ uint8_t unknown2[16];
+ uint16_t magic;
+ uint32_t checksum_algo;
+ uint8_t unknown3[2];
+ uint32_t block_size;
+ uint32_t metadata_size;
+ uint64_t disklbl_blkoff;
+ uint64_t other_md_blkoffs[3];
+ uint8_t unknown4[32];
+ uint32_t key_data_size;
+ uint32_t cipher;
+ uint8_t key_data[FVAULT2_AES_KEY_SIZE];
+ uint8_t unknown5[112];
+ uint8_t ph_vol_uuid[FVAULT2_UUID_BIN_SIZE];
+ uint8_t unknown6[192];
+} __attribute__((packed));
+
+struct volume_groups_descriptor {
+ uint8_t unknown1[8];
+ uint64_t enc_md_blocks_n;
+ uint8_t unknown2[16];
+ uint64_t enc_md_blkoff;
+} __attribute__((packed));
+
+struct metadata_block_header {
+ struct crc32_checksum checksum;
+ uint16_t version;
+ uint16_t block_type;
+ uint8_t unknown1[20];
+ uint64_t block_num;
+ uint8_t unknown2[8];
+ uint32_t block_size;
+ uint8_t unknown3[12];
+} __attribute__((packed));
+
+struct metadata_block_0x0011 {
+ struct metadata_block_header header;
+ uint32_t md_size;
+ uint8_t unknown1[4];
+ struct crc32_checksum checksum;
+ uint8_t unknown2[140];
+ uint32_t vol_gr_des_off;
+} __attribute__((packed));
+
+struct metadata_block_0x0019 {
+ struct metadata_block_header header;
+ uint8_t unknown1[40];
+ uint32_t xml_comp_size;
+ uint32_t xml_uncomp_size;
+ uint32_t xml_off;
+ uint32_t xml_size;
+} __attribute__((packed));
+
+struct metadata_block_0x001a {
+ struct metadata_block_header header;
+ uint8_t unknown1[64];
+ uint32_t xml_off;
+ uint32_t xml_size;
+} __attribute__((packed));
+
+struct metadata_block_0x0305 {
+ struct metadata_block_header header;
+ uint32_t entries_n;
+ uint8_t unknown1[36];
+ uint32_t log_vol_blkoff;
+} __attribute__((packed));
+
+struct passphrase_wrapped_kek {
+ uint32_t pbkdf2_salt_type;
+ uint32_t pbkdf2_salt_size;
+ uint8_t pbkdf2_salt[FVAULT2_PBKDF2_SALT_SIZE];
+ uint32_t wrapped_kek_type;
+ uint32_t wrapped_kek_size;
+ uint8_t wrapped_kek[FVAULT2_WRAPPED_KEY_SIZE];
+ uint8_t unknown1[112];
+ uint32_t pbkdf2_iters;
+} __attribute__((packed));
+
+struct kek_wrapped_volume_key {
+ uint32_t wrapped_vk_type;
+ uint32_t wrapped_vk_size;
+ uint8_t wrapped_vk[FVAULT2_WRAPPED_KEY_SIZE];
+} __attribute__((packed));
+
+/**
+ * Test whether all bytes of a chunk of memory are equal to a constant value.
+ * @param[in] value the value all bytes should be equal to
+ * @param[in] data the tested chunk of memory
+ * @param[in] data_size byte-size of the chunk of memory
+ */
+static bool _filled_with(
+ uint8_t value,
+ const void *data,
+ size_t data_size)
+{
+ const uint8_t *data_bytes = data;
+ size_t i;
+
+ for (i = 0; i < data_size; i++)
+ if (data_bytes[i] != value)
+ return false;
+
+ return true;
+}
+
+/**
+ * Verify the CRC checksum of a chunk of memory; returns 0 if valid, -EINVAL otherwise.
+ * @param[in] data a chunk of memory starting with a crc32_checksum struct
+ * @param[in] data_size the size of the chunk of memory in bytes
+ */
+static int _check_crc(
+ const void *data,
+ size_t data_size)
+{
+ const size_t crc_size = sizeof(struct crc32_checksum);
+ uint32_t seed;
+ uint32_t value;
+
+ assert(data_size >= crc_size);
+
+ value = le32_to_cpu(((const struct crc32_checksum *)data)->value);
+ seed = le32_to_cpu(((const struct crc32_checksum *)data)->seed);
+ if (seed != 0xffffffff)
+ return -EINVAL;
+
+ if (crypt_crc32c(seed, (const uint8_t *)data + crc_size,
+ data_size - crc_size) != value)
+ return -EINVAL;
+
+ return 0;
+}
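+
+/*
+ * On-disk layout assumed by _check_crc() (derived from the code above):
+ *
+ *   offset 0: uint32_t value  -- CRC32C over bytes [8, data_size)
+ *   offset 4: uint32_t seed   -- always 0xffffffff on valid volumes
+ *   offset 8: payload         -- the checksummed data itself
+ */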
+
+/**
+ * Unwrap an AES-wrapped key.
+ * @param[in] kek the KEK with which the key has been wrapped
+ * @param[in] kek_size the size of the KEK in bytes
+ * @param[in] key_wrapped the wrapped key
+ * @param[in] key_wrapped_size the size of the wrapped key in bytes
+ * @param[out] key_buf an output buffer for the unwrapped key
+ * @param[in] key_buf_size the size of the output buffer in bytes
+ */
+static int _unwrap_key(
+ const void *kek,
+ size_t kek_size,
+ const void *key_wrapped,
+ size_t key_wrapped_size,
+ void *key_buf,
+ size_t key_buf_size)
+{
+ /* Algorithm and notation taken from NIST Special Publication 800-38F:
+ https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf
+
+ This implementation supports only 128-bit KEKs and wrapped keys. */
+
+ int r = 0;
+ struct crypt_cipher *cipher = NULL;
+ void *cipher_in = NULL;
+ void *cipher_out = NULL;
+ uint64_t a;
+ uint64_t r2;
+ uint64_t r3;
+ uint64_t t;
+ uint64_t r2_prev;
+
+ assert(kek_size == 16 && key_wrapped_size == 24 && key_buf_size == 16);
+
+ r = crypt_cipher_init(&cipher, "aes", "ecb", kek, kek_size);
+ if (r < 0)
+ goto out;
+
+ cipher_in = malloc(16);
+ if (cipher_in == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ cipher_out = malloc(16);
+ if (cipher_out == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /* CHAPTER 6.1, ALGORITHM 2: W^-1(C) */
+
+ /* initialize variables */
+ a = ((const uint64_t *)key_wrapped)[0]; /* A = C_1 (see step 1c) */
+ r2 = ((const uint64_t *)key_wrapped)[1]; /* R_1 = C_2 (see step 1d) */
+ r3 = ((const uint64_t *)key_wrapped)[2]; /* R_2 = C_3 (see step 1d) */
+
+ /* calculate intermediate values for each t = s, ..., 1 (see step 2),
+ where s = 6 * (n - 1) (see step 1a) */
+ for (t = 6 * (3 - 1); t > 0; t--) {
+ /* store current R2 for later assignment (see step 2c) */
+ r2_prev = r2;
+
+ /* prepare input for CIPH^{-1}_K (see steps 2a, 2b) */
+ ((uint64_t *)cipher_in)[0] = a ^ cpu_to_be64(t);
+ ((uint64_t *)cipher_in)[1] = r3;
+
+ /* A||R2 = CIPH^{-1}_K(...) (see steps 2a, 2b) */
+ r = crypt_cipher_decrypt(cipher, cipher_in, cipher_out, 16, NULL, 0);
+ if (r < 0)
+ goto out;
+ a = ((uint64_t *)cipher_out)[0];
+ r2 = ((uint64_t *)cipher_out)[1];
+
+ /* assign previous R2 (see step 2c) */
+ r3 = r2_prev;
+ }
+
+ /* note that A||R_1||R_2 holds the result S (see step 3) */
+
+ /* CHAPTER 6.2, ALGORITHM 4: KW-AD(C) */
+
+ /* check whether MSB_{64}(S) (= A) matches ICV1 (see step 3) */
+ if (a != 0xA6A6A6A6A6A6A6A6) {
+ r = -EPERM;
+ goto out;
+ }
+
+ /* return LSB_{128}(S) (= R_1||R_2) (see step 4) */
+ ((uint64_t *)key_buf)[0] = r2;
+ ((uint64_t *)key_buf)[1] = r3;
+out:
+ free(cipher_in);
+ free(cipher_out);
+ if (cipher != NULL)
+ crypt_cipher_destroy(cipher);
+ return r;
+}
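+
+/*
+ * Call sketch (sizes are fixed by the assert above): a 16-byte KEK
+ * unwraps a 24-byte blob into a 16-byte key, i.e. NIST KW / RFC 3394
+ * with n = 2 semiblocks. A failed ICV check surfaces as -EPERM, which
+ * is how a wrong passphrase is detected:
+ *
+ *   uint8_t kek[16], wrapped[24], key[16];
+ *   // ... fill kek and wrapped from the parsed metadata ...
+ *   int r = _unwrap_key(kek, sizeof(kek), wrapped, sizeof(wrapped),
+ *                       key, sizeof(key));
+ */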
+
+/**
+ * Search XML plist data for a property and return its value.
+ * @param[in] xml a 0-terminated string containing the XML plist data
+ * @param[in] prop_key a 0-terminated string with the sought property's key
+ * @param[in] prop_type a 0-terminated string with the sought property's type
+ * @param[out] value a newly allocated 0-terminated string with the found property's value
+ */
+static int _search_xml(
+ const char *xml,
+ const char *prop_key,
+ const char *prop_type,
+ char **value)
+{
+ int r = 0;
+ char *pattern = NULL;
+ bool regex_ready = false;
+ regex_t regex;
+ regmatch_t match[2];
+ const char *value_start;
+ size_t value_len;
+
+ if (asprintf(&pattern, "<key>%s</key><%s[^>]*>([^<]+)</%s>",
+ prop_key, prop_type, prop_type) < 0) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (regcomp(&regex, pattern, REG_EXTENDED) != 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ regex_ready = true;
+
+ if (regexec(&regex, xml, 2, match, 0) != 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ value_start = xml + match[1].rm_so;
+ value_len = match[1].rm_eo - match[1].rm_so;
+
+ *value = calloc(value_len + 1, 1);
+ if (*value == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(*value, value_start, value_len);
+out:
+ free(pattern);
+ if (regex_ready)
+ regfree(&regex);
+ return r;
+}
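+
+/*
+ * Usage sketch with a hypothetical property name; the returned value
+ * is newly allocated and must be freed by the caller:
+ *
+ *   const char *xml = "<key>Iterations</key><integer>70400</integer>";
+ *   char *value = NULL;
+ *
+ *   if (_search_xml(xml, "Iterations", "integer", &value) == 0) {
+ *       // value == "70400"
+ *       free(value);
+ *   }
+ */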
+
+/**
+ * Extract relevant info from a metadata block of type 0x0019.
+ * @param[in] md_block the pre-read and decrypted metadata block
+ * @param[out] pbkdf2_iters number of PBKDF2 iterations
+ * @param[out] pbkdf2_salt PBKDF2 salt (for intermediate key derivation from the passphrase)
+ * @param[out] wrapped_kek KEK AES-wrapped with passphrase-derived key
+ * @param[out] wrapped_vk volume key AES-wrapped with KEK
+ */
+static int _parse_metadata_block_0x0019(
+ const struct metadata_block_0x0019 *md_block,
+ uint32_t *pbkdf2_iters,
+ uint8_t *pbkdf2_salt,
+ uint8_t *wrapped_kek,
+ uint8_t *wrapped_vk)
+{
+ int r = 0;
+ char *xml = NULL;
+ char *pwk_base64 = NULL;
+ char *kwvk_base64 = NULL;
+ struct passphrase_wrapped_kek *pwk = NULL;
+ struct kek_wrapped_volume_key *kwvk = NULL;
+ size_t decoded_size;
+ uint32_t xml_off = le32_to_cpu(md_block->xml_off);
+ uint32_t xml_size = le32_to_cpu(md_block->xml_size);
+
+ if (xml_off + xml_size > FVAULT2_MD_BLOCK_SIZE)
+ return -EINVAL;
+
+ xml = strndup((const char *)md_block + xml_off, xml_size);
+ if (xml == NULL)
+ return -ENOMEM;
+
+ r = _search_xml(xml, "PassphraseWrappedKEKStruct", "data", &pwk_base64);
+ if (r < 0)
+ goto out;
+ r = crypt_base64_decode((char **)&pwk, &decoded_size, pwk_base64, strlen(pwk_base64));
+ if (r < 0)
+ goto out;
+ if (decoded_size != FVAULT2_PWK_SIZE) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = _search_xml(xml, "KEKWrappedVolumeKeyStruct", "data", &kwvk_base64);
+ if (r < 0)
+ goto out;
+ r = crypt_base64_decode((char **)&kwvk, &decoded_size, kwvk_base64, strlen(kwvk_base64));
+ if (r < 0)
+ goto out;
+ if (decoded_size != FVAULT2_KWVK_SIZE) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ *pbkdf2_iters = le32_to_cpu(pwk->pbkdf2_iters);
+ memcpy(pbkdf2_salt, pwk->pbkdf2_salt, FVAULT2_PBKDF2_SALT_SIZE);
+ memcpy(wrapped_kek, pwk->wrapped_kek, FVAULT2_WRAPPED_KEY_SIZE);
+ memcpy(wrapped_vk, kwvk->wrapped_vk, FVAULT2_WRAPPED_KEY_SIZE);
+out:
+ free(xml);
+ free(pwk_base64);
+ free(kwvk_base64);
+ free(pwk);
+ free(kwvk);
+ return r;
+}
+
+/**
+ * Validate a UUID string and reformat it to match system defaults.
+ * @param[in] uuid_in the original UUID string
+ * @param[out] uuid_out the reformatted UUID string
+ */
+static int _reformat_uuid(
+ const char *uuid_in,
+ char *uuid_out)
+{
+ uint8_t uuid_bin[FVAULT2_UUID_LEN];
+ int r;
+
+ r = uuid_parse(uuid_in, uuid_bin);
+ if (r < 0)
+ return -EINVAL;
+
+ uuid_unparse(uuid_bin, uuid_out);
+ return 0;
+}
+
+/**
+ * Extract relevant info from a metadata block of type 0x001A.
+ * @param[in] md_block the pre-read and decrypted metadata block
+ * @param[out] log_vol_size encrypted logical volume size in bytes
+ * @param[out] family_uuid logical volume family UUID
+ */
+static int _parse_metadata_block_0x001a(
+ const struct metadata_block_0x001a *md_block,
+ uint64_t *log_vol_size,
+ char *family_uuid)
+{
+ int r = 0;
+ char *xml = NULL;
+ char *log_vol_size_str = NULL;
+ char *family_uuid_str = NULL;
+ uint32_t xml_off = le32_to_cpu(md_block->xml_off);
+ uint32_t xml_size = le32_to_cpu(md_block->xml_size);
+
+ if (xml_off + xml_size > FVAULT2_MD_BLOCK_SIZE)
+ return -EINVAL;
+
+ xml = strndup((const char *)md_block + xml_off, xml_size);
+ if (xml == NULL)
+ return -ENOMEM;
+
+ r = _search_xml(xml, "com.apple.corestorage.lv.size", "integer", &log_vol_size_str);
+ if (r < 0)
+ goto out;
+ *log_vol_size = strtoull(log_vol_size_str, NULL, 16);
+ if (*log_vol_size == 0 || *log_vol_size == ULLONG_MAX) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = _search_xml(xml, "com.apple.corestorage.lv.familyUUID", "string", &family_uuid_str);
+ if (r < 0)
+ goto out;
+ r = _reformat_uuid(family_uuid_str, family_uuid);
+ if (r < 0)
+ goto out;
+out:
+ free(xml);
+ free(log_vol_size_str);
+ free(family_uuid_str);
+ return r;
+}
+
+/**
+ * Extract relevant info from a metadata block of type 0x0305.
+ * @param[in] md_block the pre-read and decrypted metadata block
+ * @param[out] log_vol_blkoff block-offset of the encrypted logical volume
+ */
+static int _parse_metadata_block_0x0305(
+ const struct metadata_block_0x0305 *md_block,
+ uint32_t *log_vol_blkoff)
+{
+ *log_vol_blkoff = le32_to_cpu(md_block->log_vol_blkoff);
+ return 0;
+}
+
+/**
+ * Extract relevant info from the physical volume header.
+ * @param[in] devfd opened device file descriptor
+ * @param[in] cd crypt_device passed into FVAULT2_read_metadata
+ * @param[out] block_size used to compute byte-offsets from block-offsets
+ * @param[out] disklbl_blkoff block-offset of the disk label block
+ * @param[out] ph_vol_uuid physical volume UUID
+ * @param[out] enc_md_key AES-XTS key used to decrypt the encrypted metadata
+ */
+static int _read_volume_header(
+ int devfd,
+ struct crypt_device *cd,
+ uint64_t *block_size,
+ uint64_t *disklbl_blkoff,
+ char *ph_vol_uuid,
+ struct volume_key **enc_md_key)
+{
+ int r = 0;
+ struct device *dev = crypt_metadata_device(cd);
+ struct volume_header *vol_header = NULL;
+
+ assert(sizeof(*vol_header) == FVAULT2_VOL_HEADER_SIZE);
+
+ vol_header = malloc(FVAULT2_VOL_HEADER_SIZE);
+ if (vol_header == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ log_dbg(cd, "Reading FVAULT2 volume header of size %u bytes.", FVAULT2_VOL_HEADER_SIZE);
+ if (read_blockwise(devfd, device_block_size(cd, dev),
+ device_alignment(dev), vol_header,
+ FVAULT2_VOL_HEADER_SIZE) != FVAULT2_VOL_HEADER_SIZE) {
+ log_err(cd, _("Could not read %u bytes of volume header."), FVAULT2_VOL_HEADER_SIZE);
+ r = -EIO;
+ goto out;
+ }
+
+ r = _check_crc(vol_header, FVAULT2_VOL_HEADER_SIZE);
+ if (r < 0) {
+ log_dbg(cd, "CRC mismatch.");
+ goto out;
+ }
+
+ if (le16_to_cpu(vol_header->version) != 1) {
+ log_err(cd, _("Unsupported FVAULT2 version %" PRIu16 "."),
+ le16_to_cpu(vol_header->version));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (be16_to_cpu(vol_header->magic) != FVAULT2_CORE_STORAGE_MAGIC) {
+ log_dbg(cd, "Invalid Core Storage magic bytes.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (le32_to_cpu(vol_header->key_data_size) != FVAULT2_AES_KEY_SIZE) {
+ log_dbg(cd, "Unsupported AES key size: %" PRIu32 " bytes.",
+ le32_to_cpu(vol_header->key_data_size));
+ r = -EINVAL;
+ goto out;
+ }
+
+ *enc_md_key = crypt_alloc_volume_key(FVAULT2_XTS_KEY_SIZE, NULL);
+ if (*enc_md_key == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ *block_size = le32_to_cpu(vol_header->block_size);
+ *disklbl_blkoff = le64_to_cpu(vol_header->disklbl_blkoff);
+ uuid_unparse(vol_header->ph_vol_uuid, ph_vol_uuid);
+ memcpy((*enc_md_key)->key, vol_header->key_data, FVAULT2_AES_KEY_SIZE);
+ memcpy((*enc_md_key)->key + FVAULT2_AES_KEY_SIZE,
+ vol_header->ph_vol_uuid, FVAULT2_AES_KEY_SIZE);
+out:
+ free(vol_header);
+ return r;
+}
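+
+/*
+ * Note on the key assembled above: the 32-byte AES-XTS key protecting
+ * the encrypted metadata is the concatenation of the 16-byte key_data
+ * field and the 16-byte physical volume UUID, i.e. two AES-128 subkeys:
+ *
+ *   enc_md_key = key_data[0..15] || ph_vol_uuid[0..15]
+ */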
+
+/**
+ * Extract info from the disk label block and the volume groups descriptor.
+ * @param[in] devfd opened device file descriptor
+ * @param[in] cd crypt_device passed into FVAULT2_read_metadata
+ * @param[in] block_size used to compute byte-offsets from block-offsets
+ * @param[in] disklbl_blkoff block-offset of the disk label block
+ * @param[out] enc_md_blkoff block-offset of the encrypted metadata
+ * @param[out] enc_md_blocks_n total count of encrypted metadata blocks
+ */
+static int _read_disklabel(
+ int devfd,
+ struct crypt_device *cd,
+ uint64_t block_size,
+ uint64_t disklbl_blkoff,
+ uint64_t *enc_md_blkoff,
+ uint64_t *enc_md_blocks_n)
+{
+ int r = 0;
+ uint64_t off;
+ ssize_t size;
+ void *md_block = NULL;
+ struct metadata_block_0x0011 *md_block_11;
+ struct volume_groups_descriptor *vol_gr_des = NULL;
+ struct device *dev = crypt_metadata_device(cd);
+
+ md_block = malloc(FVAULT2_MD_BLOCK_SIZE);
+ if (md_block == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (uint64_mult_overflow(&off, disklbl_blkoff, block_size) ||
+ off > FVAULT2_MAX_OFF) {
+ log_dbg(cd, "Device offset overflow.");
+ r = -EINVAL;
+ goto out;
+ }
+ size = FVAULT2_MD_BLOCK_SIZE;
+ log_dbg(cd, "Reading FVAULT2 disk label header of size %zu bytes.", size);
+ if (read_lseek_blockwise(devfd, device_block_size(cd, dev),
+ device_alignment(dev), md_block, size, off) != size) {
+ r = -EIO;
+ goto out;
+ }
+
+ r = _check_crc(md_block, FVAULT2_MD_BLOCK_SIZE);
+ if (r < 0) {
+ log_dbg(cd, "CRC mismatch.");
+ goto out;
+ }
+
+ vol_gr_des = malloc(sizeof(*vol_gr_des));
+ if (vol_gr_des == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ md_block_11 = md_block;
+ off += le32_to_cpu(md_block_11->vol_gr_des_off);
+ if (off > FVAULT2_MAX_OFF) {
+ log_dbg(cd, "Device offset overflow.");
+ r = -EINVAL;
+ goto out;
+ }
+ size = sizeof(struct volume_groups_descriptor);
+ log_dbg(cd, "Reading FVAULT2 volume groups descriptor of size %zu bytes.", size);
+ if (read_lseek_blockwise(devfd, device_block_size(cd, dev),
+ device_alignment(dev), vol_gr_des, size, off) != size) {
+ r = -EIO;
+ goto out;
+ }
+
+ *enc_md_blkoff = le64_to_cpu(vol_gr_des->enc_md_blkoff);
+ *enc_md_blocks_n = le64_to_cpu(vol_gr_des->enc_md_blocks_n);
+out:
+ free(md_block);
+ free(vol_gr_des);
+ return r;
+}
+
+/**
+ * Extract info from relevant encrypted metadata blocks.
+ * @param[in] devfd opened device file descriptor
+ * @param[in] cd crypt_device passed into FVAULT2_read_metadata
+ * @param[in] block_size used to compute byte-offsets from block-offsets
+ * @param[in] start_blkoff block-offset of the start of the encrypted metadata
+ * @param[in] blocks_n total count of encrypted metadata blocks
+ * @param[in] key AES-XTS key for decryption
+ * @param[out] params decryption parameters struct to fill
+ */
+static int _read_encrypted_metadata(
+ int devfd,
+ struct crypt_device *cd,
+ uint64_t block_size,
+ uint64_t start_blkoff,
+ uint64_t blocks_n,
+ const struct volume_key *key,
+ struct fvault2_params *params)
+{
+ int r = 0;
+ int status = FVAULT2_ENC_MD_PARSED_NONE;
+ struct device *dev = crypt_metadata_device(cd);
+ struct crypt_cipher *cipher = NULL;
+ void *tweak;
+ void *md_block_enc = NULL;
+ void *md_block = NULL;
+ struct metadata_block_header *md_block_header;
+ uint32_t log_vol_blkoff;
+ uint64_t i, start_off;
+ off_t off;
+ unsigned int block_type;
+
+ tweak = calloc(FVAULT2_XTS_TWEAK_SIZE, 1);
+ if (tweak == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ md_block_enc = malloc(FVAULT2_MD_BLOCK_SIZE);
+ if (md_block_enc == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ md_block = malloc(FVAULT2_MD_BLOCK_SIZE);
+ if (md_block == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = crypt_cipher_init(&cipher, "aes", "xts", key->key, FVAULT2_XTS_KEY_SIZE);
+ if (r < 0)
+ goto out;
+
+ if (uint64_mult_overflow(&start_off, start_blkoff, block_size) ||
+ start_off > FVAULT2_MAX_OFF) {
+ log_dbg(cd, "Device offset overflow.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ log_dbg(cd, "Reading FVAULT2 encrypted metadata blocks.");
+ for (i = 0; i < blocks_n; i++) {
+ off = start_off + i * FVAULT2_MD_BLOCK_SIZE;
+ if (off > FVAULT2_MAX_OFF) {
+ log_dbg(cd, "Device offset overflow.");
+ r = -EINVAL;
+ goto out;
+ }
+ if (read_lseek_blockwise(devfd, device_block_size(cd, dev),
+ device_alignment(dev), md_block_enc,
+ FVAULT2_MD_BLOCK_SIZE, off)
+ != FVAULT2_MD_BLOCK_SIZE) {
+ r = -EIO;
+ goto out;
+ }
+
+ if (_filled_with(0, md_block_enc, FVAULT2_MD_BLOCK_SIZE))
+ break;
+
+ *(uint64_t *)tweak = cpu_to_le64(i);
+ r = crypt_cipher_decrypt(cipher, md_block_enc, md_block,
+ FVAULT2_MD_BLOCK_SIZE, tweak, FVAULT2_XTS_TWEAK_SIZE);
+ if (r < 0)
+ goto out;
+
+ r = _check_crc(md_block, FVAULT2_MD_BLOCK_SIZE);
+ if (r < 0) {
+ log_dbg(cd, "CRC mismatch.");
+ goto out;
+ }
+
+ md_block_header = md_block;
+ block_type = le16_to_cpu(md_block_header->block_type);
+ switch (block_type) {
+ case 0x0019:
+ log_dbg(cd, "Get FVAULT2 metadata block %" PRIu64 " type 0x0019.", i);
+ r = _parse_metadata_block_0x0019(md_block,
+ &params->pbkdf2_iters,
+ (uint8_t *)params->pbkdf2_salt,
+ (uint8_t *)params->wrapped_kek,
+ (uint8_t *)params->wrapped_vk);
+ if (r < 0)
+ goto out;
+ status |= FVAULT2_ENC_MD_PARSED_0x0019;
+ break;
+
+ case 0x001A:
+ log_dbg(cd, "Get FVAULT2 metadata block %" PRIu64 " type 0x001A.", i);
+ r = _parse_metadata_block_0x001a(md_block,
+ &params->log_vol_size,
+ params->family_uuid);
+ if (r < 0)
+ goto out;
+ status |= FVAULT2_ENC_MD_PARSED_0x001A;
+ break;
+
+ case 0x0305:
+ log_dbg(cd, "Get FVAULT2 metadata block %" PRIu64 " type 0x0305.", i);
+ r = _parse_metadata_block_0x0305(md_block,
+ &log_vol_blkoff);
+ if (r < 0)
+ goto out;
+ if (uint64_mult_overflow(&params->log_vol_off,
+ log_vol_blkoff, block_size)) {
+ log_dbg(cd, "Device offset overflow.");
+ r = -EINVAL;
+ goto out;
+ }
+ status |= FVAULT2_ENC_MD_PARSED_0x0305;
+ break;
+ }
+ }
+
+ if (status != FVAULT2_ENC_MD_PARSED_ALL) {
+ log_dbg(cd, "Necessary FVAULT2 metadata blocks not found.");
+ r = -EINVAL;
+ goto out;
+ }
+out:
+ free(tweak);
+ free(md_block_enc);
+ free(md_block);
+ if (cipher != NULL)
+ crypt_cipher_destroy(cipher);
+ return r;
+}
+
+/**
+ * Activate device.
+ * @param[in] cd crypt_device struct passed into FVAULT2_activate_by_*
+ * @param[in] name name of the mapped device
+ * @param[in] vol_key the pre-derived AES-XTS volume key
+ * @param[in] params logical volume decryption parameters
+ * @param[in] flags flags assigned to the crypt_dm_active_device struct
+ */
+static int _activate(
+ struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vol_key,
+ const struct fvault2_params *params,
+ uint32_t flags)
+{
+ int r = 0;
+ char *cipher = NULL;
+ struct crypt_dm_active_device dm_dev = {
+ .flags = flags,
+ .size = params->log_vol_size / SECTOR_SIZE
+ };
+
+ r = device_block_adjust(cd, crypt_data_device(cd), DEV_EXCL,
+ crypt_get_data_offset(cd), &dm_dev.size, &dm_dev.flags);
+ if (r)
+ return r;
+
+ if (asprintf(&cipher, "%s-%s", params->cipher, params->cipher_mode) < 0)
+ return -ENOMEM;
+
+ r = dm_crypt_target_set(&dm_dev.segment, 0, dm_dev.size,
+ crypt_data_device(cd), vol_key, cipher,
+ crypt_get_iv_offset(cd), crypt_get_data_offset(cd),
+ crypt_get_integrity(cd), crypt_get_integrity_tag_size(cd),
+ crypt_get_sector_size(cd));
+
+ if (!r)
+ r = dm_create_device(cd, name, CRYPT_FVAULT2, &dm_dev);
+
+ dm_targets_free(cd, &dm_dev);
+ free(cipher);
+ return r;
+}
+
+int FVAULT2_read_metadata(
+ struct crypt_device *cd,
+ struct fvault2_params *params)
+{
+ int r = 0;
+ int devfd;
+ uint64_t block_size;
+ uint64_t disklbl_blkoff;
+ uint64_t enc_md_blkoff;
+ uint64_t enc_md_blocks_n;
+ struct volume_key *enc_md_key = NULL;
+ struct device *device = crypt_metadata_device(cd);
+
+ devfd = device_open(cd, device, O_RDONLY);
+ if (devfd < 0) {
+ log_err(cd, _("Cannot open device %s."), device_path(device));
+ return -EIO;
+ }
+
+ r = _read_volume_header(devfd, cd, &block_size, &disklbl_blkoff,
+ params->ph_vol_uuid, &enc_md_key);
+ if (r < 0)
+ goto out;
+
+ r = _read_disklabel(devfd, cd, block_size, disklbl_blkoff,
+ &enc_md_blkoff, &enc_md_blocks_n);
+ if (r < 0)
+ goto out;
+
+ r = _read_encrypted_metadata(devfd, cd, block_size, enc_md_blkoff,
+ enc_md_blocks_n, enc_md_key, params);
+ if (r < 0)
+ goto out;
+
+ params->cipher = "aes";
+ params->cipher_mode = "xts-plain64";
+ params->key_size = FVAULT2_XTS_KEY_SIZE;
+out:
+ crypt_free_volume_key(enc_md_key);
+ return r;
+}
+
+int FVAULT2_get_volume_key(
+ struct crypt_device *cd,
+ const char *passphrase,
+ size_t passphrase_len,
+ const struct fvault2_params *params,
+ struct volume_key **vol_key)
+{
+ int r = 0;
+ uint8_t family_uuid_bin[FVAULT2_UUID_BIN_SIZE];
+ struct volume_key *passphrase_key = NULL;
+ struct volume_key *kek = NULL;
+ struct crypt_hash *hash = NULL;
+
+ *vol_key = NULL;
+
+ if (uuid_parse(params->family_uuid, family_uuid_bin) < 0) {
+ log_dbg(cd, "Could not parse logical volume family UUID: %s.",
+ params->family_uuid);
+ r = -EINVAL;
+ goto out;
+ }
+
+ passphrase_key = crypt_alloc_volume_key(FVAULT2_AES_KEY_SIZE, NULL);
+ if (passphrase_key == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = crypt_pbkdf("pbkdf2", "sha256", passphrase, passphrase_len,
+ params->pbkdf2_salt, FVAULT2_PBKDF2_SALT_SIZE, passphrase_key->key,
+ FVAULT2_AES_KEY_SIZE, params->pbkdf2_iters, 0, 0);
+ if (r < 0)
+ goto out;
+
+ kek = crypt_alloc_volume_key(FVAULT2_AES_KEY_SIZE, NULL);
+ if (kek == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = _unwrap_key(passphrase_key->key, FVAULT2_AES_KEY_SIZE, params->wrapped_kek,
+ FVAULT2_WRAPPED_KEY_SIZE, kek->key, FVAULT2_AES_KEY_SIZE);
+ if (r < 0)
+ goto out;
+
+ *vol_key = crypt_alloc_volume_key(FVAULT2_XTS_KEY_SIZE, NULL);
+ if (*vol_key == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = _unwrap_key(kek->key, FVAULT2_AES_KEY_SIZE, params->wrapped_vk,
+ FVAULT2_WRAPPED_KEY_SIZE, (*vol_key)->key, FVAULT2_AES_KEY_SIZE);
+ if (r < 0)
+ goto out;
+
+ r = crypt_hash_init(&hash, "sha256");
+ if (r < 0)
+ goto out;
+ r = crypt_hash_write(hash, (*vol_key)->key, FVAULT2_AES_KEY_SIZE);
+ if (r < 0)
+ goto out;
+ r = crypt_hash_write(hash, (char *)family_uuid_bin,
+ FVAULT2_UUID_BIN_SIZE);
+ if (r < 0)
+ goto out;
+ r = crypt_hash_final(hash, (*vol_key)->key + FVAULT2_AES_KEY_SIZE,
+ FVAULT2_AES_KEY_SIZE);
+ if (r < 0)
+ goto out;
+out:
+ crypt_free_volume_key(passphrase_key);
+ crypt_free_volume_key(kek);
+ if (r < 0) {
+ crypt_free_volume_key(*vol_key);
+ *vol_key = NULL;
+ }
+ if (hash != NULL)
+ crypt_hash_destroy(hash);
+ return r;
+}
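+
+/*
+ * Summary of the derivation chain implemented above:
+ *
+ *   passphrase --PBKDF2-SHA256(salt, iters)--> passphrase key (16 B)
+ *   passphrase key --AES-unwrap--> KEK (16 B)
+ *   KEK --AES-unwrap--> volume key, first half (16 B)
+ *   SHA-256(first half || family UUID) --> volume key, second half (16 B)
+ *
+ * The concatenated 32 bytes form the aes-xts-plain64 volume key.
+ */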
+
+int FVAULT2_dump(
+ struct crypt_device *cd,
+ struct device *device,
+ const struct fvault2_params *params)
+{
+ log_std(cd, "Header information for FVAULT2 device %s.\n", device_path(device));
+
+ log_std(cd, "Physical volume UUID: \t%s\n", params->ph_vol_uuid);
+ log_std(cd, "Family UUID: \t%s\n", params->family_uuid);
+
+ log_std(cd, "Logical volume offset:\t%" PRIu64 " [bytes]\n", params->log_vol_off);
+
+ log_std(cd, "Logical volume size: \t%" PRIu64 " [bytes]\n",
+ params->log_vol_size);
+
+ log_std(cd, "Cipher: \t%s\n", params->cipher);
+ log_std(cd, "Cipher mode: \t%s\n", params->cipher_mode);
+
+ log_std(cd, "PBKDF2 iterations: \t%" PRIu32 "\n", params->pbkdf2_iters);
+
+ log_std(cd, "PBKDF2 salt: \t");
+ crypt_log_hex(cd, params->pbkdf2_salt, FVAULT2_PBKDF2_SALT_SIZE, " ", 0, NULL);
+ log_std(cd, "\n");
+
+ return 0;
+}
+
+int FVAULT2_activate_by_passphrase(
+ struct crypt_device *cd,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_len,
+ const struct fvault2_params *params,
+ uint32_t flags)
+{
+ int r;
+ struct volume_key *vol_key = NULL;
+
+ r = FVAULT2_get_volume_key(cd, passphrase, passphrase_len, params, &vol_key);
+ if (r < 0)
+ return r;
+
+ if (name)
+ r = _activate(cd, name, vol_key, params, flags);
+
+ crypt_free_volume_key(vol_key);
+ return r;
+}
+
+int FVAULT2_activate_by_volume_key(
+ struct crypt_device *cd,
+ const char *name,
+ const char *key,
+ size_t key_size,
+ const struct fvault2_params *params,
+ uint32_t flags)
+{
+ int r = 0;
+ struct volume_key *vol_key = NULL;
+
+ if (key_size != FVAULT2_XTS_KEY_SIZE)
+ return -EINVAL;
+
+ vol_key = crypt_alloc_volume_key(FVAULT2_XTS_KEY_SIZE, key);
+ if (vol_key == NULL)
+ return -ENOMEM;
+
+ r = _activate(cd, name, vol_key, params, flags);
+
+ crypt_free_volume_key(vol_key);
+ return r;
+}
diff --git a/lib/fvault2/fvault2.h b/lib/fvault2/fvault2.h
new file mode 100644
index 0000000..ce50ee3
--- /dev/null
+++ b/lib/fvault2/fvault2.h
@@ -0,0 +1,80 @@
+/*
+ * FVAULT2 (FileVault2-compatible) volume handling
+ *
+ * Copyright (C) 2021-2022 Pavel Tobias
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_FVAULT2_H
+#define _CRYPTSETUP_FVAULT2_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define FVAULT2_WRAPPED_KEY_SIZE 24
+#define FVAULT2_PBKDF2_SALT_SIZE 16
+#define FVAULT2_UUID_LEN 37
+
+struct crypt_device;
+struct volume_key;
+
+struct fvault2_params {
+ const char *cipher;
+ const char *cipher_mode;
+ uint16_t key_size;
+ uint32_t pbkdf2_iters;
+ char pbkdf2_salt[FVAULT2_PBKDF2_SALT_SIZE];
+ char wrapped_kek[FVAULT2_WRAPPED_KEY_SIZE];
+ char wrapped_vk[FVAULT2_WRAPPED_KEY_SIZE];
+ char family_uuid[FVAULT2_UUID_LEN];
+ char ph_vol_uuid[FVAULT2_UUID_LEN];
+ uint64_t log_vol_off;
+ uint64_t log_vol_size;
+};
+
+int FVAULT2_read_metadata(
+ struct crypt_device *cd,
+ struct fvault2_params *params);
+
+int FVAULT2_get_volume_key(
+ struct crypt_device *cd,
+ const char *passphrase,
+ size_t passphrase_len,
+ const struct fvault2_params *params,
+ struct volume_key **vol_key);
+
+int FVAULT2_dump(
+ struct crypt_device *cd,
+ struct device *device,
+ const struct fvault2_params *params);
+
+int FVAULT2_activate_by_passphrase(
+ struct crypt_device *cd,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_len,
+ const struct fvault2_params *params,
+ uint32_t flags);
+
+int FVAULT2_activate_by_volume_key(
+ struct crypt_device *cd,
+ const char *name,
+ const char *key,
+ size_t key_size,
+ const struct fvault2_params *params,
+ uint32_t flags);
+
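+/*
+ * Typical call sequence (sketch; error handling and crypt_device setup
+ * are elided, and the device name is only an example):
+ *
+ *   struct fvault2_params params = {};
+ *
+ *   FVAULT2_read_metadata(cd, &params);
+ *   FVAULT2_activate_by_passphrase(cd, "fv2-volume", passphrase,
+ *                                  passphrase_len, &params, 0);
+ */
+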
+#endif
diff --git a/lib/integrity/integrity.c b/lib/integrity/integrity.c
new file mode 100644
index 0000000..aeadc82
--- /dev/null
+++ b/lib/integrity/integrity.c
@@ -0,0 +1,402 @@
+/*
+ * Integrity volume handling
+ *
+ * Copyright (C) 2016-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <uuid/uuid.h>
+
+#include "integrity.h"
+#include "internal.h"
+
+/* For LUKS2, integrity metadata is kept on the DATA device, even with a detached header! */
+static struct device *INTEGRITY_metadata_device(struct crypt_device *cd)
+{
+ const char *type = crypt_get_type(cd);
+
+ if (type && !strcmp(type, CRYPT_LUKS2))
+ return crypt_data_device(cd);
+
+ return crypt_metadata_device(cd);
+}
+
+static int INTEGRITY_read_superblock(struct crypt_device *cd,
+ struct device *device,
+ uint64_t offset, struct superblock *sb)
+{
+ int devfd, r;
+
+ devfd = device_open(cd, device, O_RDONLY);
+ if (devfd < 0)
+ return -EINVAL;
+
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), sb, sizeof(*sb), offset) != sizeof(*sb) ||
+ memcmp(sb->magic, SB_MAGIC, sizeof(sb->magic))) {
+ log_dbg(cd, "No kernel dm-integrity metadata detected on %s.", device_path(device));
+ r = -EINVAL;
+ } else if (sb->version < SB_VERSION_1 || sb->version > SB_VERSION_5) {
+ log_err(cd, _("Incompatible kernel dm-integrity metadata (version %u) detected on %s."),
+ sb->version, device_path(device));
+ r = -EINVAL;
+ } else {
+ sb->integrity_tag_size = le16toh(sb->integrity_tag_size);
+ sb->journal_sections = le32toh(sb->journal_sections);
+ sb->provided_data_sectors = le64toh(sb->provided_data_sectors);
+ sb->recalc_sector = le64toh(sb->recalc_sector);
+ sb->flags = le32toh(sb->flags);
+ r = 0;
+ }
+
+ return r;
+}
+
+int INTEGRITY_read_sb(struct crypt_device *cd,
+ struct crypt_params_integrity *params,
+ uint32_t *flags)
+{
+ struct superblock sb;
+ int r;
+
+ r = INTEGRITY_read_superblock(cd, INTEGRITY_metadata_device(cd), 0, &sb);
+ if (r)
+ return r;
+
+ params->sector_size = SECTOR_SIZE << sb.log2_sectors_per_block;
+ params->tag_size = sb.integrity_tag_size;
+
+ if (flags)
+ *flags = sb.flags;
+
+ return 0;
+}
+
+int INTEGRITY_dump(struct crypt_device *cd, struct device *device, uint64_t offset)
+{
+ struct superblock sb;
+ int r;
+
+ r = INTEGRITY_read_superblock(cd, device, offset, &sb);
+ if (r)
+ return r;
+
+ log_std(cd, "Info for integrity device %s.\n", device_path(device));
+ log_std(cd, "superblock_version %d\n", (unsigned)sb.version);
+ log_std(cd, "log2_interleave_sectors %d\n", sb.log2_interleave_sectors);
+ log_std(cd, "integrity_tag_size %u\n", sb.integrity_tag_size);
+ log_std(cd, "journal_sections %u\n", sb.journal_sections);
+ log_std(cd, "provided_data_sectors %" PRIu64 "\n", sb.provided_data_sectors);
+ log_std(cd, "sector_size %u\n", SECTOR_SIZE << sb.log2_sectors_per_block);
+ if (sb.version >= SB_VERSION_2 && (sb.flags & SB_FLAG_RECALCULATING))
+ log_std(cd, "recalc_sector %" PRIu64 "\n", sb.recalc_sector);
+ log_std(cd, "log2_blocks_per_bitmap %u\n", sb.log2_blocks_per_bitmap_bit);
+ log_std(cd, "flags %s%s%s%s%s\n",
+ sb.flags & SB_FLAG_HAVE_JOURNAL_MAC ? "have_journal_mac " : "",
+ sb.flags & SB_FLAG_RECALCULATING ? "recalculating " : "",
+ sb.flags & SB_FLAG_DIRTY_BITMAP ? "dirty_bitmap " : "",
+ sb.flags & SB_FLAG_FIXED_PADDING ? "fix_padding " : "",
+ sb.flags & SB_FLAG_FIXED_HMAC ? "fix_hmac " : "");
+
+ return 0;
+}
+
+int INTEGRITY_data_sectors(struct crypt_device *cd,
+ struct device *device, uint64_t offset,
+ uint64_t *data_sectors)
+{
+ struct superblock sb;
+ int r;
+
+ r = INTEGRITY_read_superblock(cd, device, offset, &sb);
+ if (r)
+ return r;
+
+ *data_sectors = sb.provided_data_sectors;
+ return 0;
+}
+
+int INTEGRITY_key_size(const char *integrity)
+{
+ if (!integrity)
+ return 0;
+
+ //FIXME: use crypto backend hash size
+ if (!strcmp(integrity, "aead"))
+ return 0;
+ else if (!strcmp(integrity, "hmac(sha1)"))
+ return 20;
+ else if (!strcmp(integrity, "hmac(sha256)"))
+ return 32;
+ else if (!strcmp(integrity, "hmac(sha512)"))
+ return 64;
+ else if (!strcmp(integrity, "poly1305"))
+ return 0;
+ else if (!strcmp(integrity, "none"))
+ return 0;
+
+ return -EINVAL;
+}
+
+/* Return hash or hmac(hash) size, if known */
+int INTEGRITY_hash_tag_size(const char *integrity)
+{
+ char hash[MAX_CIPHER_LEN];
+ int r;
+
+ if (!integrity)
+ return 0;
+
+ if (!strcmp(integrity, "crc32") || !strcmp(integrity, "crc32c"))
+ return 4;
+
+ if (!strcmp(integrity, "xxhash64"))
+ return 8;
+
+ r = sscanf(integrity, "hmac(%" MAX_CIPHER_LEN_STR "[^)]s", hash);
+ if (r == 1)
+ r = crypt_hash_size(hash);
+ else
+ r = crypt_hash_size(integrity);
+
+ return r < 0 ? 0 : r;
+}
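+
+/*
+ * Examples of the mapping above (values in bytes): "crc32c" -> 4,
+ * "xxhash64" -> 8, "sha256" -> 32 and "hmac(sha256)" -> 32 (the inner
+ * hash size); unrecognized names fall back to 0.
+ */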
+
+int INTEGRITY_tag_size(const char *integrity,
+ const char *cipher,
+ const char *cipher_mode)
+{
+ int iv_tag_size = 0, auth_tag_size = 0;
+
+ if (!cipher_mode)
+ iv_tag_size = 0;
+ else if (!strcmp(cipher_mode, "xts-random"))
+ iv_tag_size = 16;
+ else if (!strcmp(cipher_mode, "gcm-random"))
+ iv_tag_size = 12;
+ else if (!strcmp(cipher_mode, "ccm-random"))
+ iv_tag_size = 8;
+ else if (!strcmp(cipher_mode, "ctr-random"))
+ iv_tag_size = 16;
+ else if (!strcmp(cipher, "aegis256") && !strcmp(cipher_mode, "random"))
+ iv_tag_size = 32;
+ else if (!strcmp(cipher_mode, "random"))
+ iv_tag_size = 16;
+
+ //FIXME: use crypto backend hash size
+ if (!integrity || !strcmp(integrity, "none"))
+ auth_tag_size = 0;
+ else if (!strcmp(integrity, "aead"))
+ auth_tag_size = 16; /* gcm- mode only */
+ else if (!strcmp(integrity, "cmac(aes)"))
+ auth_tag_size = 16;
+ else if (!strcmp(integrity, "hmac(sha1)"))
+ auth_tag_size = 20;
+ else if (!strcmp(integrity, "hmac(sha256)"))
+ auth_tag_size = 32;
+ else if (!strcmp(integrity, "hmac(sha512)"))
+ auth_tag_size = 64;
+ else if (!strcmp(integrity, "poly1305")) {
+ if (iv_tag_size)
+ iv_tag_size = 12;
+ auth_tag_size = 16;
+ }
+
+ return iv_tag_size + auth_tag_size;
+}
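+
+/*
+ * Worked example: cipher "aes", cipher_mode "gcm-random" and integrity
+ * "aead" give iv_tag_size = 12 and auth_tag_size = 16, so each sector
+ * carries a 28-byte tag.
+ */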
+
+int INTEGRITY_create_dmd_device(struct crypt_device *cd,
+ const struct crypt_params_integrity *params,
+ struct volume_key *vk,
+ struct volume_key *journal_crypt_key,
+ struct volume_key *journal_mac_key,
+ struct crypt_dm_active_device *dmd,
+ uint32_t flags, uint32_t sb_flags)
+{
+ int r;
+
+ if (!dmd)
+ return -EINVAL;
+
+ *dmd = (struct crypt_dm_active_device) {
+ .flags = flags,
+ };
+
+ /* Workaround for kernel dm-integrity table bug */
+ if (sb_flags & SB_FLAG_RECALCULATING)
+ dmd->flags |= CRYPT_ACTIVATE_RECALCULATE;
+
+ r = INTEGRITY_data_sectors(cd, INTEGRITY_metadata_device(cd),
+ crypt_get_data_offset(cd) * SECTOR_SIZE, &dmd->size);
+ if (r < 0)
+ return r;
+
+ return dm_integrity_target_set(cd, &dmd->segment, 0, dmd->size,
+ INTEGRITY_metadata_device(cd), crypt_data_device(cd),
+ crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd),
+ crypt_get_sector_size(cd), vk, journal_crypt_key,
+ journal_mac_key, params);
+}
+
+int INTEGRITY_activate_dmd_device(struct crypt_device *cd,
+ const char *name,
+ const char *type,
+ struct crypt_dm_active_device *dmd,
+ uint32_t sb_flags)
+{
+ int r;
+ uint32_t dmi_flags;
+ struct dm_target *tgt = &dmd->segment;
+
+ if (!single_segment(dmd) || tgt->type != DM_INTEGRITY)
+ return -EINVAL;
+
+ log_dbg(cd, "Trying to activate INTEGRITY device on top of %s, using name %s, tag size %d, provided sectors %" PRIu64".",
+ device_path(tgt->data_device), name, tgt->u.integrity.tag_size, dmd->size);
+
+ r = create_or_reload_device(cd, name, type, dmd);
+
+ if (r < 0 && (dm_flags(cd, DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
+ log_err(cd, _("Kernel does not support dm-integrity mapping."));
+ return -ENOTSUP;
+ }
+
+ if (r < 0 && (sb_flags & SB_FLAG_FIXED_PADDING) && !dm_flags(cd, DM_INTEGRITY, &dmi_flags) &&
+ !(dmi_flags & DM_INTEGRITY_FIX_PADDING_SUPPORTED)) {
+ log_err(cd, _("Kernel does not support dm-integrity fixed metadata alignment."));
+ return -ENOTSUP;
+ }
+
+ if (r < 0 && (dmd->flags & CRYPT_ACTIVATE_RECALCULATE) &&
+ !(crypt_get_compatibility(cd) & CRYPT_COMPAT_LEGACY_INTEGRITY_RECALC) &&
+ ((sb_flags & SB_FLAG_FIXED_HMAC) ?
+ (tgt->u.integrity.vk && !tgt->u.integrity.journal_integrity_key) :
+ (tgt->u.integrity.vk || tgt->u.integrity.journal_integrity_key))) {
+ log_err(cd, _("Kernel refuses to activate insecure recalculate option (see legacy activation options to override)."));
+ return -ENOTSUP;
+ }
+
+ return r;
+}
+
+int INTEGRITY_activate(struct crypt_device *cd,
+ const char *name,
+ const struct crypt_params_integrity *params,
+ struct volume_key *vk,
+ struct volume_key *journal_crypt_key,
+ struct volume_key *journal_mac_key,
+ uint32_t flags, uint32_t sb_flags)
+{
+ struct crypt_dm_active_device dmdq = {}, dmd = {};
+ int r;
+
+ if (flags & CRYPT_ACTIVATE_REFRESH) {
+ r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE |
+ DM_ACTIVE_CRYPT_KEY |
+ DM_ACTIVE_INTEGRITY_PARAMS |
+ DM_ACTIVE_JOURNAL_CRYPT_KEY |
+ DM_ACTIVE_JOURNAL_MAC_KEY, &dmdq);
+ if (r < 0)
+ return r;
+
+ r = INTEGRITY_create_dmd_device(cd, params, vk ?: dmdq.segment.u.integrity.vk,
+ journal_crypt_key ?: dmdq.segment.u.integrity.journal_crypt_key,
+ journal_mac_key ?: dmdq.segment.u.integrity.journal_integrity_key,
+ &dmd, flags, sb_flags);
+
+ if (!r)
+ dmd.size = dmdq.size;
+ } else
+ r = INTEGRITY_create_dmd_device(cd, params, vk, journal_crypt_key,
+ journal_mac_key, &dmd, flags, sb_flags);
+
+ if (!r)
+ r = INTEGRITY_activate_dmd_device(cd, name, CRYPT_INTEGRITY, &dmd, sb_flags);
+
+ dm_targets_free(cd, &dmdq);
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+int INTEGRITY_format(struct crypt_device *cd,
+ const struct crypt_params_integrity *params,
+ struct volume_key *journal_crypt_key,
+ struct volume_key *journal_mac_key)
+{
+ uint32_t dmi_flags;
+ char tmp_name[64], tmp_uuid[40];
+ struct crypt_dm_active_device dmdi = {
+ .size = 8,
+ .flags = CRYPT_ACTIVATE_PRIVATE, /* We always create the journal, but it may be left unused later */
+ };
+ struct dm_target *tgt = &dmdi.segment;
+ int r;
+ uuid_t tmp_uuid_bin;
+ struct volume_key *vk = NULL;
+
+ uuid_generate(tmp_uuid_bin);
+ uuid_unparse(tmp_uuid_bin, tmp_uuid);
+
+ r = snprintf(tmp_name, sizeof(tmp_name), "temporary-cryptsetup-%s", tmp_uuid);
+ if (r < 0 || (size_t)r >= sizeof(tmp_name))
+ return -EINVAL;
+
+ /* There is no data area yet, so we can use a fake zeroed key */
+ if (params && params->integrity_key_size)
+ vk = crypt_alloc_volume_key(params->integrity_key_size, NULL);
+
+ r = dm_integrity_target_set(cd, tgt, 0, dmdi.size, INTEGRITY_metadata_device(cd),
+ crypt_data_device(cd), crypt_get_integrity_tag_size(cd),
+ crypt_get_data_offset(cd), crypt_get_sector_size(cd), vk,
+ journal_crypt_key, journal_mac_key, params);
+ if (r < 0) {
+ crypt_free_volume_key(vk);
+ return r;
+ }
+
+ log_dbg(cd, "Trying to format INTEGRITY device on top of %s, tmp name %s, tag size %d.",
+ device_path(tgt->data_device), tmp_name, tgt->u.integrity.tag_size);
+
+ r = device_block_adjust(cd, tgt->data_device, DEV_EXCL, tgt->u.integrity.offset, NULL, NULL);
+ if (r < 0 && (dm_flags(cd, DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
+ log_err(cd, _("Kernel does not support dm-integrity mapping."));
+ r = -ENOTSUP;
+ }
+ if (r) {
+ dm_targets_free(cd, &dmdi);
+ return r;
+ }
+
+ if (tgt->u.integrity.meta_device) {
+ r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
+ if (r) {
+ dm_targets_free(cd, &dmdi);
+ return r;
+ }
+ }
+
+ r = dm_create_device(cd, tmp_name, CRYPT_INTEGRITY, &dmdi);
+ crypt_free_volume_key(vk);
+ dm_targets_free(cd, &dmdi);
+ if (r)
+ return r;
+
+ return dm_remove_device(cd, tmp_name, CRYPT_DEACTIVATE_FORCE);
+}
diff --git a/lib/integrity/integrity.h b/lib/integrity/integrity.h
new file mode 100644
index 0000000..2883ef8
--- /dev/null
+++ b/lib/integrity/integrity.h
@@ -0,0 +1,101 @@
+/*
+ * Integrity header definition
+ *
+ * Copyright (C) 2016-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_INTEGRITY_H
+#define _CRYPTSETUP_INTEGRITY_H
+
+#include <stdint.h>
+
+struct crypt_device;
+struct device;
+struct crypt_params_integrity;
+struct volume_key;
+struct crypt_dm_active_device;
+
+/* dm-integrity helper */
+#define SB_MAGIC "integrt"
+#define SB_VERSION_1 1
+#define SB_VERSION_2 2
+#define SB_VERSION_3 3
+#define SB_VERSION_4 4
+#define SB_VERSION_5 5
+
+#define SB_FLAG_HAVE_JOURNAL_MAC (1 << 0)
+#define SB_FLAG_RECALCULATING (1 << 1) /* V2 only */
+#define SB_FLAG_DIRTY_BITMAP (1 << 2) /* V3 only */
+#define SB_FLAG_FIXED_PADDING (1 << 3) /* V4 only */
+#define SB_FLAG_FIXED_HMAC (1 << 4) /* V5 only */
+
+struct superblock {
+ uint8_t magic[8];
+ uint8_t version;
+ int8_t log2_interleave_sectors;
+ uint16_t integrity_tag_size;
+ uint32_t journal_sections;
+ uint64_t provided_data_sectors;
+ uint32_t flags;
+ uint8_t log2_sectors_per_block;
+ uint8_t log2_blocks_per_bitmap_bit; /* V3 only */
+ uint8_t pad[2];
+ uint64_t recalc_sector; /* V2 only */
+} __attribute__ ((packed));
+
+int INTEGRITY_read_sb(struct crypt_device *cd,
+ struct crypt_params_integrity *params,
+ uint32_t *flags);
+
+int INTEGRITY_dump(struct crypt_device *cd, struct device *device, uint64_t offset);
+
+int INTEGRITY_data_sectors(struct crypt_device *cd,
+ struct device *device, uint64_t offset,
+ uint64_t *data_sectors);
+int INTEGRITY_key_size(const char *integrity);
+int INTEGRITY_tag_size(const char *integrity,
+ const char *cipher,
+ const char *cipher_mode);
+int INTEGRITY_hash_tag_size(const char *integrity);
+
+int INTEGRITY_format(struct crypt_device *cd,
+ const struct crypt_params_integrity *params,
+ struct volume_key *journal_crypt_key,
+ struct volume_key *journal_mac_key);
+
+int INTEGRITY_activate(struct crypt_device *cd,
+ const char *name,
+ const struct crypt_params_integrity *params,
+ struct volume_key *vk,
+ struct volume_key *journal_crypt_key,
+ struct volume_key *journal_mac_key,
+ uint32_t flags, uint32_t sb_flags);
+
+int INTEGRITY_create_dmd_device(struct crypt_device *cd,
+ const struct crypt_params_integrity *params,
+ struct volume_key *vk,
+ struct volume_key *journal_crypt_key,
+ struct volume_key *journal_mac_key,
+ struct crypt_dm_active_device *dmd,
+ uint32_t flags, uint32_t sb_flags);
+
+int INTEGRITY_activate_dmd_device(struct crypt_device *cd,
+ const char *name,
+ const char *type,
+ struct crypt_dm_active_device *dmd,
+ uint32_t sb_flags);
+#endif
diff --git a/lib/internal.h b/lib/internal.h
new file mode 100644
index 0000000..b5cb4e3
--- /dev/null
+++ b/lib/internal.h
@@ -0,0 +1,253 @@
+/*
+ * libcryptsetup - cryptsetup library internal
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef INTERNAL_H
+#define INTERNAL_H
+
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <assert.h>
+
+#include "nls.h"
+#include "bitops.h"
+#include "utils_blkid.h"
+#include "utils_crypt.h"
+#include "utils_loop.h"
+#include "utils_dm.h"
+#include "utils_keyring.h"
+#include "utils_io.h"
+#include "crypto_backend/crypto_backend.h"
+#include "utils_storage_wrappers.h"
+
+#include "libcryptsetup.h"
+
+#include "libcryptsetup_macros.h"
+#include "libcryptsetup_symver.h"
+
+#define LOG_MAX_LEN 4096
+#define MAX_DM_DEPS 32
+
+#define CRYPT_SUBDEV "SUBDEV" /* prefix for sublayered devices underneath public crypt types */
+
+#ifndef O_CLOEXEC
+#define O_CLOEXEC 0
+#endif
+
+struct crypt_device;
+struct luks2_reencrypt;
+
+struct volume_key {
+ int id;
+ size_t keylength;
+ const char *key_description;
+ struct volume_key *next;
+ char key[];
+};
+
+struct volume_key *crypt_alloc_volume_key(size_t keylength, const char *key);
+struct volume_key *crypt_generate_volume_key(struct crypt_device *cd, size_t keylength);
+void crypt_free_volume_key(struct volume_key *vk);
+int crypt_volume_key_set_description(struct volume_key *key, const char *key_description);
+void crypt_volume_key_set_id(struct volume_key *vk, int id);
+int crypt_volume_key_get_id(const struct volume_key *vk);
+void crypt_volume_key_add_next(struct volume_key **vks, struct volume_key *vk);
+struct volume_key *crypt_volume_key_next(struct volume_key *vk);
+struct volume_key *crypt_volume_key_by_id(struct volume_key *vk, int id);
+
+struct crypt_pbkdf_type *crypt_get_pbkdf(struct crypt_device *cd);
+int init_pbkdf_type(struct crypt_device *cd,
+ const struct crypt_pbkdf_type *pbkdf,
+ const char *dev_type);
+int verify_pbkdf_params(struct crypt_device *cd,
+ const struct crypt_pbkdf_type *pbkdf);
+int crypt_benchmark_pbkdf_internal(struct crypt_device *cd,
+ struct crypt_pbkdf_type *pbkdf,
+ size_t volume_key_size);
+const char *crypt_get_cipher_spec(struct crypt_device *cd);
+
+/* Device backend */
+struct device;
+int device_alloc(struct crypt_device *cd, struct device **device, const char *path);
+int device_alloc_no_check(struct device **device, const char *path);
+void device_close(struct crypt_device *cd, struct device *device);
+void device_free(struct crypt_device *cd, struct device *device);
+const char *device_path(const struct device *device);
+const char *device_dm_name(const struct device *device);
+const char *device_block_path(const struct device *device);
+void device_topology_alignment(struct crypt_device *cd,
+ struct device *device,
+ unsigned long *required_alignment, /* bytes */
+ unsigned long *alignment_offset, /* bytes */
+ unsigned long default_alignment);
+size_t device_block_size(struct crypt_device *cd, struct device *device);
+int device_read_ahead(struct device *device, uint32_t *read_ahead);
+int device_size(struct device *device, uint64_t *size);
+int device_open(struct crypt_device *cd, struct device *device, int flags);
+int device_open_excl(struct crypt_device *cd, struct device *device, int flags);
+void device_release_excl(struct crypt_device *cd, struct device *device);
+void device_disable_direct_io(struct device *device);
+int device_is_identical(struct device *device1, struct device *device2);
+int device_is_rotational(struct device *device);
+size_t device_alignment(struct device *device);
+int device_direct_io(const struct device *device);
+int device_fallocate(struct device *device, uint64_t size);
+void device_sync(struct crypt_device *cd, struct device *device);
+int device_check_size(struct crypt_device *cd,
+ struct device *device,
+ uint64_t req_offset, int falloc);
+void device_set_block_size(struct device *device, size_t size);
+size_t device_optimal_encryption_sector_size(struct crypt_device *cd, struct device *device);
+
+int device_open_locked(struct crypt_device *cd, struct device *device, int flags);
+int device_read_lock(struct crypt_device *cd, struct device *device);
+int device_write_lock(struct crypt_device *cd, struct device *device);
+void device_read_unlock(struct crypt_device *cd, struct device *device);
+void device_write_unlock(struct crypt_device *cd, struct device *device);
+bool device_is_locked(struct device *device);
+
+enum devcheck { DEV_OK = 0, DEV_EXCL = 1 };
+int device_check_access(struct crypt_device *cd,
+ struct device *device,
+ enum devcheck device_check);
+int device_block_adjust(struct crypt_device *cd,
+ struct device *device,
+ enum devcheck device_check,
+ uint64_t device_offset,
+ uint64_t *size,
+ uint32_t *flags);
+size_t size_round_up(size_t size, size_t block);
+
+int create_or_reload_device(struct crypt_device *cd, const char *name,
+ const char *type, struct crypt_dm_active_device *dmd);
+
+int create_or_reload_device_with_integrity(struct crypt_device *cd, const char *name,
+ const char *type, struct crypt_dm_active_device *dmd,
+ struct crypt_dm_active_device *dmdi);
+
+/* Receive backend devices from context helpers */
+struct device *crypt_metadata_device(struct crypt_device *cd);
+struct device *crypt_data_device(struct crypt_device *cd);
+
+int crypt_confirm(struct crypt_device *cd, const char *msg);
+
+char *crypt_lookup_dev(const char *dev_id);
+int crypt_dev_is_rotational(int major, int minor);
+int crypt_dev_is_partition(const char *dev_path);
+char *crypt_get_partition_device(const char *dev_path, uint64_t offset, uint64_t size);
+char *crypt_get_base_device(const char *dev_path);
+uint64_t crypt_dev_partition_offset(const char *dev_path);
+int lookup_by_disk_id(const char *dm_uuid);
+int lookup_by_sysfs_uuid_field(const char *dm_uuid);
+int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid);
+
+size_t crypt_getpagesize(void);
+unsigned crypt_cpusonline(void);
+uint64_t crypt_getphysmemory_kb(void);
+
+int init_crypto(struct crypt_device *ctx);
+
+#define log_dbg(c, x...) crypt_logf(c, CRYPT_LOG_DEBUG, x)
+#define log_std(c, x...) crypt_logf(c, CRYPT_LOG_NORMAL, x)
+#define log_verbose(c, x...) crypt_logf(c, CRYPT_LOG_VERBOSE, x)
+#define log_err(c, x...) crypt_logf(c, CRYPT_LOG_ERROR, x)
+
+int crypt_get_debug_level(void);
+
+void crypt_process_priority(struct crypt_device *cd, int *priority, bool raise);
+
+int crypt_metadata_locking_enabled(void);
+
+int crypt_random_init(struct crypt_device *ctx);
+int crypt_random_get(struct crypt_device *ctx, char *buf, size_t len, int quality);
+void crypt_random_exit(void);
+int crypt_random_default_key_rng(void);
+
+int crypt_plain_hash(struct crypt_device *cd,
+ const char *hash_name,
+ char *key, size_t key_size,
+ const char *passphrase, size_t passphrase_size);
+int PLAIN_activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vk,
+ uint64_t size,
+ uint32_t flags);
+
+void *crypt_get_hdr(struct crypt_device *cd, const char *type);
+void crypt_set_luks2_reencrypt(struct crypt_device *cd, struct luks2_reencrypt *rh);
+struct luks2_reencrypt *crypt_get_luks2_reencrypt(struct crypt_device *cd);
+
+int onlyLUKS2(struct crypt_device *cd);
+int onlyLUKS2mask(struct crypt_device *cd, uint32_t mask);
+
+int crypt_wipe_device(struct crypt_device *cd,
+ struct device *device,
+ crypt_wipe_pattern pattern,
+ uint64_t offset,
+ uint64_t length,
+ size_t wipe_block_size,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr);
+
+/* Internal integrity helpers */
+const char *crypt_get_integrity(struct crypt_device *cd);
+int crypt_get_integrity_key_size(struct crypt_device *cd);
+int crypt_get_integrity_tag_size(struct crypt_device *cd);
+
+int crypt_key_in_keyring(struct crypt_device *cd);
+void crypt_set_key_in_keyring(struct crypt_device *cd, unsigned key_in_keyring);
+int crypt_volume_key_load_in_keyring(struct crypt_device *cd, struct volume_key *vk);
+int crypt_use_keyring_for_vk(struct crypt_device *cd);
+void crypt_drop_keyring_key_by_description(struct crypt_device *cd, const char *key_description, key_type_t ktype);
+void crypt_drop_keyring_key(struct crypt_device *cd, struct volume_key *vks);
+
+static inline uint64_t compact_version(uint16_t major, uint16_t minor, uint16_t patch, uint16_t release)
+{
+ return (uint64_t)release | ((uint64_t)patch << 16) | ((uint64_t)minor << 32) | ((uint64_t)major << 48);
+}
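+
+/*
+ * Example: compact_version(5, 10, 0, 0) == 0x0005000a00000000, so
+ * kernel versions compare correctly with plain integer comparison.
+ */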
+
+int kernel_version(uint64_t *kversion);
+
+int crypt_serialize_lock(struct crypt_device *cd);
+void crypt_serialize_unlock(struct crypt_device *cd);
+
+bool crypt_string_in(const char *str, char **list, size_t list_size);
+int crypt_strcmp(const char *a, const char *b);
+int crypt_compare_dm_devices(struct crypt_device *cd,
+ const struct crypt_dm_active_device *src,
+ const struct crypt_dm_active_device *tgt);
+static inline void *crypt_zalloc(size_t size) { return calloc(1, size); }
+
+static inline bool uint64_mult_overflow(uint64_t *u, uint64_t b, size_t size)
+{
+ *u = (uint64_t)b * size;
+ if ((uint64_t)(*u / size) != b)
+ return true;
+ return false;
+}
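+
+/*
+ * Usage sketch, mirroring the callers in lib/fvault2/fvault2.c:
+ *
+ *   uint64_t off;
+ *
+ *   if (uint64_mult_overflow(&off, blkoff, block_size))
+ *       return -EINVAL; /* product did not fit into 64 bits */
+ */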
+
+#endif /* INTERNAL_H */
diff --git a/lib/keyslot_context.c b/lib/keyslot_context.c
new file mode 100644
index 0000000..89bd433
--- /dev/null
+++ b/lib/keyslot_context.c
@@ -0,0 +1,488 @@
+/*
+ * LUKS - Linux Unified Key Setup, keyslot unlock helpers
+ *
+ * Copyright (C) 2022-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2022-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+
+#include "luks1/luks.h"
+#include "luks2/luks2.h"
+#include "keyslot_context.h"
+
+static int get_luks2_key_by_passphrase(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ int segment,
+ struct volume_key **r_vk)
+{
+ int r;
+
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_PASSPHRASE);
+ assert(r_vk);
+
+ r = LUKS2_keyslot_open(cd, keyslot, segment, kc->u.p.passphrase, kc->u.p.passphrase_size, r_vk);
+ if (r < 0)
+ kc->error = r;
+
+ return r;
+}
+
+static int get_luks1_volume_key_by_passphrase(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ struct volume_key **r_vk)
+{
+ int r;
+
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_PASSPHRASE);
+ assert(r_vk);
+
+ r = LUKS_open_key_with_hdr(keyslot, kc->u.p.passphrase, kc->u.p.passphrase_size,
+ crypt_get_hdr(cd, CRYPT_LUKS1), r_vk, cd);
+ if (r < 0)
+ kc->error = r;
+
+ return r;
+}
+
+static int get_luks2_volume_key_by_passphrase(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ struct volume_key **r_vk)
+{
+ return get_luks2_key_by_passphrase(cd, kc, keyslot, CRYPT_DEFAULT_SEGMENT, r_vk);
+}
+
+static int get_passphrase_by_passphrase(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ const char **r_passphrase,
+ size_t *r_passphrase_size)
+{
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_PASSPHRASE);
+ assert(r_passphrase);
+ assert(r_passphrase_size);
+
+ *r_passphrase = kc->u.p.passphrase;
+ *r_passphrase_size = kc->u.p.passphrase_size;
+
+ return 0;
+}
+
+static int get_passphrase_by_keyfile(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ const char **r_passphrase,
+ size_t *r_passphrase_size)
+{
+ int r;
+
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_KEYFILE);
+ assert(r_passphrase);
+ assert(r_passphrase_size);
+
+ if (!kc->i_passphrase) {
+ r = crypt_keyfile_device_read(cd, kc->u.kf.keyfile,
+ &kc->i_passphrase, &kc->i_passphrase_size,
+ kc->u.kf.keyfile_offset, kc->u.kf.keyfile_size, 0);
+ if (r < 0) {
+ kc->error = r;
+ return r;
+ }
+ }
+
+ *r_passphrase = kc->i_passphrase;
+ *r_passphrase_size = kc->i_passphrase_size;
+
+ return 0;
+}
+
+static int get_luks2_key_by_keyfile(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ int segment,
+ struct volume_key **r_vk)
+{
+ int r;
+ const char *passphrase;
+ size_t passphrase_size;
+
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_KEYFILE);
+ assert(r_vk);
+
+ r = get_passphrase_by_keyfile(cd, kc, &passphrase, &passphrase_size);
+ if (r)
+ return r;
+
+ r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase, passphrase_size, r_vk);
+ if (r < 0)
+ kc->error = r;
+
+ return r;
+}
+
+static int get_luks2_volume_key_by_keyfile(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ struct volume_key **r_vk)
+{
+ return get_luks2_key_by_keyfile(cd, kc, keyslot, CRYPT_DEFAULT_SEGMENT, r_vk);
+}
+
+static int get_luks1_volume_key_by_keyfile(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ struct volume_key **r_vk)
+{
+ int r;
+ const char *passphrase;
+ size_t passphrase_size;
+
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_KEYFILE);
+ assert(r_vk);
+
+ r = get_passphrase_by_keyfile(cd, kc, &passphrase, &passphrase_size);
+ if (r)
+ return r;
+
+ r = LUKS_open_key_with_hdr(keyslot, passphrase, passphrase_size,
+ crypt_get_hdr(cd, CRYPT_LUKS1), r_vk, cd);
+ if (r < 0)
+ kc->error = r;
+
+ return r;
+}
+
+static int get_key_by_key(struct crypt_device *cd __attribute__((unused)),
+ struct crypt_keyslot_context *kc,
+ int keyslot __attribute__((unused)),
+ int segment __attribute__((unused)),
+ struct volume_key **r_vk)
+{
+ assert(kc && kc->type == CRYPT_KC_TYPE_KEY);
+ assert(r_vk);
+
+ if (!kc->u.k.volume_key) {
+ kc->error = -ENOENT;
+ return kc->error;
+ }
+
+ *r_vk = crypt_alloc_volume_key(kc->u.k.volume_key_size, kc->u.k.volume_key);
+ if (!*r_vk) {
+ kc->error = -ENOMEM;
+ return kc->error;
+ }
+
+ return 0;
+}
+
+static int get_volume_key_by_key(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot __attribute__((unused)),
+ struct volume_key **r_vk)
+{
+ return get_key_by_key(cd, kc, -2 /* unused */, -2 /* unused */, r_vk);
+}
+
+static int get_luks2_key_by_token(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot __attribute__((unused)),
+ int segment,
+ struct volume_key **r_vk)
+{
+ int r;
+
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_TOKEN);
+ assert(r_vk);
+
+ r = LUKS2_token_unlock_key(cd, crypt_get_hdr(cd, CRYPT_LUKS2), kc->u.t.id, kc->u.t.type,
+ kc->u.t.pin, kc->u.t.pin_size, segment, kc->u.t.usrptr, r_vk);
+ if (r < 0)
+ kc->error = r;
+
+ return r;
+}
+
+static int get_luks2_volume_key_by_token(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot __attribute__((unused)),
+ struct volume_key **r_vk)
+{
+ return get_luks2_key_by_token(cd, kc, -2 /* unused */, CRYPT_DEFAULT_SEGMENT, r_vk);
+}
+
+static int get_passphrase_by_token(struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ const char **r_passphrase,
+ size_t *r_passphrase_size)
+{
+ int r;
+
+ assert(cd);
+ assert(kc && kc->type == CRYPT_KC_TYPE_TOKEN);
+ assert(r_passphrase);
+ assert(r_passphrase_size);
+
+ if (!kc->i_passphrase) {
+ r = LUKS2_token_unlock_passphrase(cd, crypt_get_hdr(cd, CRYPT_LUKS2), kc->u.t.id,
+ kc->u.t.type, kc->u.t.pin, kc->u.t.pin_size,
+ kc->u.t.usrptr, &kc->i_passphrase, &kc->i_passphrase_size);
+ if (r < 0) {
+ kc->error = r;
+ return r;
+ }
+ kc->u.t.id = r;
+ }
+
+ *r_passphrase = kc->i_passphrase;
+ *r_passphrase_size = kc->i_passphrase_size;
+
+ return kc->u.t.id;
+}
+
+static void unlock_method_init_internal(struct crypt_keyslot_context *kc)
+{
+ assert(kc);
+
+ kc->error = 0;
+ kc->i_passphrase = NULL;
+ kc->i_passphrase_size = 0;
+}
+
+void crypt_keyslot_unlock_by_key_init_internal(struct crypt_keyslot_context *kc,
+ const char *volume_key,
+ size_t volume_key_size)
+{
+ assert(kc);
+
+ kc->type = CRYPT_KC_TYPE_KEY;
+ kc->u.k.volume_key = volume_key;
+ kc->u.k.volume_key_size = volume_key_size;
+ kc->get_luks2_key = get_key_by_key;
+ kc->get_luks2_volume_key = get_volume_key_by_key;
+ kc->get_luks1_volume_key = get_volume_key_by_key;
+ kc->get_passphrase = NULL; /* keyslot key context does not provide passphrase */
+ unlock_method_init_internal(kc);
+}
+
+void crypt_keyslot_unlock_by_passphrase_init_internal(struct crypt_keyslot_context *kc,
+ const char *passphrase,
+ size_t passphrase_size)
+{
+ assert(kc);
+
+ kc->type = CRYPT_KC_TYPE_PASSPHRASE;
+ kc->u.p.passphrase = passphrase;
+ kc->u.p.passphrase_size = passphrase_size;
+ kc->get_luks2_key = get_luks2_key_by_passphrase;
+ kc->get_luks2_volume_key = get_luks2_volume_key_by_passphrase;
+ kc->get_luks1_volume_key = get_luks1_volume_key_by_passphrase;
+ kc->get_passphrase = get_passphrase_by_passphrase;
+ unlock_method_init_internal(kc);
+}
+
+void crypt_keyslot_unlock_by_keyfile_init_internal(struct crypt_keyslot_context *kc,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset)
+{
+ assert(kc);
+
+ kc->type = CRYPT_KC_TYPE_KEYFILE;
+ kc->u.kf.keyfile = keyfile;
+ kc->u.kf.keyfile_size = keyfile_size;
+ kc->u.kf.keyfile_offset = keyfile_offset;
+ kc->get_luks2_key = get_luks2_key_by_keyfile;
+ kc->get_luks2_volume_key = get_luks2_volume_key_by_keyfile;
+ kc->get_luks1_volume_key = get_luks1_volume_key_by_keyfile;
+ kc->get_passphrase = get_passphrase_by_keyfile;
+ unlock_method_init_internal(kc);
+}
+
+void crypt_keyslot_unlock_by_token_init_internal(struct crypt_keyslot_context *kc,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr)
+{
+ assert(kc);
+
+ kc->type = CRYPT_KC_TYPE_TOKEN;
+ kc->u.t.id = token;
+ kc->u.t.type = type;
+ kc->u.t.pin = pin;
+ kc->u.t.pin_size = pin_size;
+ kc->u.t.usrptr = usrptr;
+ kc->get_luks2_key = get_luks2_key_by_token;
+ kc->get_luks2_volume_key = get_luks2_volume_key_by_token;
+ kc->get_luks1_volume_key = NULL; /* LUKS1 is not supported */
+ kc->get_passphrase = get_passphrase_by_token;
+ unlock_method_init_internal(kc);
+}
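+
+/*
+ * Editor's note (sketch): unlock code elsewhere in the library dispatches
+ * through the function pointers set by the init helpers above, without
+ * caring which context type backs them. Methods a type does not provide
+ * are NULL and must be checked first, e.g.:
+ *
+ *     if (!kc->get_luks1_volume_key)
+ *             return -EINVAL; // e.g. token contexts cannot unlock LUKS1
+ *     r = kc->get_luks1_volume_key(cd, kc, keyslot, &vk);
+ */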
+
+void crypt_keyslot_context_destroy_internal(struct crypt_keyslot_context *kc)
+{
+ if (!kc)
+ return;
+
+ crypt_safe_free(kc->i_passphrase);
+ kc->i_passphrase = NULL;
+ kc->i_passphrase_size = 0;
+}
+
+void crypt_keyslot_context_free(struct crypt_keyslot_context *kc)
+{
+ crypt_keyslot_context_destroy_internal(kc);
+ free(kc);
+}
+
+int crypt_keyslot_context_init_by_passphrase(struct crypt_device *cd,
+ const char *passphrase,
+ size_t passphrase_size,
+ struct crypt_keyslot_context **kc)
+{
+ struct crypt_keyslot_context *tmp;
+
+ if (!kc || !passphrase)
+ return -EINVAL;
+
+ tmp = malloc(sizeof(*tmp));
+ if (!tmp)
+ return -ENOMEM;
+
+ crypt_keyslot_unlock_by_passphrase_init_internal(tmp, passphrase, passphrase_size);
+
+ *kc = tmp;
+
+ return 0;
+}
+
+int crypt_keyslot_context_init_by_keyfile(struct crypt_device *cd,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset,
+ struct crypt_keyslot_context **kc)
+{
+ struct crypt_keyslot_context *tmp;
+
+ if (!kc || !keyfile)
+ return -EINVAL;
+
+ tmp = malloc(sizeof(*tmp));
+ if (!tmp)
+ return -ENOMEM;
+
+ crypt_keyslot_unlock_by_keyfile_init_internal(tmp, keyfile, keyfile_size, keyfile_offset);
+
+ *kc = tmp;
+
+ return 0;
+}
+
+int crypt_keyslot_context_init_by_token(struct crypt_device *cd,
+ int token,
+ const char *type,
+ const char *pin, size_t pin_size,
+ void *usrptr,
+ struct crypt_keyslot_context **kc)
+{
+ struct crypt_keyslot_context *tmp;
+
+ if (!kc || (token < 0 && token != CRYPT_ANY_TOKEN))
+ return -EINVAL;
+
+ tmp = malloc(sizeof(*tmp));
+ if (!tmp)
+ return -ENOMEM;
+
+ crypt_keyslot_unlock_by_token_init_internal(tmp, token, type, pin, pin_size, usrptr);
+
+ *kc = tmp;
+
+ return 0;
+}
+
+int crypt_keyslot_context_init_by_volume_key(struct crypt_device *cd,
+ const char *volume_key,
+ size_t volume_key_size,
+ struct crypt_keyslot_context **kc)
+{
+ struct crypt_keyslot_context *tmp;
+
+ if (!kc)
+ return -EINVAL;
+
+ tmp = malloc(sizeof(*tmp));
+ if (!tmp)
+ return -ENOMEM;
+
+ crypt_keyslot_unlock_by_key_init_internal(tmp, volume_key, volume_key_size);
+
+ *kc = tmp;
+
+ return 0;
+}
+
+int crypt_keyslot_context_get_error(struct crypt_keyslot_context *kc)
+{
+ return kc ? kc->error : -EINVAL;
+}
+
+int crypt_keyslot_context_set_pin(struct crypt_device *cd,
+ const char *pin, size_t pin_size,
+ struct crypt_keyslot_context *kc)
+{
+ if (!kc || kc->type != CRYPT_KC_TYPE_TOKEN)
+ return -EINVAL;
+
+ kc->u.t.pin = pin;
+ kc->u.t.pin_size = pin_size;
+ kc->error = 0;
+
+ return 0;
+}
+
+int crypt_keyslot_context_get_type(const struct crypt_keyslot_context *kc)
+{
+ return kc ? kc->type : -EINVAL;
+}
+
+const char *keyslot_context_type_string(const struct crypt_keyslot_context *kc)
+{
+ assert(kc);
+
+ switch (kc->type) {
+ case CRYPT_KC_TYPE_PASSPHRASE:
+ return "passphrase";
+ case CRYPT_KC_TYPE_KEYFILE:
+ return "keyfile";
+ case CRYPT_KC_TYPE_TOKEN:
+ return "token";
+ case CRYPT_KC_TYPE_KEY:
+ return "key";
+ default:
+ return "<unknown>";
+ }
+}
diff --git a/lib/keyslot_context.h b/lib/keyslot_context.h
new file mode 100644
index 0000000..7ca7428
--- /dev/null
+++ b/lib/keyslot_context.h
@@ -0,0 +1,111 @@
+/*
+ * LUKS - Linux Unified Key Setup, keyslot unlock helpers
+ *
+ * Copyright (C) 2022-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2022-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef KEYSLOT_CONTEXT_H
+#define KEYSLOT_CONTEXT_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "internal.h"
+
+typedef int (*keyslot_context_get_key) (
+ struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ int segment,
+ struct volume_key **r_vk);
+
+typedef int (*keyslot_context_get_volume_key) (
+ struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ int keyslot,
+ struct volume_key **r_vk);
+
+typedef int (*keyslot_context_get_passphrase) (
+ struct crypt_device *cd,
+ struct crypt_keyslot_context *kc,
+ const char **r_passphrase,
+ size_t *r_passphrase_size);
+
+/* crypt_keyslot_context */
+struct crypt_keyslot_context {
+ int type;
+
+ union {
+ struct {
+ const char *passphrase;
+ size_t passphrase_size;
+ } p;
+ struct {
+ const char *keyfile;
+ uint64_t keyfile_offset;
+ size_t keyfile_size;
+ } kf;
+ struct {
+ int id;
+ const char *type;
+ const char *pin;
+ size_t pin_size;
+ void *usrptr;
+ } t;
+ struct {
+ const char *volume_key;
+ size_t volume_key_size;
+ } k;
+ } u;
+
+ int error;
+
+ char *i_passphrase;
+ size_t i_passphrase_size;
+
+ keyslot_context_get_key get_luks2_key;
+ keyslot_context_get_volume_key get_luks1_volume_key;
+ keyslot_context_get_volume_key get_luks2_volume_key;
+ keyslot_context_get_passphrase get_passphrase;
+};
+
+void crypt_keyslot_context_destroy_internal(struct crypt_keyslot_context *kc);
+
+void crypt_keyslot_unlock_by_key_init_internal(struct crypt_keyslot_context *kc,
+ const char *volume_key,
+ size_t volume_key_size);
+
+void crypt_keyslot_unlock_by_passphrase_init_internal(struct crypt_keyslot_context *kc,
+ const char *passphrase,
+ size_t passphrase_size);
+
+void crypt_keyslot_unlock_by_keyfile_init_internal(struct crypt_keyslot_context *kc,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset);
+
+void crypt_keyslot_unlock_by_token_init_internal(struct crypt_keyslot_context *kc,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr);
+
+const char *keyslot_context_type_string(const struct crypt_keyslot_context *kc);
+
+#endif /* KEYSLOT_CONTEXT_H */
diff --git a/lib/libcryptsetup.h b/lib/libcryptsetup.h
new file mode 100644
index 0000000..e899829
--- /dev/null
+++ b/lib/libcryptsetup.h
@@ -0,0 +1,2881 @@
+/*
+ * libcryptsetup - cryptsetup library
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file libcryptsetup.h
+ * @brief Public cryptsetup API
+ *
+ * For more verbose examples of LUKS related use cases,
+ * please read @ref index "examples".
+ */
+
+#ifndef _LIBCRYPTSETUP_H
+#define _LIBCRYPTSETUP_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * @defgroup crypt-init Cryptsetup device context initialization
+ * Set of functions for creating and destroying @e crypt_device context
+ * @addtogroup crypt-init
+ * @{
+ */
+
+struct crypt_device; /* crypt device handle */
+struct crypt_keyslot_context;
+
+/**
+ * Initialize crypt device handle and check if the provided device exists.
+ *
+ * @param cd Returns pointer to crypt device handle
+ * @param device Path to the backing device.
+ * If @e device is not a block device but a path to some file,
+ * the function will try to create a loopdevice and attach
+ * the file to the loopdevice with AUTOCLEAR flag set.
+ * If @e device is @e NULL, the function will initialize the dm backend only.
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note Logging is not initialized here; possible messages use the
+ * default log function.
+ */
+int crypt_init(struct crypt_device **cd, const char *device);
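+
+/*
+ * Editor's example (a sketch, not upstream documentation): typical context
+ * lifetime; the device path is illustrative.
+ *
+ *     struct crypt_device *cd;
+ *     int r = crypt_init(&cd, "/dev/sdb1");
+ *     if (r < 0)
+ *             return r;
+ *     // ... e.g. crypt_load(cd, CRYPT_LUKS, NULL) and further calls ...
+ *     crypt_free(cd);
+ */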
+
+/**
+ * Initialize crypt device handle with optional data device and check
+ * if devices exist.
+ *
+ * @param cd Returns pointer to crypt device handle
+ * @param device Path to the backing device or detached header.
+ * @param data_device Path to the data device or @e NULL.
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note Logging is not initialized here; possible messages use the
+ * default log function.
+ */
+int crypt_init_data_device(struct crypt_device **cd,
+ const char *device,
+ const char *data_device);
+
+/**
+ * Initialize crypt device handle from provided active device name,
+ * and, optionally, from separate metadata (header) device
+ * and check if provided device exists.
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @param cd returns crypt device handle for active device
+ * @param name name of active crypt device
+ * @param header_device optional device containing on-disk header
+ * (@e NULL if it is the same as the underlying device or there is no on-disk header)
+ *
+ * @post In case @e device points to active LUKS device but header load fails,
+ * context device type is set to @e NULL and @e 0 is returned as if it were successful.
+ * Context with @e NULL device type can only be deactivated by crypt_deactivate.
+ *
+ * @note @link crypt_init_by_name @endlink is equivalent to calling
+ * crypt_init_by_name_and_header(cd, name, NULL);
+ */
+int crypt_init_by_name_and_header(struct crypt_device **cd,
+ const char *name,
+ const char *header_device);
+
+/**
+ * This is equivalent to calling
+ * @ref crypt_init_by_name_and_header "crypt_init_by_name_and_header(cd, name, NULL)".
+ *
+ * @sa crypt_init_by_name_and_header
+ */
+int crypt_init_by_name(struct crypt_device **cd, const char *name);
+
+/**
+ * Release crypt device context and used memory.
+ *
+ * @param cd crypt device handle
+ */
+void crypt_free(struct crypt_device *cd);
+
+/**
+ * Set confirmation callback (yes/no).
+ *
+ * If code needs confirmation (like resetting UUID or restoring a LUKS header from a file),
+ * this function is called. If not defined, everything is confirmed.
+ *
+ * Callback function @e confirm should return @e 0 if the operation is declined;
+ * any other value means it is accepted.
+ *
+ * @param cd crypt device handle
+ * @param confirm user defined confirm callback reference; use
+ * @p msg for message for user to confirm and
+ * @p usrptr for identification in callback
+ * @param usrptr provided identification in callback
+ *
+ * @note Current version of cryptsetup API requires confirmation for UUID change and
+ * LUKS header restore only.
+ */
+void crypt_set_confirm_callback(struct crypt_device *cd,
+ int (*confirm)(const char *msg, void *usrptr),
+ void *usrptr);
+
+/**
+ * Set data device.
+ * For LUKS it is the encrypted data device when the LUKS header is separated.
+ * For VERITY it is the data device when the hash device is separated.
+ *
+ * @param cd crypt device handle
+ * @param device path to device
+ *
+ * @returns 0 on success or negative errno value otherwise.
+ */
+int crypt_set_data_device(struct crypt_device *cd, const char *device);
+
+/**
+ * Set data device offset in 512-byte sectors.
+ * Used for LUKS.
+ * This function is a replacement for the data alignment fields in the LUKS param struct.
+ * If set to 0 (default), old behaviour is preserved.
+ * This value is reset on @link crypt_load @endlink.
+ *
+ * @param cd crypt device handle
+ * @param data_offset data offset in bytes
+ *
+ * @returns 0 on success or negative errno value otherwise.
+ *
+ * @note Data offset must be aligned to multiple of 8 (alignment to 4096-byte sectors)
+ * and must be big enough to accommodate the whole LUKS header with all keyslots.
+ * @note Data offset is enforced by this function, device topology
+ * information is no longer used after calling this function.
+ */
+int crypt_set_data_offset(struct crypt_device *cd, uint64_t data_offset);
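+
+/*
+ * Editor's example (sketch): placing the data area 16 MiB into the device;
+ * the value is passed in 512-byte sectors, so it stays a multiple of 8.
+ *
+ *     int r = crypt_set_data_offset(cd, (16 * 1024 * 1024) / 512);
+ */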
+
+/** @} */
+
+/**
+ * @defgroup crypt-log Cryptsetup logging
+ * Set of functions and defines used in cryptsetup for
+ * logging purposes
+ * @addtogroup crypt-log
+ * @{
+ */
+
+/** normal log level */
+#define CRYPT_LOG_NORMAL 0
+/** error log level */
+#define CRYPT_LOG_ERROR 1
+/** verbose log level */
+#define CRYPT_LOG_VERBOSE 2
+/** debug log level - always on stdout */
+#define CRYPT_LOG_DEBUG -1
+/** debug log level - additional JSON output (for LUKS2) */
+#define CRYPT_LOG_DEBUG_JSON -2
+
+/**
+ * Set log function.
+ *
+ * @param cd crypt device handle (can be @e NULL to set default log function)
+ * @param log user defined log function reference; use
+ * @p level for log level,
+ * @p msg for message, and
+ * @p usrptr for identification in callback
+ * @param usrptr provided identification in callback
+ */
+void crypt_set_log_callback(struct crypt_device *cd,
+ void (*log)(int level, const char *msg, void *usrptr),
+ void *usrptr);
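+
+/*
+ * Editor's example (sketch): a minimal callback routing library messages to
+ * stderr (assumes <stdio.h>).
+ *
+ *     static void log_cb(int level, const char *msg, void *usrptr)
+ *     {
+ *             fprintf(stderr, "cryptsetup[%d]: %s", level, msg);
+ *     }
+ *
+ *     crypt_set_log_callback(cd, log_cb, NULL);
+ */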
+
+/**
+ * Log a message through the defined log function, or the default one otherwise.
+ *
+ * @see crypt_set_log_callback
+ *
+ * @param cd crypt device handle
+ * @param level log level
+ * @param msg log message
+ */
+void crypt_log(struct crypt_device *cd, int level, const char *msg);
+
+/**
+ * Log function with variable arguments.
+ *
+ * @param cd crypt device handle
+ * @param level log level
+ * @param format formatted log message
+ */
+void crypt_logf(struct crypt_device *cd, int level, const char *format, ...);
+/** @} */
+
+/**
+ * @defgroup crypt-set Cryptsetup settings (RNG, PBKDF, locking)
+ * @addtogroup crypt-set
+ * @{
+ */
+
+/** CRYPT_RNG_URANDOM - use /dev/urandom */
+#define CRYPT_RNG_URANDOM 0
+/** CRYPT_RNG_RANDOM - use /dev/random (waits if no entropy in system) */
+#define CRYPT_RNG_RANDOM 1
+
+/**
+ * Set which RNG (random number generator) is used for generating the long-term key.
+ *
+ * @param cd crypt device handle
+ * @param rng_type kernel random number generator to use
+ *
+ */
+void crypt_set_rng_type(struct crypt_device *cd, int rng_type);
+
+/**
+ * Get which RNG (random number generator) is used for generating the long-term key.
+ *
+ * @param cd crypt device handle
+ * @return RNG type on success or negative errno value otherwise.
+ *
+ */
+int crypt_get_rng_type(struct crypt_device *cd);
+
+/**
+ * PBKDF parameters.
+ */
+struct crypt_pbkdf_type {
+ const char *type; /**< PBKDF algorithm */
+ const char *hash; /**< Hash algorithm */
+ uint32_t time_ms; /**< Requested time cost [milliseconds] */
+ uint32_t iterations; /**< Iterations, 0 or benchmarked value. */
+ uint32_t max_memory_kb; /**< Requested or benchmarked memory cost [kilobytes] */
+ uint32_t parallel_threads;/**< Requested parallel cost [threads] */
+ uint32_t flags; /**< CRYPT_PBKDF* flags */
+};
+
+/** Iteration time set by crypt_set_iteration_time(), for compatibility only. */
+#define CRYPT_PBKDF_ITER_TIME_SET (UINT32_C(1) << 0)
+/** Never run benchmarks, use pre-set value or defaults. */
+#define CRYPT_PBKDF_NO_BENCHMARK (UINT32_C(1) << 1)
+
+/** PBKDF2 according to RFC2898, LUKS1 legacy */
+#define CRYPT_KDF_PBKDF2 "pbkdf2"
+/** Argon2i according to RFC */
+#define CRYPT_KDF_ARGON2I "argon2i"
+/** Argon2id according to RFC */
+#define CRYPT_KDF_ARGON2ID "argon2id"
+
+/**
+ * Set default PBKDF (Password-Based Key Derivation Function) for the next keyslot
+ * to be created with any crypt_keyslot_add_*() call.
+ *
+ * @param cd crypt device handle
+ * @param pbkdf PBKDF parameters
+ *
+ * @return 0 on success or negative errno value otherwise.
+ *
+ * @note For LUKS1, only PBKDF2 is supported, other settings will be rejected.
+ * @note For non-LUKS context types the call succeeds, but PBKDF is not used.
+ */
+int crypt_set_pbkdf_type(struct crypt_device *cd,
+ const struct crypt_pbkdf_type *pbkdf);
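+
+/*
+ * Editor's example (a sketch; the values are illustrative, not
+ * recommendations): requesting Argon2id with a 300 ms time cost before
+ * adding a keyslot.
+ *
+ *     struct crypt_pbkdf_type pbkdf = {
+ *             .type = CRYPT_KDF_ARGON2ID,
+ *             .hash = "sha256",
+ *             .time_ms = 300,
+ *             .max_memory_kb = 65536,
+ *             .parallel_threads = 2,
+ *     };
+ *
+ *     int r = crypt_set_pbkdf_type(cd, &pbkdf);
+ */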
+
+/**
+ * Get PBKDF (Password-Based Key Derivation Function) parameters.
+ *
+ * @param pbkdf_type type of PBKDF
+ *
+ * @return struct on success or NULL value otherwise.
+ *
+ */
+const struct crypt_pbkdf_type *crypt_get_pbkdf_type_params(const char *pbkdf_type);
+
+/**
+ * Get default PBKDF (Password-Based Key Derivation Function) settings for keyslots.
+ * Works only with LUKS device handles (both versions).
+ *
+ * @param type type of device (see @link crypt-type @endlink)
+ *
+ * @return struct on success or NULL value otherwise.
+ *
+ */
+const struct crypt_pbkdf_type *crypt_get_pbkdf_default(const char *type);
+
+/**
+ * Get current PBKDF (Password-Based Key Derivation Function) settings for keyslots.
+ * Works only with LUKS device handles (both versions).
+ *
+ * @param cd crypt device handle
+ *
+ * @return struct on success or NULL value otherwise.
+ *
+ */
+const struct crypt_pbkdf_type *crypt_get_pbkdf_type(struct crypt_device *cd);
+
+/**
+ * Set how long cryptsetup should iterate in the PBKDF2 function.
+ * The default value targets an iteration count that takes around 1 second.
+ * \b Deprecated, only for backward compatibility.
+ * Use @link crypt_set_pbkdf_type @endlink.
+ *
+ * @param cd crypt device handle
+ * @param iteration_time_ms the time in ms
+ *
+ * @note If the time value is not acceptable for active PBKDF, value is quietly ignored.
+ */
+void crypt_set_iteration_time(struct crypt_device *cd, uint64_t iteration_time_ms);
+
+/**
+ * Helper to lock/unlock memory to avoid swapping sensitive data to disk.
+ * \b Deprecated, only for backward compatibility. Memory with keys is locked automatically.
+ *
+ * @param cd crypt device handle, can be @e NULL
+ * @param lock 0 to unlock otherwise lock memory
+ *
+ * @returns Value indicating whether the memory is locked (function can be called multiple times).
+ *
+ * @note Only root can do this.
+ * @note It locks/unlocks all process memory, not only crypt context.
+ */
+int crypt_memory_lock(struct crypt_device *cd, int lock) __attribute__((deprecated));
+
+/**
+ * Set global lock protection for on-disk metadata (file-based locking).
+ *
+ * @param cd crypt device handle, can be @e NULL
+ * @param enable 0 to disable locking otherwise enable it (default)
+ *
+ * @returns @e 0 on success or negative errno value otherwise.
+ *
+ * @note Locking applied only for some metadata formats (LUKS2).
+ * @note The switch is global on the library level.
+ * In the current version locking can only be switched off; it cannot be switched on again later.
+ */
+int crypt_metadata_locking(struct crypt_device *cd, int enable);
+
+/**
+ * Set metadata header area sizes. This applies only to LUKS2.
+ * These values limit the amount of metadata and the number of supported keyslots.
+ *
+ * @param cd crypt device handle, can be @e NULL
+ * @param metadata_size size in bytes of JSON area + 4k binary header
+ * @param keyslots_size size in bytes of binary keyslots area
+ *
+ * @returns @e 0 on success or negative errno value otherwise.
+ *
+ * @note The metadata area is stored twice and both copies contain a 4k binary header.
+ * Only 16, 32, 64, 128, 256, 512, 1024, 2048 and 4096 kB values are allowed (see LUKS2 specification).
+ * @note Keyslots area size must be a multiple of 4k, with a maximum of 128MB.
+ */
+int crypt_set_metadata_size(struct crypt_device *cd,
+ uint64_t metadata_size,
+ uint64_t keyslots_size);
+
+/**
+ * Get metadata header area sizes. This applies only to LUKS2.
+ * These values limit the amount of metadata and the number of supported keyslots.
+ *
+ * @param cd crypt device handle
+ * @param metadata_size size in bytes of JSON area + 4k binary header
+ * @param keyslots_size size in bytes of binary keyslots area
+ *
+ * @returns @e 0 on success or negative errno value otherwise.
+ */
+int crypt_get_metadata_size(struct crypt_device *cd,
+ uint64_t *metadata_size,
+ uint64_t *keyslots_size);
+
+/** @} */
+
+/**
+ * @defgroup crypt-type Cryptsetup on-disk format types
+ * Set of functions, \#defines and structs related
+ * to on-disk format types
+ * @addtogroup crypt-type
+ * @{
+ */
+
+/** plain crypt device, no on-disk header */
+#define CRYPT_PLAIN "PLAIN"
+/** LUKS version 1 header on-disk */
+#define CRYPT_LUKS1 "LUKS1"
+/** LUKS version 2 header on-disk */
+#define CRYPT_LUKS2 "LUKS2"
+/** loop-AES compatibility mode */
+#define CRYPT_LOOPAES "LOOPAES"
+/** dm-verity mode */
+#define CRYPT_VERITY "VERITY"
+/** TCRYPT (TrueCrypt-compatible and VeraCrypt-compatible) mode */
+#define CRYPT_TCRYPT "TCRYPT"
+/** INTEGRITY dm-integrity device */
+#define CRYPT_INTEGRITY "INTEGRITY"
+/** BITLK (BitLocker-compatible mode) */
+#define CRYPT_BITLK "BITLK"
+/** FVAULT2 (FileVault2-compatible mode) */
+#define CRYPT_FVAULT2 "FVAULT2"
+
+/** LUKS any version */
+#define CRYPT_LUKS NULL
+
+/**
+ * Get device type
+ *
+ * @param cd crypt device handle
+ * @return string according to device type or @e NULL if not known.
+ */
+const char *crypt_get_type(struct crypt_device *cd);
+
+/**
+ * Get device default LUKS type
+ *
+ * @return string according to device type (CRYPT_LUKS1 or CRYPT_LUKS2).
+ */
+const char *crypt_get_default_type(void);
+
+/**
+ *
+ * Structure used as parameter for PLAIN device type.
+ *
+ * @see crypt_format
+ */
+struct crypt_params_plain {
+ const char *hash; /**< password hash function */
+ uint64_t offset; /**< offset in sectors */
+ uint64_t skip; /**< IV offset / initialization sector */
+ uint64_t size; /**< size of mapped device or @e 0 for autodetection */
+ uint32_t sector_size; /**< sector size in bytes (@e 0 means 512 for compatibility) */
+};
+
+/**
+ * Structure used as parameter for LUKS device type.
+ *
+ * @see crypt_format, crypt_load
+ *
+ * @note during crypt_format @e data_device attribute determines
+ * if the LUKS header is separated from encrypted payload device
+ *
+ */
+struct crypt_params_luks1 {
+ const char *hash; /**< hash used in LUKS header */
+ size_t data_alignment; /**< data area alignment in 512B sectors, data offset is multiple of this */
+ const char *data_device; /**< detached encrypted data device or @e NULL */
+};
+
+/**
+ *
+ * Structure used as parameter for loop-AES device type.
+ *
+ * @see crypt_format
+ *
+ */
+struct crypt_params_loopaes {
+ const char *hash; /**< key hash function */
+ uint64_t offset; /**< offset in sectors */
+ uint64_t skip; /**< IV offset / initialization sector */
+};
+
+/**
+ *
+ * Structure used as parameter for dm-verity device type.
+ *
+ * @see crypt_format, crypt_load
+ *
+ */
+struct crypt_params_verity {
+ const char *hash_name; /**< hash function */
+ const char *data_device; /**< data_device (CRYPT_VERITY_CREATE_HASH) */
+ const char *hash_device; /**< hash_device (output only) */
+ const char *fec_device; /**< fec_device (output only) */
+ const char *salt; /**< salt */
+ uint32_t salt_size; /**< salt size (in bytes) */
+ uint32_t hash_type; /**< in-kernel hashing type */
+ uint32_t data_block_size; /**< data block size (in bytes) */
+ uint32_t hash_block_size; /**< hash block size (in bytes) */
+ uint64_t data_size; /**< data area size (in data blocks) */
+ uint64_t hash_area_offset; /**< hash/header offset (in bytes) */
+ uint64_t fec_area_offset; /**< FEC/header offset (in bytes) */
+ uint32_t fec_roots; /**< Reed-Solomon FEC roots */
+ uint32_t flags; /**< CRYPT_VERITY* flags */
+};
+
+/** No on-disk header (only hashes) */
+#define CRYPT_VERITY_NO_HEADER (UINT32_C(1) << 0)
+/** Verity hash in userspace before activation */
+#define CRYPT_VERITY_CHECK_HASH (UINT32_C(1) << 1)
+/** Create hash - format hash device */
+#define CRYPT_VERITY_CREATE_HASH (UINT32_C(1) << 2)
+/** Root hash signature required for activation */
+#define CRYPT_VERITY_ROOT_HASH_SIGNATURE (UINT32_C(1) << 3)
+
+/**
+ *
+ * Structure used as parameter for TCRYPT device type.
+ *
+ * @see crypt_load
+ *
+ */
+struct crypt_params_tcrypt {
+ const char *passphrase; /**< passphrase to unlock header (input only) */
+ size_t passphrase_size; /**< passphrase size (input only, max length is 64) */
+ const char **keyfiles; /**< keyfile paths to unlock header (input only) */
+ unsigned int keyfiles_count;/**< keyfiles count (input only) */
+ const char *hash_name; /**< hash function for PBKDF */
+ const char *cipher; /**< cipher chain c1[-c2[-c3]] */
+ const char *mode; /**< cipher block mode */
+ size_t key_size; /**< key size in bytes (the whole chain) */
+ uint32_t flags; /**< CRYPT_TCRYPT* flags */
+ uint32_t veracrypt_pim; /**< VeraCrypt Personal Iteration Multiplier */
+};
+
+/** Include legacy modes when scanning for header */
+#define CRYPT_TCRYPT_LEGACY_MODES (UINT32_C(1) << 0)
+/** Try to load hidden header (describing hidden device) */
+#define CRYPT_TCRYPT_HIDDEN_HEADER (UINT32_C(1) << 1)
+/** Try to load backup header */
+#define CRYPT_TCRYPT_BACKUP_HEADER (UINT32_C(1) << 2)
+/** Device contains encrypted system (with boot loader) */
+#define CRYPT_TCRYPT_SYSTEM_HEADER (UINT32_C(1) << 3)
+/** Include VeraCrypt modes when scanning for header,
+ * all other TCRYPT flags applies as well.
+ * VeraCrypt device is reported as TCRYPT type.
+ */
+#define CRYPT_TCRYPT_VERA_MODES (UINT32_C(1) << 4)
+
+/**
+ *
+ * Structure used as parameter for dm-integrity device type.
+ *
+ * @see crypt_format, crypt_load
+ *
+ * @note In bitmap tracking mode, the journal is implicitly disabled.
+ * As an ugly workaround for compatibility, journal_watermark is overloaded
+ * to mean 512-byte sectors per bit and journal_commit_time means bitmap flush time.
+ * All other journal parameters are not applied in the bitmap mode.
+ */
+struct crypt_params_integrity {
+ uint64_t journal_size; /**< size of journal in bytes */
+ unsigned int journal_watermark; /**< journal flush watermark in percent; in bitmap mode sectors-per-bit */
+ unsigned int journal_commit_time; /**< journal commit time (or bitmap flush time) in ms */
+ uint32_t interleave_sectors; /**< number of interleave sectors (power of two) */
+ uint32_t tag_size; /**< tag size per-sector in bytes */
+ uint32_t sector_size; /**< sector size in bytes */
+ uint32_t buffer_sectors; /**< number of sectors in one buffer */
+ const char *integrity; /**< integrity algorithm, NULL for LUKS2 */
+ uint32_t integrity_key_size; /**< integrity key size in bytes, info only, 0 for LUKS2 */
+
+ const char *journal_integrity; /**< journal integrity algorithm */
+ const char *journal_integrity_key; /**< journal integrity key, only for crypt_load */
+ uint32_t journal_integrity_key_size; /**< journal integrity key size in bytes, only for crypt_load */
+
+ const char *journal_crypt; /**< journal encryption algorithm */
+ const char *journal_crypt_key; /**< journal crypt key, only for crypt_load */
+ uint32_t journal_crypt_key_size; /**< journal crypt key size in bytes, only for crypt_load */
+};
+
+/**
+ * Structure used as parameter for LUKS2 device type.
+ *
+ * @see crypt_format, crypt_load
+ *
+ * @note during crypt_format @e data_device attribute determines
+ * if the LUKS2 header is separated from encrypted payload device
+ *
+ */
+struct crypt_params_luks2 {
+ const struct crypt_pbkdf_type *pbkdf; /**< PBKDF (and hash) parameters or @e NULL*/
+ const char *integrity; /**< integrity algorithm or @e NULL */
+ const struct crypt_params_integrity *integrity_params; /**< Data integrity parameters or @e NULL*/
+ size_t data_alignment; /**< data area alignment in 512B sectors, data offset is multiple of this */
+ const char *data_device; /**< detached encrypted data device or @e NULL */
+ uint32_t sector_size; /**< encryption sector size, 0 triggers auto-detection for optimal encryption sector size */
+ const char *label; /**< header label or @e NULL*/
+ const char *subsystem; /**< header subsystem label or @e NULL*/
+};
+/** @} */
+
+/**
+ * @defgroup crypt-actions Cryptsetup device context actions
+ * Set of functions for formatting and manipulating with specific crypt_type
+ * @addtogroup crypt-actions
+ * @{
+ */
+
+/**
+ * Create (format) new crypt device (and possible header on-disk) but do not activate it.
+ *
+ * @pre @e cd contains initialized and not formatted device context (device type must @b not be set)
+ *
+ * @param cd crypt device handle
+ * @param type type of device (optional params struct must be of this type)
+ * @param cipher (e.g. "aes")
+ * @param cipher_mode including IV specification (e.g. "xts-plain")
+ * @param uuid requested UUID or @e NULL if it should be generated
+ * @param volume_key pre-generated volume key or @e NULL if it should be generated (only for LUKS)
+ * @param volume_key_size size of volume key in bytes.
+ * @param params crypt type specific parameters (see @link crypt-type @endlink)
+ *
+ * @returns @e 0 on success or negative errno value otherwise.
+ *
+ * @note crypt_format does not create a LUKS keyslot (any version). To create a keyslot,
+ * call any crypt_keyslot_add_* function.
+ * @note For the VERITY @link crypt-type @endlink, only the uuid parameter is used; other parameters
+ * are ignored and verity-specific attributes are set through the mandatory params option.
+ */
+int crypt_format(struct crypt_device *cd,
+ const char *type,
+ const char *cipher,
+ const char *cipher_mode,
+ const char *uuid,
+ const char *volume_key,
+ size_t volume_key_size,
+ void *params);
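+
+/*
+ * Editor's example (sketch): formatting a LUKS2 device with AES-XTS and a
+ * generated 512-bit volume key (64 bytes covers both XTS subkeys); keyslots
+ * are added afterwards with crypt_keyslot_add_* calls.
+ *
+ *     struct crypt_params_luks2 params = { .sector_size = 4096 };
+ *
+ *     int r = crypt_format(cd, CRYPT_LUKS2, "aes", "xts-plain64",
+ *                          NULL, NULL, 64, &params);
+ */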
+
+/**
+ * Set format compatibility flags.
+ *
+ * @param cd crypt device handle
+ * @param flags CRYPT_COMPATIBILITY_* flags
+ */
+void crypt_set_compatibility(struct crypt_device *cd, uint32_t flags);
+
+/**
+ * Get compatibility flags.
+ *
+ * @param cd crypt device handle
+ *
+ * @returns compatibility flags
+ */
+uint32_t crypt_get_compatibility(struct crypt_device *cd);
+
+/** dm-integrity device uses less effective (legacy) padding (old kernels) */
+#define CRYPT_COMPAT_LEGACY_INTEGRITY_PADDING (UINT32_C(1) << 0)
+/** dm-integrity device does not protect superblock with HMAC (old kernels) */
+#define CRYPT_COMPAT_LEGACY_INTEGRITY_HMAC (UINT32_C(1) << 1)
+/** dm-integrity allow recalculating of volumes with HMAC keys (old kernels) */
+#define CRYPT_COMPAT_LEGACY_INTEGRITY_RECALC (UINT32_C(1) << 2)
+
+/**
+ * Convert to new type for already existing device.
+ *
+ * @param cd crypt device handle
+ * @param type type of device (optional params struct must be of this type)
+ * @param params crypt type specific parameters (see @link crypt-type @endlink)
+ *
+ * @returns 0 on success or negative errno value otherwise.
+ *
+ * @note Currently, only LUKS1->LUKS2 and LUKS2->LUKS1 conversions are supported.
+ * Not all LUKS2 devices may be converted back to LUKS1. To make such a conversion
+ * possible, all active LUKS2 keyslots must be in a LUKS1-compatible mode (i.e. the pbkdf
+ * type must be PBKDF2) and the device cannot be formatted with any authenticated
+ * encryption mode.
+ *
+ * @note Device must be offline for conversion. UUID change is not possible for active
+ * devices.
+ */
+int crypt_convert(struct crypt_device *cd,
+ const char *type,
+ void *params);
+
+/**
+ * Set new UUID for already existing device.
+ *
+ * @param cd crypt device handle
+ * @param uuid requested UUID or @e NULL if it should be generated
+ *
+ * @returns 0 on success or negative errno value otherwise.
+ *
+ * @note Currently, only LUKS device types are supported.
+ */
+int crypt_set_uuid(struct crypt_device *cd,
+ const char *uuid);
+
+/**
+ * Set new labels (label and subsystem) for already existing device.
+ *
+ * @param cd crypt device handle
+ * @param label requested label or @e NULL
+ * @param subsystem requested subsystem label or @e NULL
+ *
+ * @returns 0 on success or negative errno value otherwise.
+ *
+ * @note Currently, only LUKS2 device type is supported
+ */
+int crypt_set_label(struct crypt_device *cd,
+ const char *label,
+ const char *subsystem);
+
+/**
+ * Get the label of an existing device.
+ *
+ * @param cd crypt device handle
+ *
+ * @return label, or @e NULL otherwise
+ */
+const char *crypt_get_label(struct crypt_device *cd);
+
+/**
+ * Get the subsystem of an existing device.
+ *
+ * @param cd crypt device handle
+ *
+ * @return subsystem, or @e NULL otherwise
+ */
+const char *crypt_get_subsystem(struct crypt_device *cd);
+
+/**
+ * Enable or disable loading of volume keys via kernel keyring. When set to
+ * 'enabled', the library loads the key into the kernel keyring first and passes
+ * the key description to dm-crypt instead of a binary key copy. If set to
+ * 'disabled', the library falls back to the old method of loading the volume
+ * key directly in the dm-crypt target.
+ *
+ * @param cd crypt device handle, can be @e NULL
+ * @param enable 0 to disable loading of volume keys via kernel keyring
+ * (classical method) otherwise enable it (default)
+ *
+ * @returns @e 0 on success or negative errno value otherwise.
+ *
+ * @note Currently loading of volume keys via kernel keyring is supported
+ * (and enabled by default) only for LUKS2 devices.
+ * @note The switch is global on the library level.
+ */
+int crypt_volume_key_keyring(struct crypt_device *cd, int enable);
+
+/**
+ * Load crypt device parameters from on-disk header.
+ *
+ * @param cd crypt device handle
+ * @param requested_type @link crypt-type @endlink or @e NULL for all known
+ * @param params crypt type specific parameters (see @link crypt-type @endlink)
+ *
+ * @returns 0 on success or negative errno value otherwise.
+ *
+ * @post In case the LUKS header is read successfully but the payload device is too small,
+ * an error is returned and the device type in the context is set to @e NULL.
+ *
+ * @note Load works only for device types with on-disk metadata.
+ * @note Function does not print visible error message if metadata is not present.
+ *
+ */
+int crypt_load(struct crypt_device *cd,
+ const char *requested_type,
+ void *params);
+
+/**
+ * Try to repair crypt device LUKS on-disk header if invalid.
+ *
+ * @param cd crypt device handle
+ * @param requested_type @link crypt-type @endlink or @e NULL for all known
+ * @param params crypt type specific parameters (see @link crypt-type @endlink)
+ *
+ * @returns 0 on success or negative errno value otherwise.
+ *
+ * @note For a LUKS2 device, crypt_repair bypasses blkid checks and
+ * performs auto-recovery even when there are third-party device
+ * signatures found by blkid probes. Currently, crypt_repair on LUKS2
+ * works only if exactly one header checksum does not match or exactly
+ * one header is missing.
+ */
+int crypt_repair(struct crypt_device *cd,
+ const char *requested_type,
+ void *params);
+
+/**
+ * Resize crypt device.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to resize
+ * @param new_size new device size in sectors or @e 0 to use all of the underlying device size
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note Most notably, it returns -EPERM when the device was activated with a volume key
+ * in the kernel keyring and the current device handle (context) doesn't have the verified key
+ * loaded in the kernel. To load the volume key for an already active device, use any of
+ * @link crypt_activate_by_passphrase @endlink, @link crypt_activate_by_keyfile @endlink,
+ * @link crypt_activate_by_keyfile_offset @endlink, @link crypt_activate_by_volume_key @endlink,
+ * @link crypt_activate_by_keyring @endlink or @link crypt_activate_by_token @endlink with flag
+ * @e CRYPT_ACTIVATE_KEYRING_KEY raised and @e name parameter set to @e NULL.
+ */
+int crypt_resize(struct crypt_device *cd,
+ const char *name,
+ uint64_t new_size);
+
+/**
+ * Suspend crypt device.
+ *
+ * @param cd crypt device handle, can be @e NULL
+ * @param name name of device to suspend
+ *
+ * @return 0 on success or negative errno value otherwise.
+ *
+ * @note Only LUKS device type is supported
+ *
+ */
+int crypt_suspend(struct crypt_device *cd,
+ const char *name);
+
+/**
+ * Resume crypt device using passphrase.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to resume
+ * @param keyslot requested keyslot or CRYPT_ANY_SLOT
+ * @param passphrase passphrase used to unlock volume key
+ * @param passphrase_size size of @e passphrase (binary data)
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ *
+ * @note Only LUKS device type is supported
+ */
+int crypt_resume_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *passphrase,
+ size_t passphrase_size);
+
+/**
+ * Resume crypt device using key file.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to resume
+ * @param keyslot requested keyslot or CRYPT_ANY_SLOT
+ * @param keyfile key file used to unlock volume key
+ * @param keyfile_size number of bytes to read from keyfile, 0 is unlimited
+ * @param keyfile_offset number of bytes to skip at start of keyfile
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ */
+int crypt_resume_by_keyfile_device_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset);
+
+/**
+ * Backward compatible crypt_resume_by_keyfile_device_offset() (with size_t offset).
+ */
+int crypt_resume_by_keyfile_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ size_t keyfile_offset);
+
+/**
+ * Backward compatible crypt_resume_by_keyfile_device_offset() (without offset).
+ */
+int crypt_resume_by_keyfile(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size);
+/**
+ * Resume crypt device using provided volume key.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to resume
+ * @param volume_key provided volume key
+ * @param volume_key_size size of volume_key
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_resume_by_volume_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size);
+/**
+ * Resume crypt device using LUKS2 token.
+ *
+ * @param cd LUKS2 crypt device handle
+ * @param name name of device to resume
+ * @param type restrict type of token, if @e NULL all types are allowed
+ * @param token token id or @e CRYPT_ANY_TOKEN to try all tokens
+ * @param pin passphrase (or PIN) to unlock token (may be binary data)
+ * @param pin_size size of @e pin
+ * @param usrptr provided identification in callback
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ *
+ * @note EPERM errno means the token provided a passphrase successfully, but
+ * the passphrase did not unlock any keyslot associated with the token.
+ *
+ * @note ENOENT errno means no token (or subsequently assigned keyslot) was
+ * eligible to resume the LUKS2 device.
+ *
+ * @note ENOANO errno means the token is PIN-protected and the PIN was either
+ * missing (NULL) or wrong.
+ *
+ * @note Negative EAGAIN errno means the token handler requires additional
+ * hardware not present in the system to unlock the keyslot.
+ *
+ * @note With @e token set to CRYPT_ANY_TOKEN, libcryptsetup runs a best-effort
+ * loop to resume the device using any available token. Various token handlers
+ * may return different error codes. At the end, the loop returns error codes in
+ * the following order (from the most significant to the least): any negative
+ * errno except those listed below, a non-negative token id (success),
+ * -ENOANO, -EAGAIN, -EPERM, -ENOENT.
+ */
+int crypt_resume_by_token_pin(struct crypt_device *cd,
+ const char *name,
+ const char *type,
+ int token,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr);
+/** @} */
+
+/**
+ * @defgroup crypt-keyslot LUKS keyslots
+ * @addtogroup crypt-keyslot
+ * @{
+ */
+
+/** iterate through all keyslots and find first one that fits */
+#define CRYPT_ANY_SLOT -1
+
+/**
+ * Add key slot using provided passphrase.
+ *
+ * @pre @e cd contains initialized and formatted LUKS device context
+ *
+ * @param cd crypt device handle
+ * @param keyslot requested keyslot or @e CRYPT_ANY_SLOT
+ * @param passphrase passphrase used to unlock volume key
+ * @param passphrase_size size of passphrase (binary data)
+ * @param new_passphrase passphrase for new keyslot
+ * @param new_passphrase_size size of @e new_passphrase (binary data)
+ *
+ * @return allocated key slot number or negative errno otherwise.
+ */
+int crypt_keyslot_add_by_passphrase(struct crypt_device *cd,
+ int keyslot,
+ const char *passphrase,
+ size_t passphrase_size,
+ const char *new_passphrase,
+ size_t new_passphrase_size);
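+
+/*
+ * Editor's example (sketch): adding a second passphrase, authorized by an
+ * existing one; the sizes are byte counts of the binary passphrases.
+ *
+ *     int slot = crypt_keyslot_add_by_passphrase(cd, CRYPT_ANY_SLOT,
+ *                                                "old secret", 10,
+ *                                                "new secret", 10);
+ *     // slot is the allocated keyslot number, or a negative errno
+ */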
+
+/**
+ * Change defined key slot using provided passphrase.
+ *
+ * @pre @e cd contains initialized and formatted LUKS device context
+ *
+ * @param cd crypt device handle
+ * @param keyslot_old old keyslot or @e CRYPT_ANY_SLOT
+ * @param keyslot_new new keyslot (can be the same as old)
+ * @param passphrase passphrase used to unlock volume key
+ * @param passphrase_size size of passphrase (binary data)
+ * @param new_passphrase passphrase for new keyslot
+ * @param new_passphrase_size size of @e new_passphrase (binary data)
+ *
+ * @return allocated key slot number or negative errno otherwise.
+ */
+int crypt_keyslot_change_by_passphrase(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *passphrase,
+ size_t passphrase_size,
+ const char *new_passphrase,
+ size_t new_passphrase_size);
+
+/**
+ * Add key slot using provided key file path.
+ *
+ * @pre @e cd contains initialized and formatted LUKS device context
+ *
+ * @param cd crypt device handle
+ * @param keyslot requested keyslot or @e CRYPT_ANY_SLOT
+ * @param keyfile key file used to unlock volume key
+ * @param keyfile_size number of bytes to read from keyfile, @e 0 is unlimited
+ * @param keyfile_offset number of bytes to skip at start of keyfile
+ * @param new_keyfile keyfile for new keyslot
+ * @param new_keyfile_size number of bytes to read from @e new_keyfile, @e 0 is unlimited
+ * @param new_keyfile_offset number of bytes to skip at start of new_keyfile
+ *
+ * @return allocated key slot number or negative errno otherwise.
+ */
+int crypt_keyslot_add_by_keyfile_device_offset(struct crypt_device *cd,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset,
+ const char *new_keyfile,
+ size_t new_keyfile_size,
+ uint64_t new_keyfile_offset);
+
+/**
+ * Backward compatible crypt_keyslot_add_by_keyfile_device_offset() (with size_t offset).
+ */
+int crypt_keyslot_add_by_keyfile_offset(struct crypt_device *cd,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ size_t keyfile_offset,
+ const char *new_keyfile,
+ size_t new_keyfile_size,
+ size_t new_keyfile_offset);
+
+/**
+ * Backward compatible crypt_keyslot_add_by_keyfile_device_offset() (without offset).
+ */
+int crypt_keyslot_add_by_keyfile(struct crypt_device *cd,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ const char *new_keyfile,
+ size_t new_keyfile_size);
+
+/**
+ * Add key slot using provided volume key.
+ *
+ * @pre @e cd contains initialized and formatted LUKS device context
+ *
+ * @param cd crypt device handle
+ * @param keyslot requested keyslot or CRYPT_ANY_SLOT
+ * @param volume_key provided volume key or @e NULL if used after crypt_format
+ * @param volume_key_size size of volume_key
+ * @param passphrase passphrase for new keyslot
+ * @param passphrase_size size of passphrase
+ *
+ * @return allocated key slot number or negative errno otherwise.
+ */
+int crypt_keyslot_add_by_volume_key(struct crypt_device *cd,
+ int keyslot,
+ const char *volume_key,
+ size_t volume_key_size,
+ const char *passphrase,
+ size_t passphrase_size);
+
+/** create keyslot with volume key not associated with current dm-crypt segment */
+#define CRYPT_VOLUME_KEY_NO_SEGMENT (UINT32_C(1) << 0)
+
+/** create keyslot with new volume key and assign it to current dm-crypt segment */
+#define CRYPT_VOLUME_KEY_SET (UINT32_C(1) << 1)
+
+/** Assign key to first matching digest before creating new digest */
+#define CRYPT_VOLUME_KEY_DIGEST_REUSE (UINT32_C(1) << 2)
+
+/**
+ * Add key slot using provided key.
+ *
+ * @pre @e cd contains initialized and formatted LUKS2 device context
+ *
+ * @param cd crypt device handle
+ * @param keyslot requested keyslot or CRYPT_ANY_SLOT
+ * @param volume_key provided volume key or @e NULL (see note below)
+ * @param volume_key_size size of volume_key
+ * @param passphrase passphrase for new keyslot
+ * @param passphrase_size size of passphrase
+ * @param flags key flags to set
+ *
+ * @return allocated key slot number or negative errno otherwise.
+ *
+ * @note In case volume_key is @e NULL, the following first-matching rule applies:
+ * @li if cd is device handle used in crypt_format() by current process, the volume
+ * key generated (or passed) in crypt_format() will be stored in keyslot.
+ * @li if CRYPT_VOLUME_KEY_NO_SEGMENT flag is raised the new volume_key will be
+ * generated and stored in keyslot. The keyslot will become unbound (unusable to
+ * dm-crypt device activation).
+ * @li fails with -EINVAL otherwise
+ *
+ * @warning The CRYPT_VOLUME_KEY_SET flag force-updates the volume key. It is @b not @b reencryption!
+ * By doing so you will most probably destroy your ciphertext data device. It is supposed
+ * to be used only in a wrapped-key scheme for the key refresh process, where the real (inner) volume
+ * key stays untouched. It may be invoked on an active @e keyslot, which makes the (previously
+ * unbound) keyslot a new regular keyslot.
+ */
+int crypt_keyslot_add_by_key(struct crypt_device *cd,
+ int keyslot,
+ const char *volume_key,
+ size_t volume_key_size,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags);
+
+/**
+ * @defgroup crypt-keyslot-context Crypt keyslot context
+ * @addtogroup crypt-keyslot-context
+ * @{
+ */
+
+/**
+ * Release crypt keyslot context and used memory.
+ *
+ * @param kc crypt keyslot context
+ */
+void crypt_keyslot_context_free(struct crypt_keyslot_context *kc);
+
+/**
+ * Initialize keyslot context via passphrase.
+ *
+ * @param cd crypt device handle initialized to LUKS device context
+ * @param passphrase passphrase for a keyslot
+ * @param passphrase_size size of passphrase
+ * @param kc returns crypt keyslot context handle of type CRYPT_KC_TYPE_PASSPHRASE
+ *
+ * @return zero on success or negative errno otherwise.
+ */
+int crypt_keyslot_context_init_by_passphrase(struct crypt_device *cd,
+ const char *passphrase,
+ size_t passphrase_size,
+ struct crypt_keyslot_context **kc);
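+
+/*
+ * Editor's example (sketch): building a passphrase context and releasing it
+ * once it is no longer needed.
+ *
+ *     struct crypt_keyslot_context *kc = NULL;
+ *
+ *     if (!crypt_keyslot_context_init_by_passphrase(cd, "secret", 6, &kc)) {
+ *             // ... pass kc to a crypt_keyslot_*_by_keyslot_context call ...
+ *             crypt_keyslot_context_free(kc);
+ *     }
+ */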
+
+/**
+ * Initialize keyslot context via key file path.
+ *
+ * @param cd crypt device handle initialized to LUKS device context
+ *
+ * @param keyfile key file with passphrase for a keyslot
+ * @param keyfile_size number of bytes to read from keyfile, @e 0 is unlimited
+ * @param keyfile_offset number of bytes to skip at start of keyfile
+ * @param kc returns crypt keyslot context handle of type CRYPT_KC_TYPE_KEYFILE
+ *
+ * @return zero on success or negative errno otherwise.
+ */
+int crypt_keyslot_context_init_by_keyfile(struct crypt_device *cd,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset,
+ struct crypt_keyslot_context **kc);
+
+/**
+ * Initialize keyslot context via LUKS2 token.
+ *
+ * @param cd crypt device handle initialized to LUKS2 device context
+ *
+ * @param token token providing passphrase for a keyslot or CRYPT_ANY_TOKEN
+ * @param type restrict type of token, if @e NULL all types are allowed
+ * @param pin passphrase (or PIN) to unlock token (may be binary data)
+ * @param pin_size size of @e pin
+ * @param usrptr provided identification in callback
+ * @param kc returns crypt keyslot context handle of type CRYPT_KC_TYPE_TOKEN
+ *
+ * @return zero on success or negative errno otherwise.
+ */
+int crypt_keyslot_context_init_by_token(struct crypt_device *cd,
+ int token,
+ const char *type,
+ const char *pin, size_t pin_size,
+ void *usrptr,
+ struct crypt_keyslot_context **kc);
+
+/**
+ * Initialize keyslot context via key.
+ *
+ * @param cd crypt device handle initialized to LUKS device context
+ *
+ * @param volume_key provided volume key or @e NULL if used after crypt_format
+ * or with CRYPT_VOLUME_KEY_NO_SEGMENT flag
+ * @param volume_key_size size of volume_key
+ * @param kc returns crypt keyslot context handle of type CRYPT_KC_TYPE_KEY
+ *
+ * @return zero on success or negative errno otherwise.
+ */
+int crypt_keyslot_context_init_by_volume_key(struct crypt_device *cd,
+ const char *volume_key,
+ size_t volume_key_size,
+ struct crypt_keyslot_context **kc);
+
+/**
+ * Get error code per keyslot context from last failed call.
+ *
+ * @note If the last @link crypt_keyslot_add_by_keyslot_context @endlink call
+ * returned a non-negative code (success), the return value of this function is undefined.
+ *
+ * @param kc keyslot context involved in failed @link crypt_keyslot_add_by_keyslot_context @endlink
+ *
+ * @return Negative errno if keyslot context caused a failure, zero otherwise.
+ */
+int crypt_keyslot_context_get_error(struct crypt_keyslot_context *kc);
+
+/**
+ * Set a new PIN for a token-based keyslot context.
+ *
+ * @note Use when @link crypt_keyslot_add_by_keyslot_context @endlink failed
+ * and token keyslot context returned -ENOANO error code via
+ * @link crypt_keyslot_context_get_error @endlink.
+ *
+ * @param cd crypt device handle initialized to LUKS2 device context
+ * @param pin passphrase (or PIN) to unlock token (may be binary data)
+ * @param pin_size size of @e pin
+ * @param kc LUKS2 keyslot context (only @link CRYPT_KC_TYPE_TOKEN @endlink is allowed)
+ *
+ * @return zero on success or negative errno otherwise
+ */
+int crypt_keyslot_context_set_pin(struct crypt_device *cd,
+ const char *pin, size_t pin_size,
+ struct crypt_keyslot_context *kc);
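+
+/*
+ * Illustrative sketch of the -ENOANO retry flow described above (not part of
+ * this header's API). It assumes @e cd is an initialized LUKS2 device handle,
+ * @e new_kc an already initialized keyslot context for the new passphrase,
+ * and a hypothetical PIN "1234":
+ *
+ *	struct crypt_keyslot_context *kc;
+ *	int r;
+ *
+ *	r = crypt_keyslot_context_init_by_token(cd, CRYPT_ANY_TOKEN,
+ *						NULL, NULL, 0, NULL, &kc);
+ *	if (r == 0) {
+ *		r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, kc,
+ *							 CRYPT_ANY_SLOT, new_kc, 0);
+ *		if (r < 0 && crypt_keyslot_context_get_error(kc) == -ENOANO) {
+ *			// token is PIN protected: supply the PIN and retry
+ *			crypt_keyslot_context_set_pin(cd, "1234", 4, kc);
+ *			r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, kc,
+ *								 CRYPT_ANY_SLOT, new_kc, 0);
+ *		}
+ *		crypt_keyslot_context_free(kc);
+ *	}
+ */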
+
+/**
+ * @defgroup crypt-keyslot-context-types Crypt keyslot context types
+ * @addtogroup crypt-keyslot-context-types
+ * @{
+ */
+/** keyslot context initialized by passphrase (@link crypt_keyslot_context_init_by_passphrase @endlink) */
+#define CRYPT_KC_TYPE_PASSPHRASE INT16_C(1)
+/** keyslot context initialized by keyfile (@link crypt_keyslot_context_init_by_keyfile @endlink) */
+#define CRYPT_KC_TYPE_KEYFILE INT16_C(2)
+/** keyslot context initialized by token (@link crypt_keyslot_context_init_by_token @endlink) */
+#define CRYPT_KC_TYPE_TOKEN INT16_C(3)
+/** keyslot context initialized by volume key or unbound key (@link crypt_keyslot_context_init_by_volume_key @endlink) */
+#define CRYPT_KC_TYPE_KEY INT16_C(4)
+/** @} */
+
+/**
+ * Get type identifier for crypt keyslot context.
+ *
+ * @param kc keyslot context
+ *
+ * @return crypt keyslot context type id (see @link crypt-keyslot-context-types @endlink) or negative errno otherwise.
+ */
+int crypt_keyslot_context_get_type(const struct crypt_keyslot_context *kc);
+/** @} */
+
+/**
+ * Add a key slot using the volume key provided by keyslot context (kc). The new
+ * keyslot will be protected by the passphrase provided by the new keyslot context (new_kc).
+ * See @link crypt-keyslot-context @endlink for context initialization routines.
+ *
+ * @pre @e cd contains initialized and formatted LUKS device context.
+ *
+ * @param cd crypt device handle
+ * @param keyslot_existing existing keyslot or CRYPT_ANY_SLOT to get volume key from.
+ * @param kc keyslot context providing volume key.
+ * @param keyslot_new new keyslot or CRYPT_ANY_SLOT (first free number is used).
+ * @param new_kc keyslot context providing passphrase for new keyslot.
+ * @param flags key flags to set
+ *
+ * @return allocated key slot number or negative errno otherwise.
+ *
+ * @note new_kc cannot be a @e CRYPT_KC_TYPE_KEY type keyslot context.
+ *
+ * @note For kc parameter with type @e CRYPT_KC_TYPE_KEY the keyslot_existing
+ * parameter is ignored.
+ *
+ * @note In case there is no active LUKS keyslot to get the existing volume key from, one of the following must apply:
+ * @li @e cd must be device handle used in crypt_format() by current process (it holds reference to generated volume key)
+ * @li kc must be of @e CRYPT_KC_TYPE_KEY type with valid volume key.
+ *
+ * @note With the CRYPT_VOLUME_KEY_NO_SEGMENT flag set and kc of type @e CRYPT_KC_TYPE_KEY with @e volume_key set to @e NULL,
+ * a new volume key will be generated and stored in the new keyslot. The keyslot will become unbound (unusable
+ * for dm-crypt device activation).
+ *
+ * @warning CRYPT_VOLUME_KEY_SET flag forces an update of the volume key. It is @b not @b reencryption!
+ * By doing so you will most probably destroy your ciphertext data device. It's supposed
+ * to be used only in a wrapped key scheme for the key refresh process, where the real (inner)
+ * volume key stays untouched. It may be invoked on an active @e keyslot, which turns the
+ * (previously unbound) keyslot into a new regular keyslot.
+ */
+int crypt_keyslot_add_by_keyslot_context(struct crypt_device *cd,
+ int keyslot_existing,
+ struct crypt_keyslot_context *kc,
+ int keyslot_new,
+ struct crypt_keyslot_context *new_kc,
+ uint32_t flags);
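+
+/*
+ * Minimal illustrative sketch of the context-based keyslot API above (not
+ * part of this header's API; the device handle @e cd and both passphrases
+ * are hypothetical). The volume key is recovered through an existing
+ * passphrase and the new keyslot is protected by a second passphrase:
+ *
+ *	struct crypt_keyslot_context *kc = NULL, *new_kc = NULL;
+ *	int r;
+ *
+ *	r = crypt_keyslot_context_init_by_passphrase(cd, "old secret", 10, &kc);
+ *	if (r == 0)
+ *		r = crypt_keyslot_context_init_by_passphrase(cd, "new secret", 10, &new_kc);
+ *	if (r == 0)
+ *		r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, kc,
+ *							 CRYPT_ANY_SLOT, new_kc, 0);
+ *	crypt_keyslot_context_free(kc);
+ *	crypt_keyslot_context_free(new_kc);
+ */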
+
+/**
+ * Destroy (and disable) key slot.
+ *
+ * @pre @e cd contains initialized and formatted LUKS device context
+ *
+ * @param cd crypt device handle
+ * @param keyslot requested key slot to destroy
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note There is no passphrase verification used.
+ */
+int crypt_keyslot_destroy(struct crypt_device *cd, int keyslot);
+/** @} */
+
+/**
+ * @defgroup crypt-aflags Device runtime attributes
+ * Activation flags
+ * @addtogroup crypt-aflags
+ * @{
+ */
+
+/** device is read only */
+#define CRYPT_ACTIVATE_READONLY (UINT32_C(1) << 0)
+/** only reported for device without uuid */
+#define CRYPT_ACTIVATE_NO_UUID (UINT32_C(1) << 1)
+/** activate even if cannot grant exclusive access (DANGEROUS) */
+#define CRYPT_ACTIVATE_SHARED (UINT32_C(1) << 2)
+/** enable discards aka TRIM */
+#define CRYPT_ACTIVATE_ALLOW_DISCARDS (UINT32_C(1) << 3)
+/** skip global udev rules in activation ("private device"), input only */
+#define CRYPT_ACTIVATE_PRIVATE (UINT32_C(1) << 4)
+/** corruption detected (verity), output only */
+#define CRYPT_ACTIVATE_CORRUPTED (UINT32_C(1) << 5)
+/** use same_cpu_crypt option for dm-crypt */
+#define CRYPT_ACTIVATE_SAME_CPU_CRYPT (UINT32_C(1) << 6)
+/** use submit_from_crypt_cpus for dm-crypt */
+#define CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS (UINT32_C(1) << 7)
+/** dm-verity: ignore_corruption flag - ignore corruption, log it only */
+#define CRYPT_ACTIVATE_IGNORE_CORRUPTION (UINT32_C(1) << 8)
+/** dm-verity: restart_on_corruption flag - restart kernel on corruption */
+#define CRYPT_ACTIVATE_RESTART_ON_CORRUPTION (UINT32_C(1) << 9)
+/** dm-verity: ignore_zero_blocks - do not verify zero blocks */
+#define CRYPT_ACTIVATE_IGNORE_ZERO_BLOCKS (UINT32_C(1) << 10)
+/** key loaded in kernel keyring instead of directly in dm-crypt */
+#define CRYPT_ACTIVATE_KEYRING_KEY (UINT32_C(1) << 11)
+/** dm-integrity: direct writes, do not use journal */
+#define CRYPT_ACTIVATE_NO_JOURNAL (UINT32_C(1) << 12)
+/** dm-integrity: recovery mode - no journal, no integrity checks */
+#define CRYPT_ACTIVATE_RECOVERY (UINT32_C(1) << 13)
+/** ignore persistently stored flags */
+#define CRYPT_ACTIVATE_IGNORE_PERSISTENT (UINT32_C(1) << 14)
+/** dm-verity: check_at_most_once - check data blocks only the first time */
+#define CRYPT_ACTIVATE_CHECK_AT_MOST_ONCE (UINT32_C(1) << 15)
+/** allow activation check including unbound keyslots (keyslots without segments) */
+#define CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY (UINT32_C(1) << 16)
+/** dm-integrity: activate automatic recalculation */
+#define CRYPT_ACTIVATE_RECALCULATE (UINT32_C(1) << 17)
+/** reactivate existing and update flags, input only */
+#define CRYPT_ACTIVATE_REFRESH (UINT32_C(1) << 18)
+/** Use global lock to serialize memory hard KDF on activation (OOM workaround) */
+#define CRYPT_ACTIVATE_SERIALIZE_MEMORY_HARD_PBKDF (UINT32_C(1) << 19)
+/** dm-integrity: direct writes, use bitmap to track dirty sectors */
+#define CRYPT_ACTIVATE_NO_JOURNAL_BITMAP (UINT32_C(1) << 20)
+/** device is suspended (key should be wiped from memory), output only */
+#define CRYPT_ACTIVATE_SUSPENDED (UINT32_C(1) << 21)
+/** use IV sector counted in sector_size instead of default 512-byte sectors */
+#define CRYPT_ACTIVATE_IV_LARGE_SECTORS (UINT32_C(1) << 22)
+/** dm-verity: panic_on_corruption flag - panic kernel on corruption */
+#define CRYPT_ACTIVATE_PANIC_ON_CORRUPTION (UINT32_C(1) << 23)
+/** dm-crypt: bypass internal workqueue and process read requests synchronously. */
+#define CRYPT_ACTIVATE_NO_READ_WORKQUEUE (UINT32_C(1) << 24)
+/** dm-crypt: bypass internal workqueue and process write requests synchronously. */
+#define CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE (UINT32_C(1) << 25)
+/** dm-integrity: reset automatic recalculation */
+#define CRYPT_ACTIVATE_RECALCULATE_RESET (UINT32_C(1) << 26)
+/** dm-verity: try to use tasklets */
+#define CRYPT_ACTIVATE_TASKLETS (UINT32_C(1) << 27)
+
+/**
+ * Active device runtime attributes
+ */
+struct crypt_active_device {
+ uint64_t offset; /**< offset in sectors */
+ uint64_t iv_offset; /**< IV initialization sector */
+ uint64_t size; /**< active device size */
+ uint32_t flags; /**< activation flags */
+};
+
+/**
+ * Receive runtime attributes of active crypt device.
+ *
+ * @param cd crypt device handle (can be @e NULL)
+ * @param name name of active device
+ * @param cad preallocated active device attributes to fill
+ *
+ * @return @e 0 on success or negative errno value otherwise
+ *
+ */
+int crypt_get_active_device(struct crypt_device *cd,
+ const char *name,
+ struct crypt_active_device *cad);
+
+/**
+ * Get detected number of integrity failures.
+ *
+ * @param cd crypt device handle (can be @e NULL)
+ * @param name name of active device
+ *
+ * @return number of integrity failures or @e 0 otherwise
+ *
+ */
+uint64_t crypt_get_active_integrity_failures(struct crypt_device *cd,
+ const char *name);
+/** @} */
+
+/**
+ * @defgroup crypt-pflags LUKS2 Device persistent flags and requirements
+ * @addtogroup crypt-pflags
+ * @{
+ */
+
+/**
+ * LUKS2 header requirements
+ */
+/** Unfinished offline reencryption */
+#define CRYPT_REQUIREMENT_OFFLINE_REENCRYPT (UINT32_C(1) << 0)
+/** Online reencryption in-progress */
+#define CRYPT_REQUIREMENT_ONLINE_REENCRYPT (UINT32_C(1) << 1)
+/** unknown requirement in header (output only) */
+#define CRYPT_REQUIREMENT_UNKNOWN (UINT32_C(1) << 31)
+
+/**
+ * Persistent flags type
+ */
+typedef enum {
+ CRYPT_FLAGS_ACTIVATION, /**< activation flags, @see aflags */
+ CRYPT_FLAGS_REQUIREMENTS /**< requirements flags */
+} crypt_flags_type;
+
+/**
+ * Set persistent flags.
+ *
+ * @param cd crypt device handle (can be @e NULL)
+ * @param type type to set (CRYPT_FLAGS_ACTIVATION or CRYPT_FLAGS_REQUIREMENTS)
+ * @param flags flags to set
+ *
+ * @return @e 0 on success or negative errno value otherwise
+ *
+ * @note Valid only for LUKS2.
+ *
+ * @note Not all activation flags can be stored. Only ALLOW_DISCARDS,
+ * SAME_CPU_CRYPT, SUBMIT_FROM_CRYPT_CPUS and NO_JOURNAL can be
+ * stored persistently.
+ *
+ * @note Only requirement flags recognised by the current library may be set.
+ * CRYPT_REQUIREMENT_UNKNOWN is illegal (output only) in a set operation.
+ */
+int crypt_persistent_flags_set(struct crypt_device *cd,
+ crypt_flags_type type,
+ uint32_t flags);
+
+/**
+ * Get persistent flags stored in header.
+ *
+ * @param cd crypt device handle (can be @e NULL)
+ * @param type flags type to retrieve (CRYPT_FLAGS_ACTIVATION or CRYPT_FLAGS_REQUIREMENTS)
+ * @param flags reference to output variable
+ *
+ * @return @e 0 on success or negative errno value otherwise
+ */
+int crypt_persistent_flags_get(struct crypt_device *cd,
+ crypt_flags_type type,
+ uint32_t *flags);
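+
+/*
+ * Illustrative sketch (not part of this header's API; @e cd is an assumed
+ * LUKS2 device handle): persistently store the discards flag in the header
+ * and read the stored activation flags back:
+ *
+ *	uint32_t flags = 0;
+ *
+ *	crypt_persistent_flags_set(cd, CRYPT_FLAGS_ACTIVATION,
+ *				   CRYPT_ACTIVATE_ALLOW_DISCARDS);
+ *	crypt_persistent_flags_get(cd, CRYPT_FLAGS_ACTIVATION, &flags);
+ */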
+/** @} */
+
+/**
+ * @defgroup crypt-activation Device activation
+ * @addtogroup crypt-activation
+ * @{
+ */
+
+/**
+ * Activate device or check passphrase.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to create, if @e NULL only check passphrase
+ * @param keyslot requested keyslot to check or @e CRYPT_ANY_SLOT
+ * @param passphrase passphrase used to unlock volume key
+ * @param passphrase_size size of @e passphrase
+ * @param flags activation flags
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ */
+int crypt_activate_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags);
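+
+/*
+ * Illustrative sketch (not part of this header's API; the device path, mapped
+ * name and passphrase are hypothetical): open a LUKS2 device and activate it
+ * under a mapped name. crypt_init(), crypt_load() and crypt_free() are
+ * declared earlier in this header:
+ *
+ *	struct crypt_device *cd;
+ *
+ *	if (crypt_init(&cd, "/dev/sdb1") == 0) {
+ *		if (crypt_load(cd, CRYPT_LUKS2, NULL) == 0)
+ *			(void) crypt_activate_by_passphrase(cd, "secret_disk",
+ *							    CRYPT_ANY_SLOT,
+ *							    "passphrase", 10, 0);
+ *		crypt_free(cd);
+ *	}
+ */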
+
+/**
+ * Activate device or check using key file.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to create, if @e NULL only check keyfile
+ * @param keyslot requested keyslot to check or CRYPT_ANY_SLOT
+ * @param keyfile key file used to unlock volume key
+ * @param keyfile_size number of bytes to read from keyfile, 0 is unlimited
+ * @param keyfile_offset number of bytes to skip at start of keyfile
+ * @param flags activation flags
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ */
+int crypt_activate_by_keyfile_device_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset,
+ uint32_t flags);
+
+/**
+ * Backward compatible crypt_activate_by_keyfile_device_offset() (with size_t offset).
+ */
+int crypt_activate_by_keyfile_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ size_t keyfile_offset,
+ uint32_t flags);
+
+/**
+ * Backward compatible crypt_activate_by_keyfile_device_offset() (without offset).
+ */
+int crypt_activate_by_keyfile(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint32_t flags);
+
+/**
+ * Activate device using provided volume key.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to create, if @e NULL only check volume key
+ * @param volume_key provided volume key (or @e NULL to use internal)
+ * @param volume_key_size size of volume_key
+ * @param flags activation flags
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note If @e NULL is used for volume_key, device has to be initialized
+ * by previous operation (like @ref crypt_format
+ * or @ref crypt_init_by_name)
+ * @note For VERITY the volume key means the root hash required for activation.
+ *	 Because kernel dm-verity is always read-only, you always have to
+ *	 provide the CRYPT_ACTIVATE_READONLY flag.
+ * @note For TCRYPT the volume key should always be @e NULL;
+ *	 the key from the decrypted header is used instead.
+ */
+int crypt_activate_by_volume_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size,
+ uint32_t flags);
+
+/**
+ * Activate VERITY device using provided key and optional signature).
+ *
+ * @param cd crypt device handle
+ * @param name name of device to create
+ * @param volume_key provided volume key
+ * @param volume_key_size size of volume_key
+ * @param signature buffer with signature for the key
+ * @param signature_size size of signature buffer
+ * @param flags activation flags
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note For VERITY the volume key means the root hash required for activation.
+ * Because kernel dm-verity is always read-only, you always have to provide
+ * the CRYPT_ACTIVATE_READONLY flag.
+ */
+int crypt_activate_by_signed_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size,
+ const char *signature,
+ size_t signature_size,
+ uint32_t flags);
+
+/**
+ * Activate device using passphrase stored in kernel keyring.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to create, if @e NULL only check passphrase in keyring
+ * @param key_description kernel keyring key description the library should
+ * look for the passphrase in
+ * @param keyslot requested keyslot to check or CRYPT_ANY_SLOT
+ * @param flags activation flags
+ *
+ * @return @e unlocked keyslot number on success or negative errno value otherwise.
+ *
+ * @note Keyslot passphrase must be stored as the 'user' key type
+ * and the key has to be reachable from the process context
+ * on whose behalf this function is called.
+ */
+int crypt_activate_by_keyring(struct crypt_device *cd,
+ const char *name,
+ const char *key_description,
+ int keyslot,
+ uint32_t flags);
+
+/** lazy deactivation - remove once last user releases it */
+#define CRYPT_DEACTIVATE_DEFERRED (UINT32_C(1) << 0)
+/** force deactivation - if the device is busy, it is replaced by error device */
+#define CRYPT_DEACTIVATE_FORCE (UINT32_C(1) << 1)
+/** if set, remove lazy deactivation */
+#define CRYPT_DEACTIVATE_DEFERRED_CANCEL (UINT32_C(1) << 2)
+
+/**
+ * Deactivate crypt device. This function tries to remove the active device-mapper
+ * mapping from the kernel. Also, sensitive data like the volume key are removed from
+ * memory.
+ *
+ * @param cd crypt device handle, can be @e NULL
+ * @param name name of device to deactivate
+ * @param flags deactivation flags
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ */
+int crypt_deactivate_by_name(struct crypt_device *cd,
+ const char *name,
+ uint32_t flags);
+
+/**
+ * Deactivate crypt device. See @ref crypt_deactivate_by_name with empty @e flags.
+ */
+int crypt_deactivate(struct crypt_device *cd, const char *name);
+/** @} */
+
+/**
+ * @defgroup crypt-key Volume Key manipulation
+ * @addtogroup crypt-key
+ * @{
+ */
+
+/**
+ * Get volume key from crypt device.
+ *
+ * @param cd crypt device handle
+ * @param keyslot use this keyslot or @e CRYPT_ANY_SLOT
+ * @param volume_key buffer for volume key
+ * @param volume_key_size on input, size of buffer @e volume_key,
+ * on output size of @e volume_key
+ * @param passphrase passphrase used to unlock volume key
+ * @param passphrase_size size of @e passphrase
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ *
+ * @note For a TCRYPT cipher chain, the volume key is the concatenation of
+ * the keys of all ciphers in the chain.
+ * @note For VERITY the volume key means the root hash used for activation.
+ * @note For LUKS devices, if passphrase is @e NULL and volume key is cached in
+ * device context it returns the volume key generated in preceding
+ * @link crypt_format @endlink call.
+ */
+int crypt_volume_key_get(struct crypt_device *cd,
+ int keyslot,
+ char *volume_key,
+ size_t *volume_key_size,
+ const char *passphrase,
+ size_t passphrase_size);
+
+/**
+ * Get volume key from crypt device by keyslot context.
+ *
+ * @param cd crypt device handle
+ * @param keyslot use this keyslot or @e CRYPT_ANY_SLOT
+ * @param volume_key buffer for volume key
+ * @param volume_key_size on input, size of buffer @e volume_key,
+ * on output size of @e volume_key
+ * @param kc keyslot context used to unlock volume key
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ *
+ * @note See @link crypt-keyslot-context-types @endlink for info on keyslot
+ * context initialization.
+ * @note For a TCRYPT cipher chain, the volume key is the concatenation of
+ * the keys of all ciphers in the chain (kc may be NULL).
+ * @note For VERITY the volume key means the root hash used for activation
+ * (kc may be NULL).
+ * @note For LUKS devices, if kc is @e NULL and volume key is cached in
+ * device context it returns the volume key generated in preceding
+ * @link crypt_format @endlink call.
+ * @note @link CRYPT_KC_TYPE_TOKEN @endlink keyslot context is usable only with LUKS2 devices.
+ * @note @link CRYPT_KC_TYPE_KEY @endlink keyslot context cannot be used.
+ * @note To get LUKS2 unbound key, keyslot parameter must not be @e CRYPT_ANY_SLOT.
+ * @note EPERM errno means provided keyslot context could not unlock any (or selected)
+ * keyslot.
+ * @note ENOENT errno means no LUKS keyslot is available to retrieve volume key from
+ * and there's no cached volume key in device handle.
+ */
+int crypt_volume_key_get_by_keyslot_context(struct crypt_device *cd,
+ int keyslot,
+ char *volume_key,
+ size_t *volume_key_size,
+ struct crypt_keyslot_context *kc);
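+
+/*
+ * Illustrative sketch (not part of this header's API; the passphrase is
+ * hypothetical): retrieve the volume key via a passphrase keyslot context
+ * into a safe memory buffer, using the helpers declared at the end of this
+ * header. Assumes a keyslot is assigned to the data segment so the key size
+ * is known:
+ *
+ *	struct crypt_keyslot_context *kc;
+ *	size_t vk_size = crypt_get_volume_key_size(cd);
+ *	char *vk = crypt_safe_alloc(vk_size);
+ *
+ *	if (vk && crypt_keyslot_context_init_by_passphrase(cd, "secret", 6, &kc) == 0) {
+ *		(void) crypt_volume_key_get_by_keyslot_context(cd, CRYPT_ANY_SLOT,
+ *							       vk, &vk_size, kc);
+ *		crypt_keyslot_context_free(kc);
+ *	}
+ *	crypt_safe_free(vk);	// wipes the key before releasing the memory
+ */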
+
+/**
+ * Verify that provided volume key is valid for crypt device.
+ *
+ * @param cd crypt device handle
+ * @param volume_key provided volume key
+ * @param volume_key_size size of @e volume_key
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note Negative EPERM return value means that passed volume_key
+ * did not pass digest verification routine (not a valid volume
+ * key).
+ */
+int crypt_volume_key_verify(struct crypt_device *cd,
+ const char *volume_key,
+ size_t volume_key_size);
+/** @} */
+
+/**
+ * @defgroup crypt-devstat Crypt and Verity device status
+ * @addtogroup crypt-devstat
+ * @{
+ */
+
+/**
+ * Device status
+ */
+typedef enum {
+ CRYPT_INVALID, /**< device mapping is invalid in this context */
+ CRYPT_INACTIVE, /**< no such mapped device */
+ CRYPT_ACTIVE, /**< device is active */
+ CRYPT_BUSY /**< device is active and has open count > 0 */
+} crypt_status_info;
+
+/**
+ * Get status info about device name.
+ *
+ * @param cd crypt device handle, can be @e NULL
+ * @param name crypt device name
+ *
+ * @return value defined by crypt_status_info.
+ *
+ */
+crypt_status_info crypt_status(struct crypt_device *cd, const char *name);
+
+/**
+ * Dump text-formatted information about crypt or verity device to log output.
+ *
+ * @param cd crypt device handle
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_dump(struct crypt_device *cd);
+
+/**
+ * Dump JSON-formatted information about LUKS2 device
+ *
+ * @param cd crypt device handle (only LUKS2 format supported)
+ * @param json buffer with JSON, if NULL use log callback for output
+ * @param flags dump flags (reserved)
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_dump_json(struct crypt_device *cd, const char **json, uint32_t flags);
+
+/**
+ * Get cipher used in device.
+ *
+ * @param cd crypt device handle
+ *
+ * @return used cipher, e.g. "aes" or @e NULL otherwise
+ *
+ */
+const char *crypt_get_cipher(struct crypt_device *cd);
+
+/**
+ * Get cipher mode used in device.
+ *
+ * @param cd crypt device handle
+ *
+ * @return used cipher mode, e.g. "xts-plain", or @e NULL otherwise
+ *
+ */
+const char *crypt_get_cipher_mode(struct crypt_device *cd);
+
+/**
+ * Get device UUID.
+ *
+ * @param cd crypt device handle
+ *
+ * @return device UUID or @e NULL if not set
+ *
+ */
+const char *crypt_get_uuid(struct crypt_device *cd);
+
+/**
+ * Get path to underlying device.
+ *
+ * @param cd crypt device handle
+ *
+ * @return path to underlying device name
+ *
+ */
+const char *crypt_get_device_name(struct crypt_device *cd);
+
+/**
+ * Get path to detached metadata device or @e NULL if it is not detached.
+ *
+ * @param cd crypt device handle
+ *
+ * @return path to detached metadata device name or @e NULL
+ *
+ */
+const char *crypt_get_metadata_device_name(struct crypt_device *cd);
+
+/**
+ * Get device offset in 512-bytes sectors where real data starts (on underlying device).
+ *
+ * @param cd crypt device handle
+ *
+ * @return device offset in sectors
+ *
+ */
+uint64_t crypt_get_data_offset(struct crypt_device *cd);
+
+/**
+ * Get IV offset in 512-bytes sectors (skip).
+ *
+ * @param cd crypt device handle
+ *
+ * @return IV offset
+ *
+ */
+uint64_t crypt_get_iv_offset(struct crypt_device *cd);
+
+/**
+ * Get size (in bytes) of volume key for crypt device.
+ *
+ * @param cd crypt device handle
+ *
+ * @return volume key size
+ *
+ * @note For LUKS2, this function can be used only if there is at least
+ * one keyslot assigned to the data segment.
+ */
+int crypt_get_volume_key_size(struct crypt_device *cd);
+
+/**
+ * Get size (in bytes) of encryption sector for crypt device.
+ *
+ * @param cd crypt device handle
+ *
+ * @return sector size
+ *
+ */
+int crypt_get_sector_size(struct crypt_device *cd);
+
+/**
+ * Check if initialized LUKS context uses detached header
+ * (LUKS header located on a different device than data.)
+ *
+ * @param cd crypt device handle
+ *
+ * @return @e 1 if detached header is used, @e 0 if not
+ * or negative errno value otherwise.
+ *
+ * @note This is a runtime attribute, it does not say
+ * if a LUKS device requires detached header.
+ * This function works only with LUKS devices.
+ */
+int crypt_header_is_detached(struct crypt_device *cd);
+
+/**
+ * Get device parameters for VERITY device.
+ *
+ * @param cd crypt device handle
+ * @param vp verity device info
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ */
+int crypt_get_verity_info(struct crypt_device *cd,
+ struct crypt_params_verity *vp);
+
+/**
+ * Get device parameters for INTEGRITY device.
+ *
+ * @param cd crypt device handle
+ * @param ip integrity device info
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ */
+int crypt_get_integrity_info(struct crypt_device *cd,
+ struct crypt_params_integrity *ip);
+/** @} */
+
+/**
+ * @defgroup crypt-benchmark Benchmarking
+ * Benchmarking of algorithms
+ * @addtogroup crypt-benchmark
+ * @{
+ */
+
+/**
+ * Informational benchmark for ciphers.
+ *
+ * @param cd crypt device handle
+ * @param cipher (e.g. "aes")
+ * @param cipher_mode (e.g. "xts"), IV generator is ignored
+ * @param volume_key_size size of volume key in bytes
+ * @param iv_size size of IV in bytes
+ * @param buffer_size size of encryption buffer in bytes used in test
+ * @param encryption_mbs measured encryption speed in MiB/s
+ * @param decryption_mbs measured decryption speed in MiB/s
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note If @e buffer_size is too small and encryption time
+ * cannot be properly measured, -ERANGE is returned.
+ */
+int crypt_benchmark(struct crypt_device *cd,
+ const char *cipher,
+ const char *cipher_mode,
+ size_t volume_key_size,
+ size_t iv_size,
+ size_t buffer_size,
+ double *encryption_mbs,
+ double *decryption_mbs);
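+
+/*
+ * Illustrative sketch (not part of this header's API; values chosen for the
+ * example only, assumes <stdio.h>): measure AES-XTS throughput with a
+ * 512-bit key, 16-byte IV and a 1 MiB test buffer:
+ *
+ *	double enc_mbs, dec_mbs;
+ *
+ *	if (crypt_benchmark(cd, "aes", "xts", 64, 16, 1024 * 1024,
+ *			    &enc_mbs, &dec_mbs) == 0)
+ *		printf("enc %.1f MiB/s, dec %.1f MiB/s\n", enc_mbs, dec_mbs);
+ */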
+
+/**
+ * Informational benchmark for PBKDF.
+ *
+ * @param cd crypt device handle
+ * @param pbkdf PBKDF parameters
+ * @param password password for benchmark
+ * @param password_size size of password
+ * @param salt salt for benchmark
+ * @param salt_size size of salt
+ * @param volume_key_size output volume key size
+ * @param progress callback function
+ * @param usrptr provided identification in callback
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_benchmark_pbkdf(struct crypt_device *cd,
+ struct crypt_pbkdf_type *pbkdf,
+ const char *password,
+ size_t password_size,
+ const char *salt,
+ size_t salt_size,
+ size_t volume_key_size,
+ int (*progress)(uint32_t time_ms, void *usrptr),
+ void *usrptr);
+/** @} */
+
+/**
+ * @addtogroup crypt-keyslot
+ * @{
+ */
+
+/**
+ * Crypt keyslot info
+ */
+typedef enum {
+ CRYPT_SLOT_INVALID, /**< invalid keyslot */
+ CRYPT_SLOT_INACTIVE, /**< keyslot is inactive (free) */
+ CRYPT_SLOT_ACTIVE, /**< keyslot is active (used) */
+	CRYPT_SLOT_ACTIVE_LAST, /**< keyslot is active (used)
+				 * and last used at the same time */
+ CRYPT_SLOT_UNBOUND /**< keyslot is active and not bound
+ * to any crypt segment (LUKS2 only) */
+} crypt_keyslot_info;
+
+/**
+ * Get information about particular key slot.
+ *
+ * @param cd crypt device handle
+ * @param keyslot requested keyslot to check or CRYPT_ANY_SLOT
+ *
+ * @return value defined by crypt_keyslot_info
+ *
+ */
+crypt_keyslot_info crypt_keyslot_status(struct crypt_device *cd, int keyslot);
+
+/**
+ * Crypt keyslot priority
+ */
+typedef enum {
+	CRYPT_SLOT_PRIORITY_INVALID = -1, /**< no such slot */
+ CRYPT_SLOT_PRIORITY_IGNORE = 0, /**< CRYPT_ANY_SLOT will ignore it for open */
+ CRYPT_SLOT_PRIORITY_NORMAL = 1, /**< default priority, tried after preferred */
+ CRYPT_SLOT_PRIORITY_PREFER = 2, /**< will try to open first */
+} crypt_keyslot_priority;
+
+/**
+ * Get keyslot priority (LUKS2)
+ *
+ * @param cd crypt device handle
+ * @param keyslot keyslot number
+ *
+ * @return value defined by crypt_keyslot_priority
+ */
+crypt_keyslot_priority crypt_keyslot_get_priority(struct crypt_device *cd, int keyslot);
+
+/**
+ * Set keyslot priority (LUKS2)
+ *
+ * @param cd crypt device handle
+ * @param keyslot keyslot number
+ * @param priority priority defined in crypt_keyslot_priority
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_keyslot_set_priority(struct crypt_device *cd, int keyslot, crypt_keyslot_priority priority);
+
+/**
+ * Get number of keyslots supported for device type.
+ *
+ * @param type crypt device type
+ *
+ * @return slot count or negative errno otherwise if the device
+ * does not support keyslots.
+ */
+int crypt_keyslot_max(const char *type);
+
+/**
+ * Get keyslot area pointers (relative to metadata device).
+ *
+ * @param cd crypt device handle
+ * @param keyslot keyslot number
+ * @param offset offset on metadata device (in bytes)
+ * @param length length of keyslot area (in bytes)
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ */
+int crypt_keyslot_area(struct crypt_device *cd,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length);
+
+/**
+ * Get size (in bytes) of stored key in particular keyslot.
+ * Use for LUKS2 unbound keyslots; for other keyslots it is the same as @ref crypt_get_volume_key_size.
+ *
+ * @param cd crypt device handle
+ * @param keyslot keyslot number
+ *
+ * @return volume key size or negative errno value otherwise.
+ *
+ */
+int crypt_keyslot_get_key_size(struct crypt_device *cd, int keyslot);
+
+/**
+ * Get cipher and key size for keyslot encryption.
+ * Use for LUKS2 keyslots, where the keyslot encryption can differ from the data encryption.
+ * The returned parameters are those used for next keyslot operations.
+ *
+ * @param cd crypt device handle
+ * @param keyslot keyslot number or CRYPT_ANY_SLOT for default
+ * @param key_size encryption key size (in bytes)
+ *
+ * @return cipher specification on success or @e NULL.
+ *
+ * @note This is the encryption of keyslot itself, not the data encryption algorithm!
+ */
+const char *crypt_keyslot_get_encryption(struct crypt_device *cd, int keyslot, size_t *key_size);
+
+/**
+ * Get PBKDF parameters for keyslot.
+ *
+ * @param cd crypt device handle
+ * @param keyslot keyslot number
+ * @param pbkdf struct with returned PBKDF parameters
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_keyslot_get_pbkdf(struct crypt_device *cd, int keyslot, struct crypt_pbkdf_type *pbkdf);
+
+/**
+ * Set encryption for keyslot.
+ * Use for LUKS2 keyslot to set different encryption type than for data encryption.
+ * Parameters will be used for next keyslot operations that create or change a keyslot.
+ *
+ * @param cd crypt device handle
+ * @param cipher (e.g. "aes-xts-plain64")
+ * @param key_size encryption key size (in bytes)
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note To reset to default keyslot encryption (the same as for data)
+ * set cipher to NULL and key size to 0.
+ */
+int crypt_keyslot_set_encryption(struct crypt_device *cd,
+ const char *cipher,
+ size_t key_size);
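+
+/*
+ * Illustrative sketch (not part of this header's API; cipher and key size
+ * are example values): use a keyslot cipher different from the data cipher
+ * and read the effective setting back:
+ *
+ *	size_t key_size;
+ *	const char *cipher;
+ *
+ *	crypt_keyslot_set_encryption(cd, "aes-xts-plain64", 64);
+ *	cipher = crypt_keyslot_get_encryption(cd, CRYPT_ANY_SLOT, &key_size);
+ */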
+
+/**
+ * Get directory where mapped crypt devices are created
+ *
+ * @return the directory path
+ */
+const char *crypt_get_dir(void);
+
+/** @} */
+
+/**
+ * @defgroup crypt-backup Device metadata backup
+ * @addtogroup crypt-backup
+ * @{
+ */
+/**
+ * Backup header and keyslots to file.
+ *
+ * @param cd crypt device handle
+ * @param requested_type @link crypt-type @endlink or @e NULL for all known
+ * @param backup_file file to backup header to
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ */
+int crypt_header_backup(struct crypt_device *cd,
+ const char *requested_type,
+ const char *backup_file);
+
+/**
+ * Restore header and keyslots from backup file.
+ *
+ * @param cd crypt device handle
+ * @param requested_type @link crypt-type @endlink or @e NULL for all known
+ * @param backup_file file to restore header from
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ */
+int crypt_header_restore(struct crypt_device *cd,
+ const char *requested_type,
+ const char *backup_file);
+/** @} */
+
+/**
+ * @defgroup crypt-dbg Library debug level
+ * Set library debug level
+ * @addtogroup crypt-dbg
+ * @{
+ */
+
+/** Debug all */
+#define CRYPT_DEBUG_ALL -1
+/** Debug all with additional JSON dump (for LUKS2) */
+#define CRYPT_DEBUG_JSON -2
+/** Debug none */
+#define CRYPT_DEBUG_NONE 0
+
+/**
+ * Set the debug level for library
+ *
+ * @param level debug level
+ *
+ */
+void crypt_set_debug_level(int level);
+/** @} */
+
+/**
+ * @defgroup crypt-keyfile Function to read keyfile
+ * @addtogroup crypt-keyfile
+ * @{
+ */
+
+/**
+ * Read keyfile
+ *
+ * @param cd crypt device handle
+ * @param keyfile keyfile to read
+ * @param key buffer for key
+ * @param key_size_read size of read key
+ * @param keyfile_offset key offset in keyfile
+ * @param key_size exact key length to read from file or 0
+ * @param flags keyfile read flags
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note If key_size is set to zero, the internal maximum length is read
+ * and the actual size read is returned via the key_size_read parameter.
+ */
+int crypt_keyfile_device_read(struct crypt_device *cd,
+ const char *keyfile,
+ char **key, size_t *key_size_read,
+ uint64_t keyfile_offset,
+ size_t key_size,
+ uint32_t flags);
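+
+/*
+ * Illustrative sketch (not part of this header's API; the keyfile path is
+ * hypothetical): read a whole keyfile. The returned buffer is allocated by
+ * the library; releasing it with crypt_safe_free() (declared at the end of
+ * this header) also wipes its content:
+ *
+ *	char *key = NULL;
+ *	size_t key_size = 0;
+ *
+ *	if (crypt_keyfile_device_read(cd, "/etc/keys/disk.key",
+ *				      &key, &key_size, 0, 0, 0) == 0) {
+ *		// use key[0 .. key_size - 1]
+ *		crypt_safe_free(key);
+ *	}
+ */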
+
+/**
+ * Backward compatible crypt_keyfile_device_read() (with size_t offset).
+ */
+int crypt_keyfile_read(struct crypt_device *cd,
+ const char *keyfile,
+ char **key, size_t *key_size_read,
+ size_t keyfile_offset,
+ size_t key_size,
+ uint32_t flags);
+
+/** Read key only to the first end of line (\\n). */
+#define CRYPT_KEYFILE_STOP_EOL (UINT32_C(1) << 0)
+/** @} */
+
+/**
+ * @defgroup crypt-wipe Function to wipe device
+ * @addtogroup crypt-wipe
+ * @{
+ */
+/**
+ * Wipe pattern
+ */
+typedef enum {
+ CRYPT_WIPE_ZERO, /**< Fill with zeroes */
+ CRYPT_WIPE_RANDOM, /**< Use RNG to fill data */
+ CRYPT_WIPE_ENCRYPTED_ZERO, /**< Obsolete, same as CRYPT_WIPE_RANDOM */
+ CRYPT_WIPE_SPECIAL, /**< Compatibility only, do not use (Gutmann method) */
+} crypt_wipe_pattern;
+
+/**
+ * Wipe/Fill (part of) a device with the selected pattern.
+ *
+ * @param cd crypt device handle
+ * @param dev_path path to device to wipe or @e NULL if data device should be used
+ * @param pattern selected wipe pattern
+ * @param offset offset on device (in bytes)
+ * @param length length of area to be wiped (in bytes)
+ * @param wipe_block_size used block for wiping (one step) (in bytes)
+ * @param flags wipe flags
+ * @param progress callback function called after each @e wipe_block_size or @e NULL
+ * @param usrptr provided identification in callback
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @note A @e progress callback can interrupt wipe process by returning non-zero code.
+ *
+ * @note If the error value is -EIO or -EINTR, some part of the device could
+ * be overwritten. Other error codes (-EINVAL, -ENOMEM) mean that no IO was performed.
+ */
+int crypt_wipe(struct crypt_device *cd,
+ const char *dev_path, /* if null, use data device */
+ crypt_wipe_pattern pattern,
+ uint64_t offset,
+ uint64_t length,
+ size_t wipe_block_size,
+ uint32_t flags,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr
+);
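+
+/*
+ * Illustrative sketch (not part of this header's API; offset, length and
+ * block size are example values): zero the first MiB of the data device in
+ * one 1 MiB step:
+ *
+ *	int r = crypt_wipe(cd, NULL, CRYPT_WIPE_ZERO, 0, 1024 * 1024,
+ *			   1024 * 1024, 0, NULL, NULL);
+ */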
+
+/** Do not use direct-io */
+#define CRYPT_WIPE_NO_DIRECT_IO (UINT32_C(1) << 0)
+/** @} */
+
+/**
+ * @defgroup crypt-tokens LUKS2 token wrapper access
+ *
+ * Utilities for handling LUKS2 tokens.
+ * A token is a device or a method describing how to read the password for a
+ * particular keyslot automatically. It can be a chunk of data stored on a
+ * hardware token or just metadata describing how to generate the password.
+ *
+ * @addtogroup crypt-tokens
+ * @{
+ */
+
+/**
+ * Get number of tokens supported for device type.
+ *
+ * @param type crypt device type
+ *
+ * @return token count or negative errno otherwise if the device
+ * does not support tokens.
+ *
+ * @note Real number of supported tokens for a particular device depends
+ * on usable metadata area size.
+ */
+int crypt_token_max(const char *type);
+
+/** Iterate through all tokens */
+#define CRYPT_ANY_TOKEN -1
+
+/**
+ * Get content of a token definition in JSON format.
+ *
+ * @param cd crypt device handle
+ * @param token token id
+ * @param json buffer with JSON
+ *
+ * @return allocated token id or negative errno otherwise.
+ */
+int crypt_token_json_get(struct crypt_device *cd,
+ int token,
+ const char **json);
+
+/**
+ * Store content of a token definition in JSON format.
+ *
+ * @param cd crypt device handle
+ * @param token token id or @e CRYPT_ANY_TOKEN to allocate new one
+ * @param json buffer with JSON or @e NULL to remove token
+ *
+ * @return allocated token id or negative errno otherwise.
+ *
+ * @note The buffer must be in proper JSON format and must contain at least
+ * the string "type" with the token type and an array of strings named "keyslots".
+ * The keyslots array contains assignments to particular slots and can be empty.
+ */
+int crypt_token_json_set(struct crypt_device *cd,
+ int token,
+ const char *json);
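+
+/*
+ * Illustrative sketch of a minimal token definition satisfying the note
+ * above (the token type name "mytoken" is hypothetical):
+ *
+ *	const char *json = "{\"type\":\"mytoken\",\"keyslots\":[\"0\"]}";
+ *	int token = crypt_token_json_set(cd, CRYPT_ANY_TOKEN, json);
+ */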
+
+/**
+ * Token info
+ */
+typedef enum {
+ CRYPT_TOKEN_INVALID, /**< token is invalid */
+ CRYPT_TOKEN_INACTIVE, /**< token is empty (free) */
+ CRYPT_TOKEN_INTERNAL, /**< active internal token with driver */
+ CRYPT_TOKEN_INTERNAL_UNKNOWN, /**< active internal token (reserved name) with missing token driver */
+ CRYPT_TOKEN_EXTERNAL, /**< active external (user defined) token with driver */
+ CRYPT_TOKEN_EXTERNAL_UNKNOWN, /**< active external (user defined) token with missing token driver */
+} crypt_token_info;
+
+/**
+ * Get info for specific token.
+ *
+ * @param cd crypt device handle
+ * @param token existing token id
+ * @param type pointer for returned type string
+ *
+ * @return token status info. For any returned status (besides CRYPT_TOKEN_INVALID
+ * and CRYPT_TOKEN_INACTIVE) and if type parameter is not NULL it will
+ * contain address of type string.
+ *
+ * @note If required, create a copy of the string referenced in *type before calling the
+ * next libcryptsetup API function. The reference may become invalid.
+ */
+crypt_token_info crypt_token_status(struct crypt_device *cd, int token, const char **type);
+
+/**
+ * LUKS2 keyring token parameters.
+ *
+ * @see crypt_token_builtin_set
+ *
+ */
+struct crypt_token_params_luks2_keyring {
+ const char *key_description; /**< Reference in keyring */
+};
+
+/**
+ * Create a new luks2 keyring token.
+ *
+ * @param cd crypt device handle
+ * @param token token id or @e CRYPT_ANY_TOKEN to allocate new one
+ * @param params luks2 keyring token params
+ *
+ * @return allocated token id or negative errno otherwise.
+ *
+ */
+int crypt_token_luks2_keyring_set(struct crypt_device *cd,
+ int token,
+ const struct crypt_token_params_luks2_keyring *params);
+
+/**
+ * Get LUKS2 keyring token params
+ *
+ * @param cd crypt device handle
+ * @param token existing luks2 keyring token id
+ * @param params returned luks2 keyring token params
+ *
+ * @return allocated token id or negative errno otherwise.
+ *
+ * @note do not call free() on params members. Members are valid only
+ * until next libcryptsetup function is called.
+ */
+int crypt_token_luks2_keyring_get(struct crypt_device *cd,
+ int token,
+ struct crypt_token_params_luks2_keyring *params);
+
+/**
+ * Assign a token to particular keyslot.
+ * (There can be more keyslots assigned to one token id.)
+ *
+ * @param cd crypt device handle
+ * @param token token id
+ * @param keyslot keyslot to be assigned to token (CRYPT_ANY_SLOT
+ * assigns all active keyslots to token)
+ *
+ * @return allocated token id or negative errno otherwise.
+ */
+int crypt_token_assign_keyslot(struct crypt_device *cd,
+ int token,
+ int keyslot);
+
+/**
+ * Unassign a token from particular keyslot.
+ * (There can be more keyslots assigned to one token id.)
+ *
+ * @param cd crypt device handle
+ * @param token token id
+ * @param keyslot keyslot to be unassigned from token (CRYPT_ANY_SLOT
+ * unassigns all active keyslots from token)
+ *
+ * @return allocated token id or negative errno otherwise.
+ */
+int crypt_token_unassign_keyslot(struct crypt_device *cd,
+ int token,
+ int keyslot);
+
+/**
+ * Get info about token assignment to particular keyslot.
+ *
+ * @param cd crypt device handle
+ * @param token token id
+ * @param keyslot keyslot
+ *
+ * @return 0 on success (token exists and is assigned to the keyslot),
+ * -ENOENT if token is not assigned to a keyslot (token, keyslot
+ * or both may be inactive) or other negative errno otherwise.
+ */
+int crypt_token_is_assigned(struct crypt_device *cd,
+ int token,
+ int keyslot);
+
+/**
+ * Token handler open function prototype.
+ * This function retrieves the password from a token and returns an allocated buffer
+ * containing this password. The buffer has to be deallocated by calling the
+ * free() function and its content should be wiped before deallocation.
+ *
+ * @param cd crypt device handle
+ * @param token token id
+ * @param buffer returned allocated buffer with password
+ * @param buffer_len length of the buffer
+ * @param usrptr user data in @link crypt_activate_by_token @endlink
+ *
+ * @return 0 on success (token passed LUKS2 keyslot passphrase in buffer) or
+ * negative errno otherwise.
+ *
+ * @note Negative ENOANO errno means that token is PIN protected and caller should
+ * use @link crypt_activate_by_token_pin @endlink with PIN provided.
+ *
+ * @note Negative EAGAIN errno means token handler requires additional hardware
+ * not present in the system.
+ */
+typedef int (*crypt_token_open_func) (
+ struct crypt_device *cd,
+ int token,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr);
+
+/**
+ * Token handler open with passphrase/PIN function prototype.
+ * This function retrieves the password from a token and returns an allocated buffer
+ * containing this password. The buffer has to be deallocated by calling the
+ * free() function and its content should be wiped before deallocation.
+ *
+ * @param cd crypt device handle
+ * @param token token id
+ * @param pin passphrase (or PIN) to unlock token (may be binary data)
+ * @param pin_size size of @e pin
+ * @param buffer returned allocated buffer with password
+ * @param buffer_len length of the buffer
+ * @param usrptr user data in @link crypt_activate_by_token @endlink
+ *
+ * @return 0 on success (token passed LUKS2 keyslot passphrase in buffer) or
+ * negative errno otherwise.
+ *
+ * @note Negative ENOANO errno means that token is PIN protected and PIN was
+ * missing or wrong.
+ *
+ * @note Negative EAGAIN errno means token handler requires additional hardware
+ * not present in the system.
+ */
+typedef int (*crypt_token_open_pin_func) (
+ struct crypt_device *cd,
+ int token,
+ const char *pin,
+ size_t pin_size,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr);
+
+/**
+ * Token handler buffer free function prototype.
+ * This function is used by the library to free the buffer with the keyslot
+ * passphrase when it's no longer needed. If not defined, the library
+ * overwrites the buffer with zeroes and calls free().
+ *
+ * @param buffer the buffer with keyslot passphrase
+ * @param buffer_len the buffer length
+ */
+typedef void (*crypt_token_buffer_free_func) (void *buffer, size_t buffer_len);
+
+/**
+ * Token handler validate function prototype.
+ * This function validates JSON representation of user defined token for additional data
+ * specific for its token type. If defined in the handler, it's called
+ * during @link crypt_activate_by_token @endlink. It may also be called during
+ * @link crypt_token_json_set @endlink when appropriate token handler was registered before
+ * with @link crypt_token_register @endlink.
+ *
+ * @param cd crypt device handle
+ * @param json buffer with JSON
+ */
+typedef int (*crypt_token_validate_func) (struct crypt_device *cd, const char *json);
+
+/**
+ * Token handler dump function prototype.
+ * This function is supposed to print token implementation-specific details. It gets
+ * called during @link crypt_dump @endlink if the token handler was registered before.
+ *
+ * @param cd crypt device handle
+ * @param json buffer with token JSON
+ *
+ * @note dump implementations are advised to use @link crypt_log @endlink function
+ * to dump token details.
+ */
+typedef void (*crypt_token_dump_func) (struct crypt_device *cd, const char *json);
+
+/**
+ * Token handler version function prototype.
+ * This function is supposed to return pointer to version string information.
+ *
+ * @note The returned string is advised to contain only the version.
+ * For example '1.0.0' or 'v1.2.3.4'.
+ *
+ */
+typedef const char * (*crypt_token_version_func) (void);
+
+/**
+ * Token handler
+ */
+typedef struct {
+ const char *name; /**< token handler name */
+ crypt_token_open_func open; /**< token handler open function */
+ crypt_token_buffer_free_func buffer_free; /**< token handler buffer_free function (optional) */
+ crypt_token_validate_func validate; /**< token handler validate function (optional) */
+ crypt_token_dump_func dump; /**< token handler dump function (optional) */
+} crypt_token_handler;
+
+/**
+ * Register token handler
+ *
+ * @param handler token handler to register
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_token_register(const crypt_token_handler *handler);
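+
+/*
+ * Illustrative sketch (not part of this header's API; assumes <errno.h> and
+ * <string.h>, and the handler name and fixed passphrase are hypothetical):
+ * register a trivial token handler. A real open function would derive the
+ * passphrase from token metadata or hardware. A strdup()ed buffer is
+ * acceptable here because the default buffer_free path wipes it and calls
+ * free():
+ *
+ *	static int example_open(struct crypt_device *cd, int token,
+ *				char **buffer, size_t *buffer_len, void *usrptr)
+ *	{
+ *		*buffer = strdup("example passphrase");
+ *		if (!*buffer)
+ *			return -ENOMEM;
+ *		*buffer_len = strlen(*buffer);
+ *		return 0;
+ *	}
+ *
+ *	static const crypt_token_handler example_handler = {
+ *		.name = "example",
+ *		.open = example_open,
+ *	};
+ *
+ *	// during initialization:
+ *	crypt_token_register(&example_handler);
+ */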
+
+/**
+ * Report configured path where library searches for external token handlers
+ *
+ * @return @e absolute path when external tokens are enabled or @e NULL otherwise.
+ */
+const char *crypt_token_external_path(void);
+
+/**
+ * Disable external token handlers (plugins) support.
+ * Once disabled, it cannot be enabled again.
+ */
+void crypt_token_external_disable(void);
+
+/** ABI version for external token in libcryptsetup-token-[name].so */
+#define CRYPT_TOKEN_ABI_VERSION1 "CRYPTSETUP_TOKEN_1.0"
+
+/** open by token - ABI exported symbol for external token (mandatory) */
+#define CRYPT_TOKEN_ABI_OPEN "cryptsetup_token_open"
+/** open by token with PIN - ABI exported symbol for external token */
+#define CRYPT_TOKEN_ABI_OPEN_PIN "cryptsetup_token_open_pin"
+/** deallocate callback - ABI exported symbol for external token */
+#define CRYPT_TOKEN_ABI_BUFFER_FREE "cryptsetup_token_buffer_free"
+/** validate token metadata - ABI exported symbol for external token */
+#define CRYPT_TOKEN_ABI_VALIDATE "cryptsetup_token_validate"
+/** dump token metadata - ABI exported symbol for external token */
+#define CRYPT_TOKEN_ABI_DUMP "cryptsetup_token_dump"
+/** token version - ABI exported symbol for external token */
+#define CRYPT_TOKEN_ABI_VERSION "cryptsetup_token_version"
+
+/**
+ * Activate device or check key using a token.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to create, if @e NULL only check token
+ * @param token requested token to check or CRYPT_ANY_TOKEN to check all
+ * @param usrptr provided identification in callback
+ * @param flags activation flags
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ *
+ * @note EPERM errno means the token provided a passphrase successfully, but
+ * the passphrase did not unlock any keyslot associated with the token.
+ *
+ * @note ENOENT errno means no token (or subsequently assigned keyslot) was
+ * eligible to unlock device.
+ *
+ * @note ENOANO errno means that token is PIN protected and you should call
+ * @link crypt_activate_by_token_pin @endlink with PIN
+ *
+ * @note Negative EAGAIN errno means token handler requires additional hardware
+ * not present in the system.
+ *
+ * @note With @e token set to CRYPT_ANY_TOKEN, libcryptsetup runs a best-effort loop
+ * to unlock the device using any available token. It may happen that various token handlers
+ * return different error codes. At the end, the loop returns error codes in the following
+ * order (from the most significant to the least): any negative errno except those
+ * listed below, non-negative token id (success), -ENOANO, -EAGAIN, -EPERM, -ENOENT.
+ */
+int crypt_activate_by_token(struct crypt_device *cd,
+ const char *name,
+ int token,
+ void *usrptr,
+ uint32_t flags);
+
+/**
+ * Activate device or check key using a token with PIN.
+ *
+ * @param cd crypt device handle
+ * @param name name of device to create, if @e NULL only check token
+ * @param type restrict type of token, if @e NULL all types are allowed
+ * @param token requested token to check or CRYPT_ANY_TOKEN to check all
+ * @param pin passphrase (or PIN) to unlock token (may be binary data)
+ * @param pin_size size of @e pin
+ * @param usrptr provided identification in callback
+ * @param flags activation flags
+ *
+ * @return unlocked key slot number or negative errno otherwise.
+ *
+ * @note EPERM errno means the token provided a passphrase successfully, but
+ * the passphrase did not unlock any keyslot associated with the token.
+ *
+ * @note ENOENT errno means no token (or subsequently assigned keyslot) was
+ * eligible to unlock device.
+ *
+ * @note ENOANO errno means that the token is PIN protected and the PIN was
+ * either missing (NULL) or wrong.
+ *
+ * @note Negative EAGAIN errno means token handler requires additional hardware
+ * not present in the system.
+ *
+ * @note With @e token set to CRYPT_ANY_TOKEN, libcryptsetup runs a best-effort loop
+ * to unlock the device using any available token. It may happen that various token handlers
+ * return different error codes. At the end, the loop returns error codes in the following
+ * order (from the most significant to the least): any negative errno except those
+ * listed below, non-negative token id (success), -ENOANO, -EAGAIN, -EPERM, -ENOENT.
+ */
+int crypt_activate_by_token_pin(struct crypt_device *cd,
+ const char *name,
+ const char *type,
+ int token,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr,
+ uint32_t flags);
+/** @} */
+
+/**
+ * @defgroup crypt-reencryption LUKS2 volume reencryption support
+ *
+ * Set of functions for handling LUKS2 volume reencryption
+ *
+ * @addtogroup crypt-reencryption
+ * @{
+ */
+
+/** Initialize reencryption metadata but do not run reencryption yet. (in) */
+#define CRYPT_REENCRYPT_INITIALIZE_ONLY (UINT32_C(1) << 0)
+/** Move the first segment, used only with datashift resilience mode
+ * and subvariants. (in/out) */
+#define CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT (UINT32_C(1) << 1)
+/** Resume already initialized reencryption only. (in) */
+#define CRYPT_REENCRYPT_RESUME_ONLY (UINT32_C(1) << 2)
+/** Run reencryption recovery only. (in) */
+#define CRYPT_REENCRYPT_RECOVERY (UINT32_C(1) << 3)
+/** Reencryption requires metadata protection. (in/out) */
+#define CRYPT_REENCRYPT_REPAIR_NEEDED (UINT32_C(1) << 4)
+
+/**
+ * Reencryption direction
+ */
+typedef enum {
+ CRYPT_REENCRYPT_FORWARD = 0, /**< forward direction */
+ CRYPT_REENCRYPT_BACKWARD /**< backward direction */
+} crypt_reencrypt_direction_info;
+
+/**
+ * Reencryption mode
+ */
+typedef enum {
+ CRYPT_REENCRYPT_REENCRYPT = 0, /**< Reencryption mode */
+ CRYPT_REENCRYPT_ENCRYPT, /**< Encryption mode */
+ CRYPT_REENCRYPT_DECRYPT, /**< Decryption mode */
+} crypt_reencrypt_mode_info;
+
+/**
+ * LUKS2 reencryption options.
+ */
+struct crypt_params_reencrypt {
+ crypt_reencrypt_mode_info mode; /**< Reencryption mode, immutable after first init. */
+ crypt_reencrypt_direction_info direction; /**< Reencryption direction, immutable after first init. */
+ const char *resilience; /**< Resilience mode: "none", "checksum", "journal", "datashift",
+ "datashift-checksum" or "datashift-journal".
+ "datashift" mode is immutable, "datashift-" subvariant can be only
+ changed to other "datashift-" subvariant */
+ const char *hash; /**< Used hash for "checksum" resilience type, ignored otherwise. */
+ uint64_t data_shift; /**< Used in "datashift" mode (and subvariants), must be non-zero,
+ immutable after first init. */
+ uint64_t max_hotzone_size; /**< Maximum hotzone size (may be lowered by library). For "datashift-" subvariants
+ it is used to set size of moved segment (decryption only). */
+ uint64_t device_size; /**< Reencrypt only initial part of the data device. */
+ const struct crypt_params_luks2 *luks2; /**< LUKS2 parameters for the final reencryption volume.*/
+ uint32_t flags; /**< Reencryption flags. */
+};
+
+/**
+ * Initialize reencryption metadata using passphrase.
+ *
+ * This function initializes on-disk metadata to include all reencryption segments,
+ * according to the provided options.
+ * If metadata already contains ongoing reencryption metadata, it loads these parameters
+ * (in this situation all parameters except @e name and @e passphrase can be omitted).
+ *
+ * @param cd crypt device handle
+ * @param name name of active device or @e NULL for offline reencryption
+ * @param passphrase passphrase used to unlock volume key
+ * @param passphrase_size size of @e passphrase (binary data)
+ * @param keyslot_old keyslot to unlock existing device or CRYPT_ANY_SLOT
+ * @param keyslot_new existing (unbound) reencryption keyslot; must be set except for decryption
+ * @param cipher cipher specification (e.g. "aes")
+ * @param cipher_mode cipher mode and IV (e.g. "xts-plain64")
+ * @param params reencryption parameters @link crypt_params_reencrypt @endlink.
+ *
+ * @return reencryption key slot number or negative errno otherwise.
+ */
+int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ int keyslot_old,
+ int keyslot_new,
+ const char *cipher,
+ const char *cipher_mode,
+ const struct crypt_params_reencrypt *params);
+
+/**
+ * Initialize reencryption metadata using passphrase in keyring.
+ *
+ * This function initializes on-disk metadata to include all reencryption segments,
+ * according to the provided options.
+ * If metadata already contains ongoing reencryption metadata, it loads these parameters
+ * (in this situation all parameters except @e name and @e key_description can be omitted).
+ *
+ * @param cd crypt device handle
+ * @param name name of active device or @e NULL for offline reencryption
+ * @param key_description passphrase (key) identification in keyring
+ * @param keyslot_old keyslot to unlock existing device or CRYPT_ANY_SLOT
+ * @param keyslot_new existing (unbound) reencryption keyslot; must be set except for decryption
+ * @param cipher cipher specification (e.g. "aes")
+ * @param cipher_mode cipher mode and IV (e.g. "xts-plain64")
+ * @param params reencryption parameters @link crypt_params_reencrypt @endlink.
+ *
+ * @return reencryption key slot number or negative errno otherwise.
+ */
+int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
+ const char *name,
+ const char *key_description,
+ int keyslot_old,
+ int keyslot_new,
+ const char *cipher,
+ const char *cipher_mode,
+ const struct crypt_params_reencrypt *params);
+
+/**
+ * Legacy data reencryption function.
+ *
+ * @param cd crypt device handle
+ * @param progress is a callback function reporting device \b size,
+ * current \b offset of reencryption and provided \b usrptr identification
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ *
+ * @deprecated Use @link crypt_reencrypt_run @endlink instead.
+ */
+int crypt_reencrypt(struct crypt_device *cd,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
+__attribute__((deprecated));
+
+/**
+ * Run data reencryption.
+ *
+ * @param cd crypt device handle
+ * @param progress is a callback function reporting device \b size,
+ * current \b offset of reencryption and provided \b usrptr identification
+ * @param usrptr progress specific data
+ *
+ * @return @e 0 on success or negative errno value otherwise.
+ */
+int crypt_reencrypt_run(struct crypt_device *cd,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr);
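+
+/*
+ * Illustrative sketch (not part of this header's API; keyslot numbers,
+ * passphrase and cipher are hypothetical, and a particular setup may need
+ * additional fields): initialize and run online reencryption with checksum
+ * resilience:
+ *
+ *	struct crypt_params_reencrypt params = {
+ *		.mode = CRYPT_REENCRYPT_REENCRYPT,
+ *		.direction = CRYPT_REENCRYPT_FORWARD,
+ *		.resilience = "checksum",
+ *		.hash = "sha256",
+ *	};
+ *	int r;
+ *
+ *	r = crypt_reencrypt_init_by_passphrase(cd, "secret_disk", "passphrase", 10,
+ *					       CRYPT_ANY_SLOT, 1, "aes",
+ *					       "xts-plain64", &params);
+ *	if (r >= 0)
+ *		r = crypt_reencrypt_run(cd, NULL, NULL);
+ */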
+
+/**
+ * Reencryption status info
+ */
+typedef enum {
+ CRYPT_REENCRYPT_NONE = 0, /**< No reencryption in progress */
+ CRYPT_REENCRYPT_CLEAN, /**< Ongoing reencryption in a clean state. */
+	CRYPT_REENCRYPT_CRASH, /**< Aborted reencryption that needs internal recovery. */
+ CRYPT_REENCRYPT_INVALID /**< Invalid state. */
+} crypt_reencrypt_info;
+
+/**
+ * LUKS2 reencryption status.
+ *
+ * @param cd crypt device handle
+ * @param params reencryption parameters
+ *
+ * @return reencryption status info and parameters.
+ */
+crypt_reencrypt_info crypt_reencrypt_status(struct crypt_device *cd,
+ struct crypt_params_reencrypt *params);
+/** @} */
+
+/**
+ * @defgroup crypt-memory Safe memory helpers functions
+ * @addtogroup crypt-memory
+ * @{
+ */
+
+/**
+ * Allocate safe memory (content is safely wiped on deallocation).
+ *
+ * @param size size of memory in bytes
+ *
+ * @return pointer to allocated memory or @e NULL.
+ */
+void *crypt_safe_alloc(size_t size);
+
+/**
+ * Release safe memory, content is safely wiped.
+ * The pointer must be allocated with @link crypt_safe_alloc @endlink
+ *
+ * @param data pointer to memory to be deallocated
+ */
+void crypt_safe_free(void *data);
+
+/**
+ * Reallocate safe memory (content is copied and safely wiped on deallocation).
+ *
+ * @param data pointer to memory to be deallocated
+ * @param size new size of memory in bytes
+ *
+ * @return pointer to allocated memory or @e NULL.
+ */
+void *crypt_safe_realloc(void *data, size_t size);
+
+/**
+ * Safely clear memory area (the compiler should not optimize this call out).
+ *
+ * @param data pointer to memory to be cleared
+ * @param size size of memory in bytes
+ */
+void crypt_safe_memzero(void *data, size_t size);
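+
+/*
+ * Illustrative only: typical lifetime of a secret buffer using the safe
+ * memory helpers; the content is wiped on deallocation, so no explicit
+ * memset is needed before free.
+ *
+ *   char *pw = crypt_safe_alloc(pw_len);
+ *   if (pw) {
+ *       ... read the passphrase into pw and use it ...
+ *       crypt_safe_free(pw);   (wipes and releases)
+ *   }
+ */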
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _LIBCRYPTSETUP_H */
diff --git a/lib/libcryptsetup.pc.in b/lib/libcryptsetup.pc.in
new file mode 100644
index 0000000..7836293
--- /dev/null
+++ b/lib/libcryptsetup.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: cryptsetup
+Description: cryptsetup library
+Version: @LIBCRYPTSETUP_VERSION@
+Cflags: -I${includedir}
+Libs: -L${libdir} -lcryptsetup
+Requires.private: @PKGMODULES@
diff --git a/lib/libcryptsetup.sym b/lib/libcryptsetup.sym
new file mode 100644
index 0000000..d0f0d98
--- /dev/null
+++ b/lib/libcryptsetup.sym
@@ -0,0 +1,167 @@
+CRYPTSETUP_2.0 {
+ global:
+ crypt_init;
+ crypt_init_data_device;
+ crypt_init_by_name;
+ crypt_init_by_name_and_header;
+
+ crypt_set_log_callback;
+ crypt_set_confirm_callback;
+ crypt_set_iteration_time;
+ crypt_set_uuid;
+ crypt_set_label;
+ crypt_set_data_device;
+
+ crypt_set_compatibility;
+ crypt_get_compatibility;
+
+ crypt_memory_lock;
+ crypt_metadata_locking;
+ crypt_format;
+ crypt_convert;
+ crypt_load;
+ crypt_repair;
+ crypt_resize;
+ crypt_suspend;
+ crypt_resume_by_passphrase;
+ crypt_resume_by_keyfile;
+ crypt_resume_by_keyfile_offset;
+ crypt_resume_by_keyfile_device_offset;
+ crypt_resume_by_volume_key;
+ crypt_free;
+
+ crypt_keyslot_add_by_passphrase;
+ crypt_keyslot_change_by_passphrase;
+ crypt_keyslot_add_by_keyfile;
+ crypt_keyslot_add_by_keyfile_offset;
+ crypt_keyslot_add_by_keyfile_device_offset;
+ crypt_keyslot_add_by_volume_key;
+ crypt_keyslot_add_by_key;
+
+ crypt_keyslot_set_priority;
+ crypt_keyslot_get_priority;
+
+ crypt_token_json_get;
+ crypt_token_json_set;
+ crypt_token_status;
+ crypt_token_luks2_keyring_get;
+ crypt_token_luks2_keyring_set;
+ crypt_token_assign_keyslot;
+ crypt_token_unassign_keyslot;
+ crypt_token_is_assigned;
+ crypt_token_register;
+
+ crypt_activate_by_token;
+
+ crypt_keyslot_destroy;
+ crypt_activate_by_passphrase;
+ crypt_activate_by_keyfile;
+ crypt_activate_by_keyfile_offset;
+ crypt_activate_by_keyfile_device_offset;
+ crypt_activate_by_volume_key;
+ crypt_activate_by_signed_key;
+ crypt_activate_by_keyring;
+ crypt_deactivate;
+ crypt_deactivate_by_name;
+ crypt_volume_key_get;
+ crypt_volume_key_verify;
+ crypt_volume_key_keyring;
+ crypt_status;
+ crypt_dump;
+ crypt_benchmark;
+ crypt_benchmark_pbkdf;
+ crypt_get_cipher;
+ crypt_get_cipher_mode;
+ crypt_get_integrity_info;
+ crypt_get_uuid;
+ crypt_set_data_offset;
+ crypt_get_data_offset;
+ crypt_get_iv_offset;
+ crypt_get_volume_key_size;
+ crypt_get_device_name;
+ crypt_get_metadata_device_name;
+ crypt_get_metadata_size;
+ crypt_set_metadata_size;
+ crypt_get_verity_info;
+ crypt_get_sector_size;
+
+ crypt_get_type;
+ crypt_get_default_type;
+ crypt_get_active_device;
+ crypt_get_active_integrity_failures;
+ crypt_persistent_flags_set;
+ crypt_persistent_flags_get;
+
+ crypt_set_rng_type;
+ crypt_get_rng_type;
+ crypt_set_pbkdf_type;
+ crypt_get_pbkdf_type;
+ crypt_get_pbkdf_type_params;
+ crypt_get_pbkdf_default;
+
+ crypt_keyslot_max;
+ crypt_keyslot_area;
+ crypt_keyslot_status;
+ crypt_keyslot_get_key_size;
+ crypt_keyslot_set_encryption;
+ crypt_keyslot_get_encryption;
+ crypt_keyslot_get_pbkdf;
+
+ crypt_get_dir;
+ crypt_set_debug_level;
+ crypt_log;
+
+ crypt_header_backup;
+ crypt_header_restore;
+
+ crypt_keyfile_read;
+ crypt_keyfile_device_read;
+
+ crypt_wipe;
+
+ crypt_reencrypt_init_by_passphrase;
+ crypt_reencrypt_init_by_keyring;
+ crypt_reencrypt;
+ crypt_reencrypt_status;
+
+ crypt_safe_alloc;
+ crypt_safe_realloc;
+ crypt_safe_free;
+ crypt_safe_memzero;
+ local:
+ *;
+};
+
+CRYPTSETUP_2.4 {
+ global:
+ crypt_reencrypt_run;
+ crypt_token_max;
+ crypt_header_is_detached;
+ crypt_logf;
+ crypt_activate_by_token_pin;
+ crypt_dump_json;
+ crypt_format;
+ crypt_token_external_disable;
+ crypt_token_external_path;
+} CRYPTSETUP_2.0;
+
+CRYPTSETUP_2.5 {
+ global:
+ crypt_get_label;
+ crypt_get_subsystem;
+ crypt_resume_by_token_pin;
+} CRYPTSETUP_2.4;
+
+CRYPTSETUP_2.6 {
+ global:
+ crypt_keyslot_context_free;
+ crypt_keyslot_context_init_by_passphrase;
+ crypt_keyslot_context_init_by_keyfile;
+ crypt_keyslot_context_init_by_token;
+ crypt_keyslot_context_init_by_volume_key;
+ crypt_keyslot_context_get_error;
+ crypt_keyslot_context_set_pin;
+ crypt_keyslot_context_get_type;
+ crypt_keyslot_add_by_keyslot_context;
+ crypt_volume_key_get_by_keyslot_context;
+} CRYPTSETUP_2.5;
diff --git a/lib/libcryptsetup_macros.h b/lib/libcryptsetup_macros.h
new file mode 100644
index 0000000..55187ab
--- /dev/null
+++ b/lib/libcryptsetup_macros.h
@@ -0,0 +1,70 @@
+/*
+ * Definitions of common constant and generic macros of libcryptsetup
+ *
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _LIBCRYPTSETUP_MACROS_H
+#define _LIBCRYPTSETUP_MACROS_H
+
+/* to silence gcc -Wcast-qual for const cast */
+#define CONST_CAST(x) (x)(uintptr_t)
+
+/* to silence clang -Wcast-align when working with byte arrays */
+#define VOIDP_CAST(x) (x)(void*)
+
+#define UNUSED(x) (void)(x)
+
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#define BITFIELD_SIZE(BF_PTR) (sizeof(*(BF_PTR)) * 8)
+
+#define MOVE_REF(x, y) \
+ do { \
+ __typeof__(x) *_px = &(x), *_py = &(y); \
+ *_px = *_py; \
+ *_py = NULL; \
+ } while (0)
+
+#define FREE_AND_NULL(x) do { free(x); x = NULL; } while (0)
+
+#define AT_LEAST(a, b) ({ __typeof__(a) __at_least = (a); (__at_least >= (b))?__at_least:(b); })
+
+#define SHIFT_4K 12
+#define SECTOR_SHIFT 9
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#define MAX_SECTOR_SIZE 4096 /* min page size among all platforms */
+#define ROUND_SECTOR(x) (((x) + SECTOR_SIZE - 1) / SECTOR_SIZE)
+
+#define MISALIGNED(a, b) ((a) & ((b) - 1))
+#define MISALIGNED_4K(a) MISALIGNED((a), 1 << SHIFT_4K)
+#define MISALIGNED_512(a) MISALIGNED((a), 1 << SECTOR_SHIFT)
+#define NOTPOW2(a) MISALIGNED((a), (a))
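+
+/*
+ * Examples: MISALIGNED_4K(x) is nonzero iff x is not a multiple of 4096;
+ * NOTPOW2(x) is nonzero iff x (for x > 0) is not a power of two, because
+ * (x & (x - 1)) clears the lowest set bit.
+ */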
+
+#define DEFAULT_DISK_ALIGNMENT 1048576 /* 1MiB */
+#define DEFAULT_MEM_ALIGNMENT 4096
+
+#define DM_UUID_LEN 129
+#define DM_BY_ID_PREFIX "dm-uuid-"
+#define DM_BY_ID_PREFIX_LEN 8
+#define DM_UUID_PREFIX "CRYPT-"
+#define DM_UUID_PREFIX_LEN 6
+
+#endif /* _LIBCRYPTSETUP_MACROS_H */
diff --git a/lib/libcryptsetup_symver.h b/lib/libcryptsetup_symver.h
new file mode 100644
index 0000000..a5aa8f9
--- /dev/null
+++ b/lib/libcryptsetup_symver.h
@@ -0,0 +1,103 @@
+/*
+ * Helpers for defining versioned symbols
+ *
+ * Copyright (C) 2021-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _LIBCRYPTSETUP_SYMVER_H
+#define _LIBCRYPTSETUP_SYMVER_H
+
+/*
+ * Note on usage:
+ *
+ * Do not use CRYPT_SYMBOL_EXPORT_NEW and CRYPT_SYMBOL_EXPORT_OLD on public
+ * symbols that are exported only once. The linker handles those automatically,
+ * as always.
+ *
+ * It's supposed to be used only with symbols that are exported in at least
+ * two versions simultaneously as follows:
+ *
+ * - the latest version is marked with _NEW variant and all other compatible
+ * symbols should be marked with _OLD variant
+ *
+ * Examples:
+ *
+ * - int crypt_func_X(unsigned *x, long y) gets introduced in CRYPTSETUP_2.4.
+ *
+ * No need to use any macro referenced here; just add the proper version
+ * mapping in the libcryptsetup.sym file.
+ *
+ * In a later version, CRYPTSETUP_2.5, the symbol crypt_func_X has to be
+ * fixed in an incompatible way by adding a new function parameter. The new
+ * version has to be added to the mapping file libcryptsetup.sym as well.
+ *
+ * The definition of the compatible (old) function gets prefixed with the following macro:
+ *
+ * CRYPT_SYMBOL_EXPORT_OLD(int, crypt_func_X, 2, 4,
+ * unsigned *x, long y)
+ * {
+ * function body
+ * }
+ *
+ * Whereas new version introduced in CRYPTSETUP_2.5 is defined as follows:
+ *
+ * CRYPT_SYMBOL_EXPORT_NEW(int, crypt_func_X, 2, 5,
+ * unsigned *x, long y, void *new_parameter)
+ * {
+ * function body
+ * }
+ *
+ * If a later version, CRYPTSETUP_2.6, introduces yet another version of
+ * crypt_func_X, it will be prefixed with the
+ * CRYPT_SYMBOL_EXPORT_NEW(int, crypt_func_X, 2, 6, ...) macro, and all
+ * previous versions (CRYPTSETUP_2.4 and CRYPTSETUP_2.5) will be under the
+ * CRYPT_SYMBOL_EXPORT_OLD(int, crypt_func_X, ...) macro.
+ */
+
+#if HAVE_ATTRIBUTE_SYMVER
+# define _CRYPT_SYMVER(_local_sym, _public_sym, _ver_str, _maj, _min) \
+ __attribute__((__symver__(#_public_sym _ver_str #_maj "." #_min)))
+#endif
+
+#if !defined(_CRYPT_SYMVER) && (defined(__GNUC__) || defined(__clang__))
+# define _CRYPT_SYMVER(_local_sym, _public_sym, _ver_str, _maj, _min) \
+ __asm__(".symver " #_local_sym "," #_public_sym _ver_str #_maj "." #_min);
+#endif
+
+#define _CRYPT_FUNC(_public_sym, _prefix_str, _maj, _min, _ret, ...) \
+ _ret __##_public_sym##_v##_maj##_##_min(__VA_ARGS__); \
+ _CRYPT_SYMVER(__##_public_sym##_v##_maj##_##_min, _public_sym, _prefix_str "CRYPTSETUP_", _maj, _min) \
+ _ret __##_public_sym##_v##_maj##_##_min(__VA_ARGS__)
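+
+/*
+ * For illustration: with __attribute__((symver)) support,
+ * CRYPT_SYMBOL_EXPORT_NEW(int, crypt_func_X, 2, 5, unsigned *x, long y)
+ * expands roughly to
+ *
+ *   int __crypt_func_X_v2_5(unsigned *x, long y);
+ *   __attribute__((__symver__("crypt_func_X@@CRYPTSETUP_2.5")))
+ *   int __crypt_func_X_v2_5(unsigned *x, long y)
+ */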
+
+#ifdef _CRYPT_SYMVER
+
+# define CRYPT_SYMBOL_EXPORT_OLD(_ret, _public_sym, _maj, _min, ...) \
+ _CRYPT_FUNC(_public_sym, "@", _maj, _min, _ret, __VA_ARGS__)
+# define CRYPT_SYMBOL_EXPORT_NEW(_ret, _public_sym, _maj, _min, ...) \
+ _CRYPT_FUNC(_public_sym, "@@", _maj, _min, _ret, __VA_ARGS__)
+
+#else /* no support for symbol versioning at all */
+
+# define CRYPT_SYMBOL_EXPORT_OLD(_ret, _public_sym, _maj, _min, ...) \
+ static inline __attribute__((unused)) \
+ _ret __##_public_sym##_v##_maj##_##_min(__VA_ARGS__)
+
+# define CRYPT_SYMBOL_EXPORT_NEW(_ret, _public_sym, _maj, _min, ...) \
+ _ret _public_sym(__VA_ARGS__)
+
+#endif
+
+#endif /* _LIBCRYPTSETUP_SYMVER_H */
diff --git a/lib/libdevmapper.c b/lib/libdevmapper.c
new file mode 100644
index 0000000..9c5fc0c
--- /dev/null
+++ b/lib/libdevmapper.c
@@ -0,0 +1,3181 @@
+/*
+ * libdevmapper - device-mapper backend for cryptsetup
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libdevmapper.h>
+#include <uuid/uuid.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_SYSMACROS_H
+# include <sys/sysmacros.h> /* for major, minor */
+#endif
+#include "internal.h"
+
+#define DM_CRYPT_TARGET "crypt"
+#define DM_VERITY_TARGET "verity"
+#define DM_INTEGRITY_TARGET "integrity"
+#define DM_LINEAR_TARGET "linear"
+#define DM_ERROR_TARGET "error"
+#define DM_ZERO_TARGET "zero"
+#define RETRY_COUNT 5
+
+/* Set if DM target versions were probed */
+static bool _dm_ioctl_checked = false;
+static bool _dm_crypt_checked = false;
+static bool _dm_verity_checked = false;
+static bool _dm_integrity_checked = false;
+static bool _dm_zero_checked = false;
+
+static int _quiet_log = 0;
+static uint32_t _dm_flags = 0;
+
+static struct crypt_device *_context = NULL;
+static int _dm_use_count = 0;
+
+/* Check if we have DM flag to instruct kernel to force wipe buffers */
+#if !HAVE_DECL_DM_TASK_SECURE_DATA
+static int dm_task_secure_data(struct dm_task *dmt) { return 1; }
+#endif
+
+/* Compatibility for old device-mapper without udev support */
+#if HAVE_DECL_DM_UDEV_DISABLE_DISK_RULES_FLAG
+#define CRYPT_TEMP_UDEV_FLAGS DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG | \
+ DM_UDEV_DISABLE_DISK_RULES_FLAG | \
+ DM_UDEV_DISABLE_OTHER_RULES_FLAG
+#define _dm_task_set_cookie dm_task_set_cookie
+#define _dm_udev_wait dm_udev_wait
+#else
+#define CRYPT_TEMP_UDEV_FLAGS 0
+static int _dm_task_set_cookie(struct dm_task *dmt, uint32_t *cookie, uint16_t flags) { return 0; }
+static int _dm_udev_wait(uint32_t cookie) { return 0; }
+#endif
+
+static int _dm_use_udev(void)
+{
+#ifdef USE_UDEV /* cannot be enabled if devmapper is too old */
+ return dm_udev_get_sync_support();
+#else
+ return 0;
+#endif
+}
+
+__attribute__((format(printf, 4, 5)))
+static void set_dm_error(int level,
+ const char *file __attribute__((unused)),
+ int line __attribute__((unused)),
+ const char *f, ...)
+{
+ char *msg = NULL;
+ va_list va;
+
+ va_start(va, f);
+ if (vasprintf(&msg, f, va) > 0) {
+ if (level < 4 && !_quiet_log) {
+ log_err(_context, "%s", msg);
+ } else {
+ /* We do not use DM visual stack backtrace here */
+ if (strncmp(msg, "<backtrace>", 11))
+ log_dbg(_context, "%s", msg);
+ }
+ }
+ free(msg);
+ va_end(va);
+}
+
+static int _dm_satisfies_version(unsigned target_maj, unsigned target_min, unsigned target_patch,
+ unsigned actual_maj, unsigned actual_min, unsigned actual_patch)
+{
+ if (actual_maj > target_maj)
+ return 1;
+
+ if (actual_maj == target_maj && actual_min > target_min)
+ return 1;
+
+ if (actual_maj == target_maj && actual_min == target_min && actual_patch >= target_patch)
+ return 1;
+
+ return 0;
+}
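+
+/* E.g. an installed target version 1.19.0 satisfies a 1.18.1 requirement:
+ * _dm_satisfies_version(1, 18, 1, 1, 19, 0) returns 1. */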
+
+static void _dm_set_crypt_compat(struct crypt_device *cd,
+ unsigned crypt_maj,
+ unsigned crypt_min,
+ unsigned crypt_patch)
+{
+ if (_dm_crypt_checked || crypt_maj == 0)
+ return;
+
+ log_dbg(cd, "Detected dm-crypt version %i.%i.%i.",
+ crypt_maj, crypt_min, crypt_patch);
+
+ if (_dm_satisfies_version(1, 2, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_KEY_WIPE_SUPPORTED;
+ else
+ log_dbg(cd, "Suspend and resume disabled, no wipe key support.");
+
+ if (_dm_satisfies_version(1, 10, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_LMK_SUPPORTED;
+
+ /* not perfect: kernel 2.6.33 already supports plain64 with dm-crypt 1.7.0 */
+ if (_dm_satisfies_version(1, 8, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_PLAIN64_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 11, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_DISCARDS_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 13, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_TCW_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 14, 0, crypt_maj, crypt_min, crypt_patch)) {
+ _dm_flags |= DM_SAME_CPU_CRYPT_SUPPORTED;
+ _dm_flags |= DM_SUBMIT_FROM_CRYPT_CPUS_SUPPORTED;
+ }
+
+ if (_dm_satisfies_version(1, 18, 1, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_KERNEL_KEYRING_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 17, 0, crypt_maj, crypt_min, crypt_patch)) {
+ _dm_flags |= DM_SECTOR_SIZE_SUPPORTED;
+ _dm_flags |= DM_CAPI_STRING_SUPPORTED;
+ }
+
+ if (_dm_satisfies_version(1, 19, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_BITLK_EBOIV_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 20, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_BITLK_ELEPHANT_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 22, 0, crypt_maj, crypt_min, crypt_patch))
+ _dm_flags |= DM_CRYPT_NO_WORKQUEUE_SUPPORTED;
+
+ _dm_crypt_checked = true;
+}
+
+static void _dm_set_verity_compat(struct crypt_device *cd,
+ unsigned verity_maj,
+ unsigned verity_min,
+ unsigned verity_patch)
+{
+ if (_dm_verity_checked || verity_maj == 0)
+ return;
+
+ log_dbg(cd, "Detected dm-verity version %i.%i.%i.",
+ verity_maj, verity_min, verity_patch);
+
+ _dm_flags |= DM_VERITY_SUPPORTED;
+
+ /*
+ * ignore_corruption and restart_on_corruption are available since 1.2 (kernel 4.1),
+ * ignore_zero_blocks since 1.3 (kernel 4.5)
+ * (but some 1.2 dm-verity targets don't support it).
+ * FEC was added in 1.3 as well.
+ * check_at_most_once was added in 1.4 (kernel 4.17).
+ */
+ if (_dm_satisfies_version(1, 3, 0, verity_maj, verity_min, verity_patch)) {
+ _dm_flags |= DM_VERITY_ON_CORRUPTION_SUPPORTED;
+ _dm_flags |= DM_VERITY_FEC_SUPPORTED;
+ }
+
+ if (_dm_satisfies_version(1, 5, 0, verity_maj, verity_min, verity_patch))
+ _dm_flags |= DM_VERITY_SIGNATURE_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 7, 0, verity_maj, verity_min, verity_patch))
+ _dm_flags |= DM_VERITY_PANIC_CORRUPTION_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 9, 0, verity_maj, verity_min, verity_patch))
+ _dm_flags |= DM_VERITY_TASKLETS_SUPPORTED;
+
+ _dm_verity_checked = true;
+}
+
+static void _dm_set_integrity_compat(struct crypt_device *cd,
+ unsigned integrity_maj,
+ unsigned integrity_min,
+ unsigned integrity_patch)
+{
+ if (_dm_integrity_checked || integrity_maj == 0)
+ return;
+
+ log_dbg(cd, "Detected dm-integrity version %i.%i.%i.",
+ integrity_maj, integrity_min, integrity_patch);
+
+ _dm_flags |= DM_INTEGRITY_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 2, 0, integrity_maj, integrity_min, integrity_patch))
+ _dm_flags |= DM_INTEGRITY_RECALC_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 3, 0, integrity_maj, integrity_min, integrity_patch))
+ _dm_flags |= DM_INTEGRITY_BITMAP_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 4, 0, integrity_maj, integrity_min, integrity_patch))
+ _dm_flags |= DM_INTEGRITY_FIX_PADDING_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 6, 0, integrity_maj, integrity_min, integrity_patch))
+ _dm_flags |= DM_INTEGRITY_DISCARDS_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 7, 0, integrity_maj, integrity_min, integrity_patch))
+ _dm_flags |= DM_INTEGRITY_FIX_HMAC_SUPPORTED;
+
+ if (_dm_satisfies_version(1, 8, 0, integrity_maj, integrity_min, integrity_patch))
+ _dm_flags |= DM_INTEGRITY_RESET_RECALC_SUPPORTED;
+
+ _dm_integrity_checked = true;
+}
+
+static void _dm_set_zero_compat(struct crypt_device *cd,
+ unsigned zero_maj,
+ unsigned zero_min,
+ unsigned zero_patch)
+{
+ if (_dm_zero_checked || zero_maj == 0)
+ return;
+
+ log_dbg(cd, "Detected dm-zero version %i.%i.%i.",
+ zero_maj, zero_min, zero_patch);
+
+ _dm_zero_checked = true;
+}
+
+/* We use this to load the target module */
+static void _dm_check_target(dm_target_type target_type)
+{
+#if HAVE_DECL_DM_DEVICE_GET_TARGET_VERSION
+ struct dm_task *dmt;
+ const char *target_name = NULL;
+
+ if (!(_dm_flags & DM_GET_TARGET_VERSION_SUPPORTED))
+ return;
+
+ if (target_type == DM_CRYPT)
+ target_name = DM_CRYPT_TARGET;
+ else if (target_type == DM_VERITY)
+ target_name = DM_VERITY_TARGET;
+ else if (target_type == DM_INTEGRITY)
+ target_name = DM_INTEGRITY_TARGET;
+ else
+ return;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_GET_TARGET_VERSION)))
+ return;
+
+ if (dm_task_set_name(dmt, target_name))
+ dm_task_run(dmt);
+
+ dm_task_destroy(dmt);
+#endif
+}
+
+static int _dm_check_versions(struct crypt_device *cd, dm_target_type target_type)
+{
+ struct dm_task *dmt;
+ struct dm_versions *target, *last_target;
+ char dm_version[16];
+ unsigned dm_maj, dm_min, dm_patch;
+ int r = 0;
+
+ if ((target_type == DM_CRYPT && _dm_crypt_checked) ||
+ (target_type == DM_VERITY && _dm_verity_checked) ||
+ (target_type == DM_INTEGRITY && _dm_integrity_checked) ||
+ (target_type == DM_ZERO && _dm_zero_checked) ||
+ (target_type == DM_LINEAR) ||
+ (_dm_crypt_checked && _dm_verity_checked && _dm_integrity_checked && _dm_zero_checked))
+ return 1;
+
+ /* Shut up DM while checking */
+ _quiet_log = 1;
+
+ _dm_check_target(target_type);
+
+ if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
+ goto out;
+
+ if (!dm_task_run(dmt))
+ goto out;
+
+ if (!dm_task_get_driver_version(dmt, dm_version, sizeof(dm_version)))
+ goto out;
+
+ if (!_dm_ioctl_checked) {
+ if (sscanf(dm_version, "%u.%u.%u", &dm_maj, &dm_min, &dm_patch) != 3)
+ goto out;
+ log_dbg(cd, "Detected dm-ioctl version %u.%u.%u.", dm_maj, dm_min, dm_patch);
+
+ if (_dm_satisfies_version(4, 20, 0, dm_maj, dm_min, dm_patch))
+ _dm_flags |= DM_SECURE_SUPPORTED;
+#if HAVE_DECL_DM_TASK_DEFERRED_REMOVE
+ if (_dm_satisfies_version(4, 27, 0, dm_maj, dm_min, dm_patch))
+ _dm_flags |= DM_DEFERRED_SUPPORTED;
+#endif
+#if HAVE_DECL_DM_DEVICE_GET_TARGET_VERSION
+ if (_dm_satisfies_version(4, 41, 0, dm_maj, dm_min, dm_patch))
+ _dm_flags |= DM_GET_TARGET_VERSION_SUPPORTED;
+#endif
+ }
+
+ target = dm_task_get_versions(dmt);
+ do {
+ last_target = target;
+ if (!strcmp(DM_CRYPT_TARGET, target->name)) {
+ _dm_set_crypt_compat(cd, (unsigned)target->version[0],
+ (unsigned)target->version[1],
+ (unsigned)target->version[2]);
+ } else if (!strcmp(DM_VERITY_TARGET, target->name)) {
+ _dm_set_verity_compat(cd, (unsigned)target->version[0],
+ (unsigned)target->version[1],
+ (unsigned)target->version[2]);
+ } else if (!strcmp(DM_INTEGRITY_TARGET, target->name)) {
+ _dm_set_integrity_compat(cd, (unsigned)target->version[0],
+ (unsigned)target->version[1],
+ (unsigned)target->version[2]);
+ } else if (!strcmp(DM_ZERO_TARGET, target->name)) {
+ _dm_set_zero_compat(cd, (unsigned)target->version[0],
+ (unsigned)target->version[1],
+ (unsigned)target->version[2]);
+ }
+ target = VOIDP_CAST(struct dm_versions *)((char *) target + target->next);
+ } while (last_target != target);
+
+ r = 1;
+ if (!_dm_ioctl_checked)
+ log_dbg(cd, "Device-mapper backend running with UDEV support %sabled.",
+ _dm_use_udev() ? "en" : "dis");
+
+ _dm_ioctl_checked = true;
+out:
+ if (dmt)
+ dm_task_destroy(dmt);
+
+ _quiet_log = 0;
+ return r;
+}
+
+int dm_flags(struct crypt_device *cd, dm_target_type target, uint32_t *flags)
+{
+ _dm_check_versions(cd, target);
+ *flags = _dm_flags;
+
+ if (target == DM_UNKNOWN &&
+ _dm_crypt_checked && _dm_verity_checked && _dm_integrity_checked && _dm_zero_checked)
+ return 0;
+
+ if ((target == DM_CRYPT && _dm_crypt_checked) ||
+ (target == DM_VERITY && _dm_verity_checked) ||
+ (target == DM_INTEGRITY && _dm_integrity_checked) ||
+ (target == DM_ZERO && _dm_zero_checked) ||
+ (target == DM_LINEAR)) /* nothing to check */
+ return 0;
+
+ return -ENODEV;
+}
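+
+/*
+ * Illustrative only: a typical caller probes target support before building
+ * a table, e.g.:
+ *
+ *   uint32_t f;
+ *   if (!dm_flags(cd, DM_CRYPT, &f) && (f & DM_SECTOR_SIZE_SUPPORTED))
+ *       ... a 4096-byte encryption sector size can be requested ...
+ */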
+
+/* This doesn't run any kernel checks, just sets up the userspace libdevmapper */
+void dm_backend_init(struct crypt_device *cd)
+{
+ if (!_dm_use_count++) {
+ log_dbg(cd, "Initialising device-mapper backend library.");
+ dm_log_init(set_dm_error);
+ dm_log_init_verbose(10);
+ }
+}
+
+void dm_backend_exit(struct crypt_device *cd)
+{
+ if (_dm_use_count && (!--_dm_use_count)) {
+ log_dbg(cd, "Releasing device-mapper backend.");
+ dm_log_init_verbose(0);
+ dm_log_init(NULL);
+ dm_lib_release();
+ }
+}
+
+/* libdevmapper is not context-friendly; switch context on every DM call. */
+static int dm_init_context(struct crypt_device *cd, dm_target_type target)
+{
+ _context = cd;
+ if (!_dm_check_versions(cd, target)) {
+ if (getuid() || geteuid())
+ log_err(cd, _("Cannot initialize device-mapper, "
+ "running as non-root user."));
+ else
+ log_err(cd, _("Cannot initialize device-mapper. "
+ "Is dm_mod kernel module loaded?"));
+ _context = NULL;
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static void dm_exit_context(void)
+{
+ _context = NULL;
+}
+
+/* Return path to DM device */
+char *dm_device_path(const char *prefix, int major, int minor)
+{
+ struct dm_task *dmt;
+ const char *name;
+ char path[PATH_MAX];
+
+ if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
+ return NULL;
+ if (!dm_task_set_minor(dmt, minor) ||
+ !dm_task_set_major(dmt, major) ||
+ !dm_task_no_flush(dmt) ||
+ !dm_task_run(dmt) ||
+ !(name = dm_task_get_name(dmt))) {
+ dm_task_destroy(dmt);
+ return NULL;
+ }
+
+ if (snprintf(path, sizeof(path), "%s%s", prefix ?: "", name) < 0)
+ path[0] = '\0';
+
+ dm_task_destroy(dmt);
+
+ return strdup(path);
+}
+
+char *dm_device_name(const char *path)
+{
+ struct stat st;
+
+ if (stat(path, &st) < 0 || !S_ISBLK(st.st_mode))
+ return NULL;
+
+ return dm_device_path(NULL, major(st.st_rdev), minor(st.st_rdev));
+}
+
+static size_t int_log10(uint64_t x)
+{
+ uint64_t r = 0;
+ for (x /= 10; x > 0; x /= 10)
+ r++;
+ return r;
+}
+
+static int cipher_dm2c(const char *org_c, const char *org_i, unsigned tag_size,
+ char *c_dm, int c_dm_size,
+ char *i_dm, int i_dm_size)
+{
+ int c_size = 0, i_size = 0, i;
+ char cipher[MAX_CAPI_ONE_LEN], mode[MAX_CAPI_ONE_LEN], iv[MAX_CAPI_ONE_LEN+1],
+ tmp[MAX_CAPI_ONE_LEN], capi[MAX_CAPI_LEN];
+
+ if (!c_dm || !c_dm_size || !i_dm || !i_dm_size)
+ return -EINVAL;
+
+ i = sscanf(org_c, "%" MAX_CAPI_ONE_LEN_STR "[^-]-%" MAX_CAPI_ONE_LEN_STR "s", cipher, tmp);
+ if (i != 2)
+ return -EINVAL;
+
+ i = sscanf(tmp, "%" MAX_CAPI_ONE_LEN_STR "[^-]-%" MAX_CAPI_ONE_LEN_STR "s", mode, iv);
+ if (i == 1) {
+ memset(iv, 0, sizeof(iv));
+ strncpy(iv, mode, sizeof(iv)-1);
+ *mode = '\0';
+ if (snprintf(capi, sizeof(capi), "%s", cipher) < 0)
+ return -EINVAL;
+ } else if (i == 2) {
+ if (snprintf(capi, sizeof(capi), "%s(%s)", mode, cipher) < 0)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (!org_i) {
+ /* legacy mode: CIPHER-MODE-IV */
+ i_size = snprintf(i_dm, i_dm_size, "%s", "");
+ c_size = snprintf(c_dm, c_dm_size, "%s", org_c);
+ } else if (!strcmp(org_i, "none")) {
+ /* IV only: capi:MODE(CIPHER)-IV */
+ i_size = snprintf(i_dm, i_dm_size, " integrity:%u:none", tag_size);
+ c_size = snprintf(c_dm, c_dm_size, "capi:%s-%s", capi, iv);
+ } else if (!strcmp(org_i, "aead") && !strcmp(mode, "ccm")) {
+ /* CCM AEAD: capi:rfc4309(MODE(CIPHER))-IV */
+ i_size = snprintf(i_dm, i_dm_size, " integrity:%u:aead", tag_size);
+ c_size = snprintf(c_dm, c_dm_size, "capi:rfc4309(%s)-%s", capi, iv);
+ } else if (!strcmp(org_i, "aead")) {
+ /* AEAD: capi:MODE(CIPHER)-IV */
+ i_size = snprintf(i_dm, i_dm_size, " integrity:%u:aead", tag_size);
+ c_size = snprintf(c_dm, c_dm_size, "capi:%s-%s", capi, iv);
+ } else if (!strcmp(org_i, "poly1305")) {
+ /* POLY1305 AEAD: capi:rfc7539(MODE(CIPHER),POLY1305)-IV */
+ i_size = snprintf(i_dm, i_dm_size, " integrity:%u:aead", tag_size);
+ c_size = snprintf(c_dm, c_dm_size, "capi:rfc7539(%s,poly1305)-%s", capi, iv);
+ } else {
+ /* other AEAD: capi:authenc(<AUTH>,MODE(CIPHER))-IV */
+ i_size = snprintf(i_dm, i_dm_size, " integrity:%u:aead", tag_size);
+ c_size = snprintf(c_dm, c_dm_size, "capi:authenc(%s,%s)-%s", org_i, capi, iv);
+ }
+
+ /* snprintf returns the would-be length; >= buffer size means truncation */
+ if (c_size < 0 || c_size >= c_dm_size)
+ return -EINVAL;
+ if (i_size < 0 || i_size >= i_dm_size)
+ return -EINVAL;
+
+ return 0;
+}
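+
+/*
+ * Worked examples of the conversion above (illustrative values):
+ *   cipher_dm2c("aes-xts-plain64", NULL, 0, ...)
+ *       -> c_dm "aes-xts-plain64", i_dm ""                      (legacy)
+ *   cipher_dm2c("aes-gcm-random", "aead", 16, ...)
+ *       -> c_dm "capi:gcm(aes)-random", i_dm " integrity:16:aead"
+ *   cipher_dm2c("aes-xts-plain64", "hmac(sha256)", 32, ...)
+ *       -> c_dm "capi:authenc(hmac(sha256),xts(aes))-plain64",
+ *          i_dm " integrity:32:aead"
+ */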
+
+static char *_uf(char *buf, size_t buf_size, const char *s, unsigned u)
+{
+ int r = snprintf(buf, buf_size, " %s:%u", s, u);
+ assert(r > 0 && (size_t)r < buf_size);
+ return buf;
+}
+
+/* https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt */
+static char *get_dm_crypt_params(const struct dm_target *tgt, uint32_t flags)
+{
+ int r, max_size, null_cipher = 0, num_options = 0, keystr_len = 0;
+ char *params = NULL, *hexkey = NULL;
+ char sector_feature[32], features[512], integrity_dm[256], cipher_dm[256];
+
+ if (!tgt)
+ return NULL;
+
+ r = cipher_dm2c(tgt->u.crypt.cipher, tgt->u.crypt.integrity, tgt->u.crypt.tag_size,
+ cipher_dm, sizeof(cipher_dm), integrity_dm, sizeof(integrity_dm));
+ if (r < 0)
+ return NULL;
+
+ if (flags & CRYPT_ACTIVATE_ALLOW_DISCARDS)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_SAME_CPU_CRYPT)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_NO_READ_WORKQUEUE)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_IV_LARGE_SECTORS)
+ num_options++;
+ if (tgt->u.crypt.integrity)
+ num_options++;
+ if (tgt->u.crypt.sector_size != SECTOR_SIZE)
+ num_options++;
+
+ if (num_options) { /* MAX length int32 + 15 + 15 + 23 + 18 + 19 + 17 + 13 + int32 + integrity_str */
+ r = snprintf(features, sizeof(features), " %d%s%s%s%s%s%s%s%s", num_options,
+ (flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) ? " allow_discards" : "",
+ (flags & CRYPT_ACTIVATE_SAME_CPU_CRYPT) ? " same_cpu_crypt" : "",
+ (flags & CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS) ? " submit_from_crypt_cpus" : "",
+ (flags & CRYPT_ACTIVATE_NO_READ_WORKQUEUE) ? " no_read_workqueue" : "",
+ (flags & CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE) ? " no_write_workqueue" : "",
+ (flags & CRYPT_ACTIVATE_IV_LARGE_SECTORS) ? " iv_large_sectors" : "",
+ (tgt->u.crypt.sector_size != SECTOR_SIZE) ?
+ _uf(sector_feature, sizeof(sector_feature), "sector_size", tgt->u.crypt.sector_size) : "",
+ integrity_dm);
+ if (r < 0 || (size_t)r >= sizeof(features))
+ goto out;
+ } else
+ *features = '\0';
+
+ if (crypt_is_cipher_null(cipher_dm))
+ null_cipher = 1;
+
+ if (null_cipher)
+ hexkey = crypt_bytes_to_hex(0, NULL);
+ else if (flags & CRYPT_ACTIVATE_KEYRING_KEY) {
+ keystr_len = strlen(tgt->u.crypt.vk->key_description) + int_log10(tgt->u.crypt.vk->keylength) + 10;
+ hexkey = crypt_safe_alloc(keystr_len);
+ if (!hexkey)
+ goto out;
+ r = snprintf(hexkey, keystr_len, ":%zu:logon:%s", tgt->u.crypt.vk->keylength, tgt->u.crypt.vk->key_description);
+ if (r < 0 || r >= keystr_len)
+ goto out;
+ } else
+ hexkey = crypt_bytes_to_hex(tgt->u.crypt.vk->keylength, tgt->u.crypt.vk->key);
+
+ if (!hexkey)
+ goto out;
+
+ max_size = strlen(hexkey) + strlen(cipher_dm) +
+ strlen(device_block_path(tgt->data_device)) +
+ strlen(features) + 64;
+ params = crypt_safe_alloc(max_size);
+ if (!params)
+ goto out;
+
+ r = snprintf(params, max_size, "%s %s %" PRIu64 " %s %" PRIu64 "%s",
+ cipher_dm, hexkey, tgt->u.crypt.iv_offset,
+ device_block_path(tgt->data_device), tgt->u.crypt.offset,
+ features);
+ if (r < 0 || r >= max_size) {
+ crypt_safe_free(params);
+ params = NULL;
+ }
+out:
+ crypt_safe_free(hexkey);
+ return params;
+}
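+
+/*
+ * For reference, the resulting dm-crypt table params look like
+ * (hypothetical values):
+ *   "aes-xts-plain64 <hex_key> 0 /dev/sdb 4096 1 allow_discards"
+ * or, with a volume key held in the kernel keyring:
+ *   "aes-xts-plain64 :64:logon:<key_description> 0 /dev/sdb 4096"
+ */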
+
+/* https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity */
+static char *get_dm_verity_params(const struct dm_target *tgt, uint32_t flags)
+{
+ int max_size, max_fec_size, max_verify_size, r, num_options = 0;
+ struct crypt_params_verity *vp;
+ char *params = NULL, *hexroot = NULL, *hexsalt = NULL;
+ char features[256], *fec_features = NULL, *verity_verify_args = NULL;
+
+ if (!tgt || !tgt->u.verity.vp)
+ return NULL;
+
+ vp = tgt->u.verity.vp;
+
+ /* These flags are not compatible */
+ if ((flags & CRYPT_ACTIVATE_RESTART_ON_CORRUPTION) &&
+ (flags & CRYPT_ACTIVATE_PANIC_ON_CORRUPTION))
+ flags &= ~CRYPT_ACTIVATE_RESTART_ON_CORRUPTION;
+ if ((flags & CRYPT_ACTIVATE_IGNORE_CORRUPTION) &&
+ (flags & (CRYPT_ACTIVATE_RESTART_ON_CORRUPTION|CRYPT_ACTIVATE_PANIC_ON_CORRUPTION)))
+ flags &= ~CRYPT_ACTIVATE_IGNORE_CORRUPTION;
+
+ if (flags & CRYPT_ACTIVATE_IGNORE_CORRUPTION)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_RESTART_ON_CORRUPTION)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_PANIC_ON_CORRUPTION)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_IGNORE_ZERO_BLOCKS)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_CHECK_AT_MOST_ONCE)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_TASKLETS)
+ num_options++;
+
+ max_fec_size = (tgt->u.verity.fec_device ? strlen(device_block_path(tgt->u.verity.fec_device)) : 0) + 256;
+ fec_features = crypt_safe_alloc(max_fec_size);
+ if (!fec_features)
+ goto out;
+
+ if (tgt->u.verity.fec_device) { /* MAX length 21 + path + 11 + int64 + 12 + int64 + 11 + int32 */
+ num_options += 8;
+ r = snprintf(fec_features, max_fec_size,
+ " use_fec_from_device %s fec_start %" PRIu64 " fec_blocks %" PRIu64 " fec_roots %" PRIu32,
+ device_block_path(tgt->u.verity.fec_device), tgt->u.verity.fec_offset,
+ tgt->u.verity.fec_blocks, vp->fec_roots);
+ if (r < 0 || r >= max_fec_size)
+ goto out;
+ } else
+ *fec_features = '\0';
+
+ max_verify_size = (tgt->u.verity.root_hash_sig_key_desc ? strlen(tgt->u.verity.root_hash_sig_key_desc) : 0) + 32;
+ verity_verify_args = crypt_safe_alloc(max_verify_size);
+ if (!verity_verify_args)
+ goto out;
+ if (tgt->u.verity.root_hash_sig_key_desc) { /* MAX length 24 + key_str */
+ num_options += 2;
+ r = snprintf(verity_verify_args, max_verify_size,
+ " root_hash_sig_key_desc %s", tgt->u.verity.root_hash_sig_key_desc);
+ if (r < 0 || r >= max_verify_size)
+ goto out;
+ } else
+ *verity_verify_args = '\0';
+
+ if (num_options) { /* MAX length int32 + 18 + 22 + 20 + 19 + 19 + 22 */
+ r = snprintf(features, sizeof(features), " %d%s%s%s%s%s%s", num_options,
+ (flags & CRYPT_ACTIVATE_IGNORE_CORRUPTION) ? " ignore_corruption" : "",
+ (flags & CRYPT_ACTIVATE_RESTART_ON_CORRUPTION) ? " restart_on_corruption" : "",
+ (flags & CRYPT_ACTIVATE_PANIC_ON_CORRUPTION) ? " panic_on_corruption" : "",
+ (flags & CRYPT_ACTIVATE_IGNORE_ZERO_BLOCKS) ? " ignore_zero_blocks" : "",
+ (flags & CRYPT_ACTIVATE_CHECK_AT_MOST_ONCE) ? " check_at_most_once" : "",
+ (flags & CRYPT_ACTIVATE_TASKLETS) ? " try_verify_in_tasklet" : "");
+ if (r < 0 || (size_t)r >= sizeof(features))
+ goto out;
+ } else
+ *features = '\0';
+
+ hexroot = crypt_bytes_to_hex(tgt->u.verity.root_hash_size, tgt->u.verity.root_hash);
+ if (!hexroot)
+ goto out;
+
+ hexsalt = crypt_bytes_to_hex(vp->salt_size, vp->salt);
+ if (!hexsalt)
+ goto out;
+
+ max_size = strlen(hexroot) + strlen(hexsalt) +
+ strlen(device_block_path(tgt->data_device)) +
+ strlen(device_block_path(tgt->u.verity.hash_device)) +
+ strlen(vp->hash_name) + strlen(features) + strlen(fec_features) + 128 +
+ strlen(verity_verify_args);
+
+ params = crypt_safe_alloc(max_size);
+ if (!params)
+ goto out;
+
+ r = snprintf(params, max_size,
+ "%u %s %s %u %u %" PRIu64 " %" PRIu64 " %s %s %s%s%s%s",
+ vp->hash_type, device_block_path(tgt->data_device),
+ device_block_path(tgt->u.verity.hash_device),
+ vp->data_block_size, vp->hash_block_size,
+ vp->data_size, tgt->u.verity.hash_offset,
+ vp->hash_name, hexroot, hexsalt, features, fec_features,
+ verity_verify_args);
+ if (r < 0 || r >= max_size) {
+ crypt_safe_free(params);
+ params = NULL;
+ }
+out:
+ crypt_safe_free(fec_features);
+ crypt_safe_free(verity_verify_args);
+ crypt_safe_free(hexroot);
+ crypt_safe_free(hexsalt);
+ return params;
+}
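+
+/*
+ * For reference, the resulting dm-verity table params look like
+ * (hypothetical values):
+ *   "1 /dev/sda2 /dev/sda3 4096 4096 262144 1 sha256 <root_hash> <salt>"
+ * optionally followed by the feature, FEC and signature arguments built above.
+ */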
+
+static char *get_dm_integrity_params(const struct dm_target *tgt, uint32_t flags)
+{
+ int r, max_size, max_integrity, max_journal_integrity, max_journal_crypt, num_options = 0;
+ char *params_out = NULL, *params, *hexkey, mode, feature[6][32];
+ char *features, *integrity, *journal_integrity, *journal_crypt;
+
+ if (!tgt)
+ return NULL;
+
+ max_integrity = (tgt->u.integrity.integrity && tgt->u.integrity.vk ? tgt->u.integrity.vk->keylength * 2 : 0) +
+ (tgt->u.integrity.integrity ? strlen(tgt->u.integrity.integrity) : 0) + 32;
+ max_journal_integrity = (tgt->u.integrity.journal_integrity && tgt->u.integrity.journal_integrity_key ?
+ tgt->u.integrity.journal_integrity_key->keylength * 2 : 0) +
+ (tgt->u.integrity.journal_integrity ? strlen(tgt->u.integrity.journal_integrity) : 0) + 32;
+ max_journal_crypt = (tgt->u.integrity.journal_crypt && tgt->u.integrity.journal_crypt_key ?
+ tgt->u.integrity.journal_crypt_key->keylength * 2 : 0) +
+ (tgt->u.integrity.journal_crypt ? strlen(tgt->u.integrity.journal_crypt) : 0) + 32;
+ max_size = strlen(device_block_path(tgt->data_device)) +
+ (tgt->u.integrity.meta_device ? strlen(device_block_path(tgt->u.integrity.meta_device)) : 0) +
+ max_integrity + max_journal_integrity + max_journal_crypt + 512;
+
+ params = crypt_safe_alloc(max_size);
+ features = crypt_safe_alloc(max_size);
+ integrity = crypt_safe_alloc(max_integrity);
+ journal_integrity = crypt_safe_alloc(max_journal_integrity);
+ journal_crypt = crypt_safe_alloc(max_journal_crypt);
+ if (!params || !features || !integrity || !journal_integrity || !journal_crypt)
+ goto out;
+
+ if (tgt->u.integrity.integrity) { /* MAX length 16 + str_integrity + str_key */
+ num_options++;
+
+ if (tgt->u.integrity.vk) {
+ hexkey = crypt_bytes_to_hex(tgt->u.integrity.vk->keylength, tgt->u.integrity.vk->key);
+ if (!hexkey)
+ goto out;
+ } else
+ hexkey = NULL;
+
+ r = snprintf(integrity, max_integrity, " internal_hash:%s%s%s",
+ tgt->u.integrity.integrity, hexkey ? ":" : "", hexkey ?: "");
+ crypt_safe_free(hexkey);
+ if (r < 0 || r >= max_integrity)
+ goto out;
+ }
+
+ if (tgt->u.integrity.journal_integrity) { /* MAX length 14 + str_journal_integrity + str_key */
+ num_options++;
+
+ if (tgt->u.integrity.journal_integrity_key) {
+ hexkey = crypt_bytes_to_hex( tgt->u.integrity.journal_integrity_key->keylength,
+ tgt->u.integrity.journal_integrity_key->key);
+ if (!hexkey)
+ goto out;
+ } else
+ hexkey = NULL;
+
+ r = snprintf(journal_integrity, max_journal_integrity, " journal_mac:%s%s%s",
+ tgt->u.integrity.journal_integrity, hexkey ? ":" : "", hexkey ?: "");
+ crypt_safe_free(hexkey);
+ if (r < 0 || r >= max_journal_integrity)
+ goto out;
+ }
+
+ if (tgt->u.integrity.journal_crypt) { /* MAX length 15 + str_journal_crypt + str_key */
+ num_options++;
+
+ if (tgt->u.integrity.journal_crypt_key) {
+ hexkey = crypt_bytes_to_hex(tgt->u.integrity.journal_crypt_key->keylength,
+ tgt->u.integrity.journal_crypt_key->key);
+ if (!hexkey)
+ goto out;
+ } else
+ hexkey = NULL;
+
+ r = snprintf(journal_crypt, max_journal_crypt, " journal_crypt:%s%s%s",
+ tgt->u.integrity.journal_crypt, hexkey ? ":" : "", hexkey ?: "");
+ crypt_safe_free(hexkey);
+ if (r < 0 || r >= max_journal_crypt)
+ goto out;
+ }
+
+ if (tgt->u.integrity.journal_size)
+ num_options++;
+ if (tgt->u.integrity.journal_watermark)
+ num_options++;
+ if (tgt->u.integrity.journal_commit_time)
+ num_options++;
+ if (tgt->u.integrity.interleave_sectors)
+ num_options++;
+ if (tgt->u.integrity.sector_size)
+ num_options++;
+ if (tgt->u.integrity.buffer_sectors)
+ num_options++;
+ if (tgt->u.integrity.fix_padding)
+ num_options++;
+ if (tgt->u.integrity.fix_hmac)
+ num_options++;
+ if (tgt->u.integrity.legacy_recalc)
+ num_options++;
+ if (tgt->u.integrity.meta_device)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_RECALCULATE)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_RECALCULATE_RESET)
+ num_options++;
+ if (flags & CRYPT_ACTIVATE_ALLOW_DISCARDS)
+ num_options++;
+
+ r = snprintf(features, max_size, "%d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", num_options,
+ tgt->u.integrity.journal_size ? _uf(feature[0], sizeof(feature[0]), /* MAX length 17 + int32 */
+ "journal_sectors", (unsigned)(tgt->u.integrity.journal_size / SECTOR_SIZE)) : "",
+ tgt->u.integrity.journal_watermark ? _uf(feature[1], sizeof(feature[1]), /* MAX length 19 + int32 */
+ /* bitmap overloaded values */
+ (flags & CRYPT_ACTIVATE_NO_JOURNAL_BITMAP) ? "sectors_per_bit" : "journal_watermark",
+ tgt->u.integrity.journal_watermark) : "",
+ tgt->u.integrity.journal_commit_time ? _uf(feature[2], sizeof(feature[2]), /* MAX length 23 + int32 */
+ /* bitmap overloaded values */
+ (flags & CRYPT_ACTIVATE_NO_JOURNAL_BITMAP) ? "bitmap_flush_interval" : "commit_time",
+ tgt->u.integrity.journal_commit_time) : "",
+ tgt->u.integrity.interleave_sectors ? _uf(feature[3], sizeof(feature[3]), /* MAX length 20 + int32 */
+ "interleave_sectors", tgt->u.integrity.interleave_sectors) : "",
+ tgt->u.integrity.sector_size ? _uf(feature[4], sizeof(feature[4]), /* MAX length 12 + int32 */
+ "block_size", tgt->u.integrity.sector_size) : "",
+ tgt->u.integrity.buffer_sectors ? _uf(feature[5], sizeof(feature[5]), /* MAX length 16 + int32 */
+ "buffer_sectors", tgt->u.integrity.buffer_sectors) : "",
+ tgt->u.integrity.integrity ? integrity : "",
+ tgt->u.integrity.journal_integrity ? journal_integrity : "",
+ tgt->u.integrity.journal_crypt ? journal_crypt : "",
+ tgt->u.integrity.fix_padding ? " fix_padding" : "", /* MAX length 12 */
+ tgt->u.integrity.fix_hmac ? " fix_hmac" : "", /* MAX length 9 */
+ tgt->u.integrity.legacy_recalc ? " legacy_recalculate" : "", /* MAX length 19 */
+ flags & CRYPT_ACTIVATE_RECALCULATE ? " recalculate" : "", /* MAX length 12 */
+ flags & CRYPT_ACTIVATE_RECALCULATE_RESET ? " reset_recalculate" : "", /* MAX length 18 */
+ flags & CRYPT_ACTIVATE_ALLOW_DISCARDS ? " allow_discards" : "", /* MAX length 15 */
+ tgt->u.integrity.meta_device ? " meta_device:" : "", /* MAX length 13 + str_device */
+ tgt->u.integrity.meta_device ? device_block_path(tgt->u.integrity.meta_device) : "");
+ if (r < 0 || r >= max_size)
+ goto out;
+
+ if (flags & CRYPT_ACTIVATE_NO_JOURNAL_BITMAP)
+ mode = 'B';
+ else if (flags & CRYPT_ACTIVATE_RECOVERY)
+ mode = 'R';
+ else if (flags & CRYPT_ACTIVATE_NO_JOURNAL)
+ mode = 'D';
+ else
+ mode = 'J';
+
+ r = snprintf(params, max_size, "%s %" PRIu64 " %d %c %s",
+ device_block_path(tgt->data_device), tgt->u.integrity.offset,
+ tgt->u.integrity.tag_size, mode, features);
+ if (r < 0 || r >= max_size)
+ goto out;
+
+ params_out = params;
+out:
+ crypt_safe_free(features);
+ crypt_safe_free(integrity);
+ crypt_safe_free(journal_integrity);
+ crypt_safe_free(journal_crypt);
+ if (!params_out)
+ crypt_safe_free(params);
+
+ return params_out;
+}
+
+static char *get_dm_linear_params(const struct dm_target *tgt)
+{
+ char *params;
+ int r;
+ int max_size = strlen(device_block_path(tgt->data_device)) + int_log10(tgt->u.linear.offset) + 3;
+
+ params = crypt_safe_alloc(max_size);
+ if (!params)
+ return NULL;
+
+ r = snprintf(params, max_size, "%s %" PRIu64,
+ device_block_path(tgt->data_device), tgt->u.linear.offset);
+
+ if (r < 0 || r >= max_size) {
+ crypt_safe_free(params);
+ params = NULL;
+ }
+
+ return params;
+}
+
+static char *get_dm_zero_params(void)
+{
+ char *params = crypt_safe_alloc(1);
+ if (!params)
+ return NULL;
+
+ params[0] = 0;
+ return params;
+}
+
+/* DM helpers */
+static int _dm_remove(const char *name, int udev_wait, int deferred)
+{
+ int r = 0;
+ struct dm_task *dmt;
+ uint32_t cookie = 0;
+
+ if (!_dm_use_udev())
+ udev_wait = 0;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
+ return 0;
+
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+
+#if HAVE_DECL_DM_TASK_RETRY_REMOVE
+ if (!dm_task_retry_remove(dmt))
+ goto out;
+#endif
+#if HAVE_DECL_DM_TASK_DEFERRED_REMOVE
+ if (deferred && !dm_task_deferred_remove(dmt))
+ goto out;
+#endif
+ if (udev_wait && !_dm_task_set_cookie(dmt, &cookie, DM_UDEV_DISABLE_LIBRARY_FALLBACK))
+ goto out;
+
+ r = dm_task_run(dmt);
+
+ if (udev_wait)
+ (void)_dm_udev_wait(cookie);
+out:
+ dm_task_destroy(dmt);
+ return r;
+}
+
+static int _dm_simple(int task, const char *name, uint32_t dmflags)
+{
+ int r = 0;
+ struct dm_task *dmt;
+
+ if (!(dmt = dm_task_create(task)))
+ return 0;
+
+ if (name && !dm_task_set_name(dmt, name))
+ goto out;
+
+ if (task == DM_DEVICE_SUSPEND &&
+ (dmflags & DM_SUSPEND_SKIP_LOCKFS) && !dm_task_skip_lockfs(dmt))
+ goto out;
+
+ if (task == DM_DEVICE_SUSPEND &&
+ (dmflags & DM_SUSPEND_NOFLUSH) && !dm_task_no_flush(dmt))
+ goto out;
+
+ r = dm_task_run(dmt);
+out:
+ dm_task_destroy(dmt);
+ return r;
+}
+
+static int _dm_resume_device(const char *name, uint32_t flags);
+
+static int _error_device(const char *name, size_t size)
+{
+ struct dm_task *dmt;
+ int r = 0;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_RELOAD)))
+ return 0;
+
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+
+ if (!dm_task_add_target(dmt, UINT64_C(0), size, "error", ""))
+ goto out;
+
+ if (!dm_task_set_ro(dmt))
+ goto out;
+
+ if (!dm_task_no_open_count(dmt))
+ goto out;
+
+ if (!dm_task_run(dmt))
+ goto out;
+
+ if (_dm_resume_device(name, 0)) {
+ _dm_simple(DM_DEVICE_CLEAR, name, 0);
+ goto out;
+ }
+
+ r = 1;
+out:
+ dm_task_destroy(dmt);
+ return r;
+}
+
+int dm_error_device(struct crypt_device *cd, const char *name)
+{
+ int r;
+ struct crypt_dm_active_device dmd;
+
+ if (!name)
+ return -EINVAL;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+
+ if ((dm_query_device(cd, name, 0, &dmd) >= 0) && _error_device(name, dmd.size))
+ r = 0;
+ else
+ r = -EINVAL;
+
+ dm_targets_free(cd, &dmd);
+
+ dm_exit_context();
+
+ return r;
+}
+
+int dm_clear_device(struct crypt_device *cd, const char *name)
+{
+ int r;
+
+ if (!name)
+ return -EINVAL;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+
+ if (_dm_simple(DM_DEVICE_CLEAR, name, 0))
+ r = 0;
+ else
+ r = -EINVAL;
+
+ dm_exit_context();
+
+ return r;
+}
+
+int dm_remove_device(struct crypt_device *cd, const char *name, uint32_t flags)
+{
+ struct crypt_dm_active_device dmd = {};
+ int r = -EINVAL;
+ int retries = (flags & CRYPT_DEACTIVATE_FORCE) ? RETRY_COUNT : 1;
+ int deferred = (flags & CRYPT_DEACTIVATE_DEFERRED) ? 1 : 0;
+ int error_target = 0;
+ uint32_t dmt_flags;
+
+ if (!name)
+ return -EINVAL;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+
+ if (deferred && !dm_flags(cd, DM_UNKNOWN, &dmt_flags) && !(dmt_flags & DM_DEFERRED_SUPPORTED)) {
+ log_err(cd, _("Requested deferred flag is not supported."));
+ dm_exit_context();
+ return -ENOTSUP;
+ }
+
+ do {
+ r = _dm_remove(name, 1, deferred) ? 0 : -EINVAL;
+ if (--retries && r) {
+ log_dbg(cd, "WARNING: other process locked internal device %s, %s.",
+ name, retries ? "retrying remove" : "giving up");
+ sleep(1);
+ if ((flags & CRYPT_DEACTIVATE_FORCE) && !error_target) {
+ /* If the force flag is set, replace the device with a read-only error target.
+ * It should stop processes from reading it and also remove the underlying
+ * device from the mapping, so it is usable again.
+ * Anyway, if some process tries to read a temporary cryptsetup device,
+ * it is a bug; no other process should touch it (e.g. udev).
+ */
+ if (!dm_query_device(cd, name, 0, &dmd)) {
+ _error_device(name, dmd.size);
+ error_target = 1;
+ }
+ }
+ }
+ } while (r == -EINVAL && retries);
+
+ dm_task_update_nodes();
+ dm_exit_context();
+
+ return r;
+}
+
+#define UUID_LEN 37 /* 36 + \0, libuuid ... */
+/*
+ * UUID has format: CRYPT-<devicetype>-[<uuid>-]<device name>
+ * CRYPT-PLAIN-name
+ * CRYPT-LUKS1-00000000000000000000000000000000-name
+ * CRYPT-TEMP-name
+ */
+static int dm_prepare_uuid(struct crypt_device *cd, const char *name, const char *type,
+ const char *uuid, char *buf, size_t buflen)
+{
+ char *ptr, uuid2[UUID_LEN] = {0};
+ uuid_t uu;
+ int i = 0;
+
+ /* Remove '-' chars */
+ if (uuid) {
+ if (uuid_parse(uuid, uu) < 0) {
+ log_dbg(cd, "Requested UUID %s has invalid format.", uuid);
+ return 0;
+ }
+
+ for (ptr = uuid2, i = 0; i < UUID_LEN; i++)
+ if (uuid[i] != '-') {
+ *ptr = uuid[i];
+ ptr++;
+ }
+ }
+
+ i = snprintf(buf, buflen, DM_UUID_PREFIX "%s%s%s%s%s",
+ type ?: "", type ? "-" : "",
+ uuid2[0] ? uuid2 : "", uuid2[0] ? "-" : "",
+ name);
+ if (i < 0)
+ return 0;
+
+ log_dbg(cd, "DM-UUID is %s", buf);
+ if ((size_t)i >= buflen)
+ log_err(cd, _("DM-UUID for device %s was truncated."), name);
+
+ return 1;
+}
+
+int lookup_dm_dev_by_uuid(struct crypt_device *cd, const char *uuid, const char *type)
+{
+ int r_udev, r;
+ char *c;
+ char dev_uuid[DM_UUID_LEN + DM_BY_ID_PREFIX_LEN] = DM_BY_ID_PREFIX;
+
+ if (!dm_prepare_uuid(cd, "", type, uuid, dev_uuid + DM_BY_ID_PREFIX_LEN, DM_UUID_LEN))
+ return -EINVAL;
+
+ c = strrchr(dev_uuid, '-');
+ if (!c)
+ return -EINVAL;
+
+ /* cut off dm name */
+ *c = '\0';
+
+ /* Either udev or sysfs can report that device is active. */
+ r = lookup_by_disk_id(dev_uuid);
+ if (r > 0)
+ return r;
+
+ r_udev = r;
+ r = lookup_by_sysfs_uuid_field(dev_uuid + DM_BY_ID_PREFIX_LEN);
+
+ return r == -ENOENT ? r_udev : r;
+}
+
+static int _add_dm_targets(struct dm_task *dmt, struct crypt_dm_active_device *dmd)
+{
+ const char *target;
+ struct dm_target *tgt = &dmd->segment;
+
+ do {
+ switch (tgt->type) {
+ case DM_CRYPT:
+ target = DM_CRYPT_TARGET;
+ break;
+ case DM_VERITY:
+ target = DM_VERITY_TARGET;
+ break;
+ case DM_INTEGRITY:
+ target = DM_INTEGRITY_TARGET;
+ break;
+ case DM_LINEAR:
+ target = DM_LINEAR_TARGET;
+ break;
+ case DM_ZERO:
+ target = DM_ZERO_TARGET;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ if (!dm_task_add_target(dmt, tgt->offset, tgt->size, target, tgt->params))
+ return -EINVAL;
+
+ tgt = tgt->next;
+ } while (tgt);
+
+ return 0;
+}
+
+static void _destroy_dm_targets_params(struct crypt_dm_active_device *dmd)
+{
+ struct dm_target *t = &dmd->segment;
+
+ do {
+ crypt_safe_free(t->params);
+ t->params = NULL;
+ t = t->next;
+ } while (t);
+}
+
+static int _create_dm_targets_params(struct crypt_dm_active_device *dmd)
+{
+ int r;
+ struct dm_target *tgt = &dmd->segment;
+
+ do {
+ if (tgt->type == DM_CRYPT)
+ tgt->params = get_dm_crypt_params(tgt, dmd->flags);
+ else if (tgt->type == DM_VERITY)
+ tgt->params = get_dm_verity_params(tgt, dmd->flags);
+ else if (tgt->type == DM_INTEGRITY)
+ tgt->params = get_dm_integrity_params(tgt, dmd->flags);
+ else if (tgt->type == DM_LINEAR)
+ tgt->params = get_dm_linear_params(tgt);
+ else if (tgt->type == DM_ZERO)
+ tgt->params = get_dm_zero_params();
+ else {
+ r = -ENOTSUP;
+ goto err;
+ }
+
+ if (!tgt->params) {
+ r = -EINVAL;
+ goto err;
+ }
+ tgt = tgt->next;
+ } while (tgt);
+
+ return 0;
+err:
+ _destroy_dm_targets_params(dmd);
+ return r;
+}
+
+static int _dm_create_device(struct crypt_device *cd, const char *name, const char *type,
+ struct crypt_dm_active_device *dmd)
+{
+ struct dm_task *dmt = NULL;
+ struct dm_info dmi;
+ char dev_uuid[DM_UUID_LEN] = {0};
+ int r = -EINVAL;
+ uint32_t cookie = 0, read_ahead = 0;
+ uint16_t udev_flags = DM_UDEV_DISABLE_LIBRARY_FALLBACK;
+
+ if (dmd->flags & CRYPT_ACTIVATE_PRIVATE)
+ udev_flags |= CRYPT_TEMP_UDEV_FLAGS;
+
+ /* All devices must have DM_UUID; only resize on an old device is an exception */
+ if (!dm_prepare_uuid(cd, name, type, dmd->uuid, dev_uuid, sizeof(dev_uuid)))
+ goto out;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_CREATE)))
+ goto out;
+
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+
+ if (!dm_task_set_uuid(dmt, dev_uuid))
+ goto out;
+
+ if (!dm_task_secure_data(dmt))
+ goto out;
+ if ((dmd->flags & CRYPT_ACTIVATE_READONLY) && !dm_task_set_ro(dmt))
+ goto out;
+
+ r = _create_dm_targets_params(dmd);
+ if (r)
+ goto out;
+
+ r = _add_dm_targets(dmt, dmd);
+ if (r)
+ goto out;
+
+ r = -EINVAL;
+
+#ifdef DM_READ_AHEAD_MINIMUM_FLAG
+ if (device_read_ahead(dmd->segment.data_device, &read_ahead) &&
+ !dm_task_set_read_ahead(dmt, read_ahead, DM_READ_AHEAD_MINIMUM_FLAG))
+ goto out;
+#endif
+ if (_dm_use_udev() && !_dm_task_set_cookie(dmt, &cookie, udev_flags))
+ goto out;
+
+ if (!dm_task_run(dmt)) {
+ r = dm_status_device(cd, name);
+ if (r >= 0)
+ r = -EEXIST;
+ if (r != -EEXIST && r != -ENODEV)
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (dm_task_get_info(dmt, &dmi))
+ r = 0;
+
+ if (_dm_use_udev()) {
+ (void)_dm_udev_wait(cookie);
+ cookie = 0;
+ }
+
+ if (r < 0)
+ _dm_remove(name, 1, 0);
+
+out:
+ if (cookie && _dm_use_udev())
+ (void)_dm_udev_wait(cookie);
+
+ if (dmt)
+ dm_task_destroy(dmt);
+
+ dm_task_update_nodes();
+
+ /* If code just loaded target module, update versions */
+ _dm_check_versions(cd, dmd->segment.type);
+
+ _destroy_dm_targets_params(dmd);
+
+ return r;
+}
+
+static int _dm_resume_device(const char *name, uint32_t dmflags)
+{
+ struct dm_task *dmt;
+ int r = -EINVAL;
+ uint32_t cookie = 0;
+ uint16_t udev_flags = DM_UDEV_DISABLE_LIBRARY_FALLBACK;
+
+ if (dmflags & DM_RESUME_PRIVATE)
+ udev_flags |= CRYPT_TEMP_UDEV_FLAGS;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_RESUME)))
+ return r;
+
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+
+ if ((dmflags & DM_SUSPEND_SKIP_LOCKFS) && !dm_task_skip_lockfs(dmt))
+ goto out;
+
+ if ((dmflags & DM_SUSPEND_NOFLUSH) && !dm_task_no_flush(dmt))
+ goto out;
+
+ if (_dm_use_udev() && !_dm_task_set_cookie(dmt, &cookie, udev_flags))
+ goto out;
+
+ if (dm_task_run(dmt))
+ r = 0;
+out:
+ if (cookie && _dm_use_udev())
+ (void)_dm_udev_wait(cookie);
+
+ dm_task_destroy(dmt);
+
+ dm_task_update_nodes();
+
+ return r;
+}
+
+static int _dm_reload_device(struct crypt_device *cd, const char *name,
+ struct crypt_dm_active_device *dmd)
+{
+ int r = -EINVAL;
+ struct dm_task *dmt = NULL;
+ uint32_t read_ahead = 0;
+
+ /* All devices must have DM_UUID; only resize on an old device is an exception */
+ if (!(dmt = dm_task_create(DM_DEVICE_RELOAD)))
+ goto out;
+
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+
+ if (!dm_task_secure_data(dmt))
+ goto out;
+ if ((dmd->flags & CRYPT_ACTIVATE_READONLY) && !dm_task_set_ro(dmt))
+ goto out;
+
+ r = _create_dm_targets_params(dmd);
+ if (r)
+ goto out;
+
+ r = _add_dm_targets(dmt, dmd);
+ if (r)
+ goto out;
+
+ r = -EINVAL;
+
+#ifdef DM_READ_AHEAD_MINIMUM_FLAG
+ if (device_read_ahead(dmd->segment.data_device, &read_ahead) &&
+ !dm_task_set_read_ahead(dmt, read_ahead, DM_READ_AHEAD_MINIMUM_FLAG))
+ goto out;
+#endif
+
+ if (dm_task_run(dmt))
+ r = 0;
+out:
+ if (dmt)
+ dm_task_destroy(dmt);
+
+ /* If code just loaded target module, update versions */
+ _dm_check_versions(cd, dmd->segment.type);
+
+ _destroy_dm_targets_params(dmd);
+
+ return r;
+}
+
+static void crypt_free_verity_params(struct crypt_params_verity *vp)
+{
+ if (!vp)
+ return;
+
+ free(CONST_CAST(void*)vp->hash_name);
+ free(CONST_CAST(void*)vp->data_device);
+ free(CONST_CAST(void*)vp->hash_device);
+ free(CONST_CAST(void*)vp->fec_device);
+ free(CONST_CAST(void*)vp->salt);
+ free(vp);
+}
+
+static void _dm_target_free_query_path(struct crypt_device *cd, struct dm_target *tgt)
+{
+ switch(tgt->type) {
+ case DM_CRYPT:
+ crypt_free_volume_key(tgt->u.crypt.vk);
+ free(CONST_CAST(void*)tgt->u.crypt.cipher);
+ break;
+ case DM_INTEGRITY:
+ free(CONST_CAST(void*)tgt->u.integrity.integrity);
+ crypt_free_volume_key(tgt->u.integrity.vk);
+
+ free(CONST_CAST(void*)tgt->u.integrity.journal_integrity);
+ crypt_free_volume_key(tgt->u.integrity.journal_integrity_key);
+
+ free(CONST_CAST(void*)tgt->u.integrity.journal_crypt);
+ crypt_free_volume_key(tgt->u.integrity.journal_crypt_key);
+
+ device_free(cd, tgt->u.integrity.meta_device);
+ break;
+ case DM_VERITY:
+ crypt_free_verity_params(tgt->u.verity.vp);
+ device_free(cd, tgt->u.verity.hash_device);
+ free(CONST_CAST(void*)tgt->u.verity.root_hash);
+ free(CONST_CAST(void*)tgt->u.verity.root_hash_sig_key_desc);
+ /* fall through */
+ case DM_LINEAR:
+ /* fall through */
+ case DM_ERROR:
+ /* fall through */
+ case DM_ZERO:
+ break;
+ default:
+ log_err(cd, _("Unknown dm target type."));
+ return;
+ }
+
+ device_free(cd, tgt->data_device);
+}
+
+static void _dm_target_erase(struct crypt_device *cd, struct dm_target *tgt)
+{
+ if (tgt->direction == TARGET_EMPTY)
+ return;
+
+ if (tgt->direction == TARGET_QUERY)
+ _dm_target_free_query_path(cd, tgt);
+
+ if (tgt->type == DM_CRYPT)
+ free(CONST_CAST(void*)tgt->u.crypt.integrity);
+}
+
+void dm_targets_free(struct crypt_device *cd, struct crypt_dm_active_device *dmd)
+{
+ struct dm_target *t = &dmd->segment, *next = t->next;
+
+ _dm_target_erase(cd, t);
+
+ while (next) {
+ t = next;
+ next = t->next;
+ _dm_target_erase(cd, t);
+ free(t);
+ }
+
+ memset(&dmd->segment, 0, sizeof(dmd->segment));
+}
+
+int dm_targets_allocate(struct dm_target *first, unsigned count)
+{
+ if (!first || first->next || !count)
+ return -EINVAL;
+
+ while (--count) {
+ first->next = crypt_zalloc(sizeof(*first));
+ if (!first->next)
+ return -ENOMEM;
+ first = first->next;
+ }
+
+ return 0;
+}
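+
+/*
+ * Illustrative only: building a two-segment mapping. The first target is
+ * embedded in struct crypt_dm_active_device; dm_targets_allocate() chains
+ * the remaining ones:
+ *
+ *   struct crypt_dm_active_device dmd = { .size = dev_size };
+ *   if (!dm_targets_allocate(&dmd.segment, 2)) {
+ *       dmd.segment.type = DM_CRYPT;        (first segment)
+ *       dmd.segment.next->type = DM_ZERO;   (second segment)
+ *   }
+ */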
+
+static int check_retry(struct crypt_device *cd, uint32_t *dmd_flags, uint32_t dmt_flags)
+{
+ int ret = 0;
+
+ /* If discard not supported try to load without discard */
+ if ((*dmd_flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) &&
+ !(dmt_flags & DM_DISCARDS_SUPPORTED)) {
+ log_dbg(cd, "Discard/TRIM is not supported");
+ *dmd_flags = *dmd_flags & ~CRYPT_ACTIVATE_ALLOW_DISCARDS;
+ ret = 1;
+ }
+
+ /* If kernel keyring is not supported load key directly in dm-crypt */
+ if ((*dmd_flags & CRYPT_ACTIVATE_KEYRING_KEY) &&
+ !(dmt_flags & DM_KERNEL_KEYRING_SUPPORTED)) {
+ log_dbg(cd, "dm-crypt does not support kernel keyring");
+ *dmd_flags = *dmd_flags & ~CRYPT_ACTIVATE_KEYRING_KEY;
+ ret = 1;
+ }
+
+ /* Drop performance options if not supported */
+ if ((*dmd_flags & (CRYPT_ACTIVATE_SAME_CPU_CRYPT | CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS)) &&
+ !(dmt_flags & (DM_SAME_CPU_CRYPT_SUPPORTED | DM_SUBMIT_FROM_CRYPT_CPUS_SUPPORTED))) {
+ log_dbg(cd, "dm-crypt does not support performance options");
+ *dmd_flags = *dmd_flags & ~(CRYPT_ACTIVATE_SAME_CPU_CRYPT | CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS);
+ ret = 1;
+ }
+
+	/* Drop the no-workqueue options if not supported */
+ if ((*dmd_flags & (CRYPT_ACTIVATE_NO_READ_WORKQUEUE | CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE)) &&
+ !(dmt_flags & DM_CRYPT_NO_WORKQUEUE_SUPPORTED)) {
+		log_dbg(cd, "dm-crypt does not support workqueue options");
+ *dmd_flags = *dmd_flags & ~(CRYPT_ACTIVATE_NO_READ_WORKQUEUE | CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+int dm_create_device(struct crypt_device *cd, const char *name,
+ const char *type,
+ struct crypt_dm_active_device *dmd)
+{
+ uint32_t dmt_flags = 0;
+ int r = -EINVAL;
+
+ if (!type || !dmd)
+ return -EINVAL;
+
+ if (dm_init_context(cd, dmd->segment.type))
+ return -ENOTSUP;
+
+ r = _dm_create_device(cd, name, type, dmd);
+ if (!r || r == -EEXIST)
+ goto out;
+
+ if (dm_flags(cd, dmd->segment.type, &dmt_flags))
+ goto out;
+
+ if ((dmd->segment.type == DM_CRYPT || dmd->segment.type == DM_LINEAR || dmd->segment.type == DM_ZERO) &&
+ check_retry(cd, &dmd->flags, dmt_flags)) {
+ log_dbg(cd, "Retrying open without incompatible options.");
+ r = _dm_create_device(cd, name, type, dmd);
+ if (!r || r == -EEXIST)
+ goto out;
+ }
+
+ if (dmd->flags & (CRYPT_ACTIVATE_SAME_CPU_CRYPT|CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS) &&
+ !(dmt_flags & (DM_SAME_CPU_CRYPT_SUPPORTED|DM_SUBMIT_FROM_CRYPT_CPUS_SUPPORTED))) {
+ log_err(cd, _("Requested dm-crypt performance options are not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->flags & (CRYPT_ACTIVATE_NO_READ_WORKQUEUE | CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE) &&
+ !(dmt_flags & DM_CRYPT_NO_WORKQUEUE_SUPPORTED)) {
+ log_err(cd, _("Requested dm-crypt performance options are not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->flags & (CRYPT_ACTIVATE_IGNORE_CORRUPTION|
+ CRYPT_ACTIVATE_RESTART_ON_CORRUPTION|
+ CRYPT_ACTIVATE_IGNORE_ZERO_BLOCKS|
+ CRYPT_ACTIVATE_CHECK_AT_MOST_ONCE) &&
+ !(dmt_flags & DM_VERITY_ON_CORRUPTION_SUPPORTED)) {
+ log_err(cd, _("Requested dm-verity data corruption handling options are not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->flags & CRYPT_ACTIVATE_TASKLETS &&
+ !(dmt_flags & DM_VERITY_TASKLETS_SUPPORTED)) {
+ log_err(cd, _("Requested dm-verity tasklets option is not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->flags & CRYPT_ACTIVATE_PANIC_ON_CORRUPTION &&
+ !(dmt_flags & DM_VERITY_PANIC_CORRUPTION_SUPPORTED)) {
+ log_err(cd, _("Requested dm-verity data corruption handling options are not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->segment.type == DM_VERITY &&
+ dmd->segment.u.verity.fec_device && !(dmt_flags & DM_VERITY_FEC_SUPPORTED)) {
+ log_err(cd, _("Requested dm-verity FEC options are not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->segment.type == DM_CRYPT) {
+ if (dmd->segment.u.crypt.integrity && !(dmt_flags & DM_INTEGRITY_SUPPORTED)) {
+ log_err(cd, _("Requested data integrity options are not supported."));
+ r = -EINVAL;
+ }
+ if (dmd->segment.u.crypt.sector_size != SECTOR_SIZE && !(dmt_flags & DM_SECTOR_SIZE_SUPPORTED)) {
+ log_err(cd, _("Requested sector_size option is not supported."));
+ r = -EINVAL;
+ }
+ }
+
+ if (dmd->segment.type == DM_INTEGRITY && (dmd->flags & CRYPT_ACTIVATE_RECALCULATE) &&
+ !(dmt_flags & DM_INTEGRITY_RECALC_SUPPORTED)) {
+ log_err(cd, _("Requested automatic recalculation of integrity tags is not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->segment.type == DM_INTEGRITY && (dmd->flags & CRYPT_ACTIVATE_RECALCULATE_RESET) &&
+ !(dmt_flags & DM_INTEGRITY_RESET_RECALC_SUPPORTED)) {
+ log_err(cd, _("Requested automatic recalculation of integrity tags is not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->segment.type == DM_INTEGRITY && (dmd->flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) &&
+ !(dmt_flags & DM_INTEGRITY_DISCARDS_SUPPORTED)) {
+ log_err(cd, _("Discard/TRIM is not supported."));
+ r = -EINVAL;
+ }
+
+ if (dmd->segment.type == DM_INTEGRITY && (dmd->flags & CRYPT_ACTIVATE_NO_JOURNAL_BITMAP) &&
+ !(dmt_flags & DM_INTEGRITY_BITMAP_SUPPORTED)) {
+ log_err(cd, _("Requested dm-integrity bitmap mode is not supported."));
+ r = -EINVAL;
+ }
+out:
+ /*
+	/*
+	 * Print a warning when activating a dm-crypt cipher_null device, unless it is
+	 * a reencryption helper or a keyslot encryption helper device (LUKS1 cipher_null devices).
+	 */
+ if (!r && !(dmd->flags & CRYPT_ACTIVATE_PRIVATE) && single_segment(dmd) && dmd->segment.type == DM_CRYPT &&
+ crypt_is_cipher_null(dmd->segment.u.crypt.cipher))
+ log_dbg(cd, "Activated dm-crypt device with cipher_null. Device is not encrypted.");
+
+ dm_exit_context();
+ return r;
+}
+
+int dm_reload_device(struct crypt_device *cd, const char *name,
+ struct crypt_dm_active_device *dmd, uint32_t dmflags, unsigned resume)
+{
+ int r;
+ uint32_t dmt_flags;
+
+ if (!dmd)
+ return -EINVAL;
+
+ if (dm_init_context(cd, dmd->segment.type))
+ return -ENOTSUP;
+
+ if (dm_flags(cd, DM_INTEGRITY, &dmt_flags) || !(dmt_flags & DM_INTEGRITY_RECALC_SUPPORTED))
+ dmd->flags &= ~CRYPT_ACTIVATE_RECALCULATE;
+
+ r = _dm_reload_device(cd, name, dmd);
+
+ if (r == -EINVAL && (dmd->segment.type == DM_CRYPT || dmd->segment.type == DM_LINEAR)) {
+ if ((dmd->flags & (CRYPT_ACTIVATE_SAME_CPU_CRYPT|CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS)) &&
+ !dm_flags(cd, DM_CRYPT, &dmt_flags) && !(dmt_flags & (DM_SAME_CPU_CRYPT_SUPPORTED | DM_SUBMIT_FROM_CRYPT_CPUS_SUPPORTED)))
+ log_err(cd, _("Requested dm-crypt performance options are not supported."));
+ if ((dmd->flags & (CRYPT_ACTIVATE_NO_READ_WORKQUEUE | CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE)) &&
+ !dm_flags(cd, DM_CRYPT, &dmt_flags) && !(dmt_flags & DM_CRYPT_NO_WORKQUEUE_SUPPORTED))
+ log_err(cd, _("Requested dm-crypt performance options are not supported."));
+ if ((dmd->flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) &&
+ !dm_flags(cd, DM_CRYPT, &dmt_flags) && !(dmt_flags & DM_DISCARDS_SUPPORTED))
+ log_err(cd, _("Discard/TRIM is not supported."));
+ if ((dmd->flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) &&
+ !dm_flags(cd, DM_INTEGRITY, &dmt_flags) && !(dmt_flags & DM_INTEGRITY_DISCARDS_SUPPORTED))
+ log_err(cd, _("Discard/TRIM is not supported."));
+ }
+
+ if (!r && resume)
+ r = _dm_resume_device(name, dmflags | act2dmflags(dmd->flags));
+
+ dm_exit_context();
+ return r;
+}
+
+static int dm_status_dmi(const char *name, struct dm_info *dmi,
+ const char *target, char **status_line)
+{
+ struct dm_task *dmt;
+ uint64_t start, length;
+ char *target_type, *params = NULL;
+ int r = -EINVAL;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
+ return r;
+
+ if (!dm_task_no_flush(dmt))
+ goto out;
+
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+
+ if (!dm_task_run(dmt))
+ goto out;
+
+ if (!dm_task_get_info(dmt, dmi))
+ goto out;
+
+ if (!dmi->exists) {
+ r = -ENODEV;
+ goto out;
+ }
+
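+	/* From here on, -EEXIST means the device exists but its table does not match the expected target */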
+ r = -EEXIST;
+ dm_get_next_target(dmt, NULL, &start, &length,
+ &target_type, &params);
+
+ if (!target_type || start != 0)
+ goto out;
+
+ if (target && strcmp(target_type, target))
+ goto out;
+
+	/* for target == NULL, check all supported target types */
+ if (!target && (strcmp(target_type, DM_CRYPT_TARGET) &&
+ strcmp(target_type, DM_VERITY_TARGET) &&
+ strcmp(target_type, DM_INTEGRITY_TARGET) &&
+ strcmp(target_type, DM_LINEAR_TARGET) &&
+ strcmp(target_type, DM_ZERO_TARGET) &&
+ strcmp(target_type, DM_ERROR_TARGET)))
+ goto out;
+ r = 0;
+out:
+ if (!r && status_line && !(*status_line = strdup(params)))
+ r = -ENOMEM;
+
+ dm_task_destroy(dmt);
+
+ return r;
+}
+
+int dm_status_device(struct crypt_device *cd, const char *name)
+{
+ int r;
+ struct dm_info dmi;
+ struct stat st;
+
+	/* libdevmapper is too clever and handles
+	 * a path argument differently, failing with an error.
+	 * Fail early here if the parameter is a non-existent path.
+	 */
+ if (strchr(name, '/') && stat(name, &st) < 0)
+ return -ENODEV;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+ r = dm_status_dmi(name, &dmi, NULL, NULL);
+ dm_exit_context();
+
+ if (r < 0)
+ return r;
+
+ return (dmi.open_count > 0) ? 1 : 0;
+}
+
+int dm_status_suspended(struct crypt_device *cd, const char *name)
+{
+ int r;
+ struct dm_info dmi;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+ r = dm_status_dmi(name, &dmi, NULL, NULL);
+ dm_exit_context();
+
+ if (r < 0)
+ return r;
+
+ return dmi.suspended ? 1 : 0;
+}
+
+static int _dm_status_verity_ok(struct crypt_device *cd, const char *name)
+{
+ int r;
+ struct dm_info dmi;
+ char *status_line = NULL;
+
+ r = dm_status_dmi(name, &dmi, DM_VERITY_TARGET, &status_line);
+ if (r < 0 || !status_line) {
+ free(status_line);
+ return r;
+ }
+
+ log_dbg(cd, "Verity volume %s status is %s.", name, status_line ?: "");
+ r = status_line[0] == 'V' ? 1 : 0;
+ free(status_line);
+
+ return r;
+}
+
+int dm_status_verity_ok(struct crypt_device *cd, const char *name)
+{
+ int r;
+
+ if (dm_init_context(cd, DM_VERITY))
+ return -ENOTSUP;
+ r = _dm_status_verity_ok(cd, name);
+ dm_exit_context();
+ return r;
+}
+
+int dm_status_integrity_failures(struct crypt_device *cd, const char *name, uint64_t *count)
+{
+ int r;
+ struct dm_info dmi;
+ char *status_line = NULL;
+
+ if (dm_init_context(cd, DM_INTEGRITY))
+ return -ENOTSUP;
+
+ r = dm_status_dmi(name, &dmi, DM_INTEGRITY_TARGET, &status_line);
+ if (r < 0 || !status_line) {
+ free(status_line);
+ dm_exit_context();
+ return r;
+ }
+
+ log_dbg(cd, "Integrity volume %s failure status is %s.", name, status_line ?: "");
+ *count = strtoull(status_line, NULL, 10);
+ free(status_line);
+ dm_exit_context();
+
+ return 0;
+}
+
+/* FIXME: use hex wrapper, use val wrappers for line parsing */
+static int _dm_target_query_crypt(struct crypt_device *cd, uint32_t get_flags,
+ char *params, struct dm_target *tgt,
+ uint32_t *act_flags)
+{
+ uint64_t val64;
+ char *rcipher, *rintegrity, *key_, *rdevice, *endp, buffer[3], *arg, *key_desc;
+ unsigned int i, val;
+ int r;
+ size_t key_size;
+ struct device *data_device = NULL;
+ char *cipher = NULL, *integrity = NULL;
+ struct volume_key *vk = NULL;
+
+ tgt->type = DM_CRYPT;
+ tgt->direction = TARGET_QUERY;
+ tgt->u.crypt.sector_size = SECTOR_SIZE;
+
+ r = -EINVAL;
+
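+	/*
+	 * Parse a dm-crypt table line; fields are read below in this order
+	 * (illustrative): <cipher> <key|:size:type:desc> <iv_offset> <device>
+	 * <offset> [<#opt_params> allow_discards sector_size:4096 ...]
+	 */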
+ rcipher = strsep(&params, " ");
+ rintegrity = NULL;
+
+	/* key field (hex string or kernel keyring reference), processed later */
+ key_ = strsep(&params, " ");
+ if (!params)
+ goto err;
+ val64 = strtoull(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ params++;
+
+ tgt->u.crypt.iv_offset = val64;
+
+ /* device */
+ rdevice = strsep(&params, " ");
+ if (get_flags & DM_ACTIVE_DEVICE) {
+ arg = crypt_lookup_dev(rdevice);
+ r = device_alloc(cd, &data_device, arg);
+ free(arg);
+ if (r < 0 && r != -ENOTBLK)
+ goto err;
+ }
+
+ r = -EINVAL;
+
+	/* offset */
+ if (!params)
+ goto err;
+ val64 = strtoull(params, &params, 10);
+ tgt->u.crypt.offset = val64;
+
+ tgt->u.crypt.tag_size = 0;
+
+ /* Features section, available since crypt target version 1.11 */
+ if (*params) {
+ if (*params != ' ')
+ goto err;
+ params++;
+
+ /* Number of arguments */
+ val64 = strtoull(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ params++;
+
+ for (i = 0; i < val64; i++) {
+ if (!params)
+ goto err;
+ arg = strsep(&params, " ");
+ if (!strcasecmp(arg, "allow_discards"))
+ *act_flags |= CRYPT_ACTIVATE_ALLOW_DISCARDS;
+ else if (!strcasecmp(arg, "same_cpu_crypt"))
+ *act_flags |= CRYPT_ACTIVATE_SAME_CPU_CRYPT;
+ else if (!strcasecmp(arg, "submit_from_crypt_cpus"))
+ *act_flags |= CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS;
+ else if (!strcasecmp(arg, "no_read_workqueue"))
+ *act_flags |= CRYPT_ACTIVATE_NO_READ_WORKQUEUE;
+ else if (!strcasecmp(arg, "no_write_workqueue"))
+ *act_flags |= CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE;
+ else if (!strcasecmp(arg, "iv_large_sectors"))
+ *act_flags |= CRYPT_ACTIVATE_IV_LARGE_SECTORS;
+ else if (sscanf(arg, "integrity:%u:", &val) == 1) {
+ tgt->u.crypt.tag_size = val;
+ rintegrity = strchr(arg + strlen("integrity:"), ':');
+ if (!rintegrity)
+ goto err;
+ rintegrity++;
+ } else if (sscanf(arg, "sector_size:%u", &val) == 1) {
+ tgt->u.crypt.sector_size = val;
+ } else /* unknown option */
+ goto err;
+ }
+
+ /* All parameters should be processed */
+ if (params)
+ goto err;
+ }
+
+ /* cipher */
+ if (get_flags & DM_ACTIVE_CRYPT_CIPHER) {
+ r = crypt_capi_to_cipher(&cipher, &integrity, rcipher, rintegrity);
+ if (r < 0)
+ goto err;
+ }
+
+ r = -EINVAL;
+
+ if (key_[0] == ':')
+ *act_flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+
+ if (get_flags & DM_ACTIVE_CRYPT_KEYSIZE) {
+		/* we trust the kernel that key_string is in the expected format */
+ if (key_[0] == ':') {
+ if (sscanf(key_ + 1, "%zu", &key_size) != 1)
+ goto err;
+ } else
+ key_size = strlen(key_) / 2;
+
+ vk = crypt_alloc_volume_key(key_size, NULL);
+ if (!vk) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ if (get_flags & DM_ACTIVE_CRYPT_KEY) {
+ if (key_[0] == ':') {
+ /* :<key_size>:<key_type>:<key_description> */
+ key_desc = NULL;
+ endp = strpbrk(key_ + 1, ":");
+ if (endp)
+ key_desc = strpbrk(endp + 1, ":");
+ if (!key_desc) {
+ r = -ENOMEM;
+ goto err;
+ }
+ key_desc++;
+ crypt_volume_key_set_description(vk, key_desc);
+ } else {
+ buffer[2] = '\0';
+ for(i = 0; i < vk->keylength; i++) {
+ memcpy(buffer, &key_[i * 2], 2);
+ vk->key[i] = strtoul(buffer, &endp, 16);
+ if (endp != &buffer[2]) {
+ r = -EINVAL;
+ goto err;
+ }
+ }
+ }
+ }
+ }
+ memset(key_, 0, strlen(key_));
+
+ if (cipher)
+ tgt->u.crypt.cipher = cipher;
+ if (integrity)
+ tgt->u.crypt.integrity = integrity;
+ if (data_device)
+ tgt->data_device = data_device;
+ if (vk)
+ tgt->u.crypt.vk = vk;
+ return 0;
+err:
+ free(cipher);
+ free(integrity);
+ device_free(cd, data_device);
+ crypt_free_volume_key(vk);
+ return r;
+}
+
+static int _dm_target_query_verity(struct crypt_device *cd,
+ uint32_t get_flags,
+ char *params,
+ struct dm_target *tgt,
+ uint32_t *act_flags)
+{
+ struct crypt_params_verity *vp = NULL;
+ uint32_t val32;
+ uint64_t val64;
+ ssize_t len;
+ char *str, *str2, *arg;
+ unsigned int i, features;
+ int r;
+ struct device *data_device = NULL, *hash_device = NULL, *fec_device = NULL;
+ char *hash_name = NULL, *root_hash = NULL, *salt = NULL, *fec_dev_str = NULL;
+ char *root_hash_sig_key_desc = NULL;
+
+ if (get_flags & DM_ACTIVE_VERITY_PARAMS) {
+ vp = crypt_zalloc(sizeof(*vp));
+ if (!vp)
+ return -ENOMEM;
+ }
+
+ tgt->type = DM_VERITY;
+ tgt->direction = TARGET_QUERY;
+ tgt->u.verity.vp = vp;
+
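+	/*
+	 * Parse a dm-verity table line; fields are read below in this order:
+	 * <version> <data_dev> <hash_dev> <data_block_size> <hash_block_size>
+	 * <#data_blocks> <hash_start> <algorithm> <root_digest> <salt>
+	 * [<#opt_params> <opt_params...>]
+	 */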
+ /* version */
+ val32 = strtoul(params, &params, 10);
+ if (*params != ' ')
+ return -EINVAL;
+ if (vp)
+ vp->hash_type = val32;
+ params++;
+
+ /* data device */
+ str = strsep(&params, " ");
+ if (!params)
+ return -EINVAL;
+ if (get_flags & DM_ACTIVE_DEVICE) {
+ str2 = crypt_lookup_dev(str);
+ r = device_alloc(cd, &data_device, str2);
+ free(str2);
+ if (r < 0 && r != -ENOTBLK)
+ return r;
+ }
+
+ r = -EINVAL;
+
+ /* hash device */
+ str = strsep(&params, " ");
+ if (!params)
+ goto err;
+ if (get_flags & DM_ACTIVE_VERITY_HASH_DEVICE) {
+ str2 = crypt_lookup_dev(str);
+ r = device_alloc(cd, &hash_device, str2);
+ free(str2);
+ if (r < 0 && r != -ENOTBLK)
+ goto err;
+ }
+
+ r = -EINVAL;
+
+	/* data block size */
+ val32 = strtoul(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ if (vp)
+ vp->data_block_size = val32;
+ params++;
+
+ /* hash block size */
+ val32 = strtoul(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ if (vp)
+ vp->hash_block_size = val32;
+ params++;
+
+ /* data blocks */
+ val64 = strtoull(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ if (vp)
+ vp->data_size = val64;
+ params++;
+
+ /* hash start */
+ val64 = strtoull(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ tgt->u.verity.hash_offset = val64;
+ params++;
+
+ /* hash algorithm */
+ str = strsep(&params, " ");
+ if (!params)
+ goto err;
+ if (vp) {
+ hash_name = strdup(str);
+ if (!hash_name) {
+ r = -ENOMEM;
+ goto err;
+ }
+ }
+
+ /* root digest */
+ str = strsep(&params, " ");
+ if (!params)
+ goto err;
+ len = crypt_hex_to_bytes(str, &str2, 0);
+ if (len < 0) {
+ r = len;
+ goto err;
+ }
+ tgt->u.verity.root_hash_size = len;
+ if (get_flags & DM_ACTIVE_VERITY_ROOT_HASH)
+ root_hash = str2;
+ else
+ free(str2);
+
+ /* salt */
+ str = strsep(&params, " ");
+ if (vp) {
+ if (!strcmp(str, "-")) {
+ vp->salt_size = 0;
+ vp->salt = NULL;
+ } else {
+ len = crypt_hex_to_bytes(str, &str2, 0);
+ if (len < 0) {
+ r = len;
+ goto err;
+ }
+ vp->salt_size = len;
+ salt = str2;
+ }
+ }
+
+ r = -EINVAL;
+
+ /* Features section, available since verity target version 1.3 */
+ if (params) {
+ /* Number of arguments */
+ val64 = strtoull(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ params++;
+
+ features = (int)val64;
+ for (i = 0; i < features; i++) {
+ r = -EINVAL;
+ if (!params)
+ goto err;
+ arg = strsep(&params, " ");
+ if (!strcasecmp(arg, "ignore_corruption"))
+ *act_flags |= CRYPT_ACTIVATE_IGNORE_CORRUPTION;
+ else if (!strcasecmp(arg, "restart_on_corruption"))
+ *act_flags |= CRYPT_ACTIVATE_RESTART_ON_CORRUPTION;
+ else if (!strcasecmp(arg, "panic_on_corruption"))
+ *act_flags |= CRYPT_ACTIVATE_PANIC_ON_CORRUPTION;
+ else if (!strcasecmp(arg, "ignore_zero_blocks"))
+ *act_flags |= CRYPT_ACTIVATE_IGNORE_ZERO_BLOCKS;
+ else if (!strcasecmp(arg, "check_at_most_once"))
+ *act_flags |= CRYPT_ACTIVATE_CHECK_AT_MOST_ONCE;
+ else if (!strcasecmp(arg, "try_verify_in_tasklet"))
+ *act_flags |= CRYPT_ACTIVATE_TASKLETS;
+ else if (!strcasecmp(arg, "use_fec_from_device")) {
+ str = strsep(&params, " ");
+ str2 = crypt_lookup_dev(str);
+ if (get_flags & DM_ACTIVE_VERITY_HASH_DEVICE) {
+ r = device_alloc(cd, &fec_device, str2);
+ if (r < 0 && r != -ENOTBLK) {
+ free(str2);
+ goto err;
+ }
+ }
+ if (vp) {
+ free(fec_dev_str);
+ fec_dev_str = str2;
+ } else
+ free(str2);
+ i++;
+ } else if (!strcasecmp(arg, "fec_start")) {
+ val64 = strtoull(params, &params, 10);
+ if (*params)
+ params++;
+ tgt->u.verity.fec_offset = val64;
+ if (vp)
+ vp->fec_area_offset = val64 * vp->hash_block_size;
+ i++;
+ } else if (!strcasecmp(arg, "fec_blocks")) {
+ val64 = strtoull(params, &params, 10);
+ if (*params)
+ params++;
+ tgt->u.verity.fec_blocks = val64;
+ i++;
+ } else if (!strcasecmp(arg, "fec_roots")) {
+ val32 = strtoul(params, &params, 10);
+ if (*params)
+ params++;
+ if (vp)
+ vp->fec_roots = val32;
+ i++;
+ } else if (!strcasecmp(arg, "root_hash_sig_key_desc")) {
+ str = strsep(&params, " ");
+ if (!str)
+ goto err;
+ if (vp && !root_hash_sig_key_desc) {
+ root_hash_sig_key_desc = strdup(str);
+ if (!root_hash_sig_key_desc) {
+ r = -ENOMEM;
+ goto err;
+ }
+ /* not stored in params, but cannot be used without vp */
+ vp->flags |= CRYPT_VERITY_ROOT_HASH_SIGNATURE;
+ }
+ i++;
+ } else /* unknown option */
+ goto err;
+ }
+
+ /* All parameters should be processed */
+ if (params && *params) {
+ r = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (data_device)
+ tgt->data_device = data_device;
+ if (hash_device)
+ tgt->u.verity.hash_device = hash_device;
+ if (fec_device)
+ tgt->u.verity.fec_device = fec_device;
+ if (root_hash)
+ tgt->u.verity.root_hash = root_hash;
+ if (vp && hash_name)
+ vp->hash_name = hash_name;
+ if (vp && salt)
+ vp->salt = salt;
+ if (vp && fec_dev_str)
+ vp->fec_device = fec_dev_str;
+ if (root_hash_sig_key_desc)
+ tgt->u.verity.root_hash_sig_key_desc = root_hash_sig_key_desc;
+
+ return 0;
+err:
+ device_free(cd, data_device);
+ device_free(cd, hash_device);
+ device_free(cd, fec_device);
+ free(root_hash_sig_key_desc);
+ free(root_hash);
+ free(hash_name);
+ free(salt);
+ free(fec_dev_str);
+ free(vp);
+ return r;
+}
+
+static int _dm_target_query_integrity(struct crypt_device *cd,
+ uint32_t get_flags,
+ char *params,
+ struct dm_target *tgt,
+ uint32_t *act_flags)
+{
+ uint32_t val32;
+ uint64_t val64;
+ char c, *str, *str2, *arg;
+ unsigned int i, features, val;
+ ssize_t len;
+ int r;
+ struct device *data_device = NULL, *meta_device = NULL;
+ char *integrity = NULL, *journal_crypt = NULL, *journal_integrity = NULL;
+ struct volume_key *vk = NULL;
+ struct volume_key *journal_integrity_key = NULL;
+ struct volume_key *journal_crypt_key = NULL;
+
+ tgt->type = DM_INTEGRITY;
+ tgt->direction = TARGET_QUERY;
+
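+	/*
+	 * Parse a dm-integrity table line; fields are read below in this order:
+	 * <device> <offset> <tag_size> <journal_mode D|J|R|B> [<#opt_params> <opt_params...>]
+	 */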
+ /* data device */
+ str = strsep(&params, " ");
+ if (get_flags & DM_ACTIVE_DEVICE) {
+ str2 = crypt_lookup_dev(str);
+ r = device_alloc(cd, &data_device, str2);
+ free(str2);
+ if (r < 0 && r != -ENOTBLK)
+ return r;
+ }
+
+ r = -EINVAL;
+
+	/* offset */
+ if (!params)
+ goto err;
+ val64 = strtoull(params, &params, 10);
+ if (!*params || *params != ' ')
+ goto err;
+ tgt->u.integrity.offset = val64;
+
+	/* tag size */
+ val32 = strtoul(params, &params, 10);
+ tgt->u.integrity.tag_size = val32;
+ if (!*params || *params != ' ')
+ goto err;
+
+ /* journal */
+ c = toupper(*(++params));
+ if (!*params || *(++params) != ' ' || (c != 'D' && c != 'J' && c != 'R' && c != 'B'))
+ goto err;
+ if (c == 'D')
+ *act_flags |= CRYPT_ACTIVATE_NO_JOURNAL;
+ if (c == 'R')
+ *act_flags |= CRYPT_ACTIVATE_RECOVERY;
+ if (c == 'B') {
+ *act_flags |= CRYPT_ACTIVATE_NO_JOURNAL;
+ *act_flags |= CRYPT_ACTIVATE_NO_JOURNAL_BITMAP;
+ }
+
+ tgt->u.integrity.sector_size = SECTOR_SIZE;
+
+ /* Features section */
+ if (params) {
+ /* Number of arguments */
+ val64 = strtoull(params, &params, 10);
+ if (*params != ' ')
+ goto err;
+ params++;
+
+ features = (int)val64;
+ for (i = 0; i < features; i++) {
+ r = -EINVAL;
+ if (!params)
+ goto err;
+ arg = strsep(&params, " ");
+ if (sscanf(arg, "journal_sectors:%u", &val) == 1)
+ tgt->u.integrity.journal_size = val * SECTOR_SIZE;
+ else if (sscanf(arg, "journal_watermark:%u", &val) == 1)
+ tgt->u.integrity.journal_watermark = val;
+ else if (sscanf(arg, "sectors_per_bit:%" PRIu64, &val64) == 1) {
+ if (val64 > UINT_MAX)
+ goto err;
+ /* overloaded value for bitmap mode */
+ tgt->u.integrity.journal_watermark = (unsigned int)val64;
+ } else if (sscanf(arg, "commit_time:%u", &val) == 1)
+ tgt->u.integrity.journal_commit_time = val;
+ else if (sscanf(arg, "bitmap_flush_interval:%u", &val) == 1)
+ /* overloaded value for bitmap mode */
+ tgt->u.integrity.journal_commit_time = val;
+ else if (sscanf(arg, "interleave_sectors:%u", &val) == 1)
+ tgt->u.integrity.interleave_sectors = val;
+ else if (sscanf(arg, "block_size:%u", &val) == 1)
+ tgt->u.integrity.sector_size = val;
+ else if (sscanf(arg, "buffer_sectors:%u", &val) == 1)
+ tgt->u.integrity.buffer_sectors = val;
+ else if (!strncmp(arg, "internal_hash:", 14) && !integrity) {
+ str = &arg[14];
+ arg = strsep(&str, ":");
+ if (get_flags & DM_ACTIVE_INTEGRITY_PARAMS) {
+ integrity = strdup(arg);
+ if (!integrity) {
+ r = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (str) {
+ len = crypt_hex_to_bytes(str, &str2, 1);
+ if (len < 0) {
+ r = len;
+ goto err;
+ }
+
+ r = 0;
+ if (get_flags & DM_ACTIVE_CRYPT_KEY) {
+ vk = crypt_alloc_volume_key(len, str2);
+ if (!vk)
+ r = -ENOMEM;
+ } else if (get_flags & DM_ACTIVE_CRYPT_KEYSIZE) {
+ vk = crypt_alloc_volume_key(len, NULL);
+ if (!vk)
+ r = -ENOMEM;
+ }
+ crypt_safe_free(str2);
+ if (r < 0)
+ goto err;
+ }
+ } else if (!strncmp(arg, "meta_device:", 12) && !meta_device) {
+ if (get_flags & DM_ACTIVE_DEVICE) {
+ str = crypt_lookup_dev(&arg[12]);
+ r = device_alloc(cd, &meta_device, str);
+ free(str);
+ if (r < 0 && r != -ENOTBLK)
+ goto err;
+ }
+ } else if (!strncmp(arg, "journal_crypt:", 14) && !journal_crypt) {
+ str = &arg[14];
+ arg = strsep(&str, ":");
+ if (get_flags & DM_ACTIVE_INTEGRITY_PARAMS) {
+ journal_crypt = strdup(arg);
+ if (!journal_crypt) {
+ r = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (str) {
+ len = crypt_hex_to_bytes(str, &str2, 1);
+ if (len < 0) {
+ r = len;
+ goto err;
+ }
+
+ r = 0;
+ if (get_flags & DM_ACTIVE_JOURNAL_CRYPT_KEY) {
+ journal_crypt_key = crypt_alloc_volume_key(len, str2);
+ if (!journal_crypt_key)
+ r = -ENOMEM;
+ } else if (get_flags & DM_ACTIVE_JOURNAL_CRYPT_KEYSIZE) {
+ journal_crypt_key = crypt_alloc_volume_key(len, NULL);
+ if (!journal_crypt_key)
+ r = -ENOMEM;
+ }
+ crypt_safe_free(str2);
+ if (r < 0)
+ goto err;
+ }
+ } else if (!strncmp(arg, "journal_mac:", 12) && !journal_integrity) {
+ str = &arg[12];
+ arg = strsep(&str, ":");
+ if (get_flags & DM_ACTIVE_INTEGRITY_PARAMS) {
+ journal_integrity = strdup(arg);
+ if (!journal_integrity) {
+ r = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (str) {
+ len = crypt_hex_to_bytes(str, &str2, 1);
+ if (len < 0) {
+ r = len;
+ goto err;
+ }
+
+ r = 0;
+ if (get_flags & DM_ACTIVE_JOURNAL_MAC_KEY) {
+ journal_integrity_key = crypt_alloc_volume_key(len, str2);
+ if (!journal_integrity_key)
+ r = -ENOMEM;
+ } else if (get_flags & DM_ACTIVE_JOURNAL_MAC_KEYSIZE) {
+ journal_integrity_key = crypt_alloc_volume_key(len, NULL);
+ if (!journal_integrity_key)
+ r = -ENOMEM;
+ }
+ crypt_safe_free(str2);
+ if (r < 0)
+ goto err;
+ }
+ } else if (!strcmp(arg, "recalculate")) {
+ *act_flags |= CRYPT_ACTIVATE_RECALCULATE;
+ } else if (!strcmp(arg, "reset_recalculate")) {
+ *act_flags |= CRYPT_ACTIVATE_RECALCULATE_RESET;
+ } else if (!strcmp(arg, "fix_padding")) {
+ tgt->u.integrity.fix_padding = true;
+ } else if (!strcmp(arg, "fix_hmac")) {
+ tgt->u.integrity.fix_hmac = true;
+ } else if (!strcmp(arg, "legacy_recalculate")) {
+ tgt->u.integrity.legacy_recalc = true;
+ } else if (!strcmp(arg, "allow_discards")) {
+ *act_flags |= CRYPT_ACTIVATE_ALLOW_DISCARDS;
+ } else /* unknown option */
+ goto err;
+ }
+
+ /* All parameters should be processed */
+ if (params && *params) {
+ r = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (data_device)
+ tgt->data_device = data_device;
+ if (meta_device)
+ tgt->u.integrity.meta_device = meta_device;
+ if (integrity)
+ tgt->u.integrity.integrity = integrity;
+ if (journal_crypt)
+ tgt->u.integrity.journal_crypt = journal_crypt;
+ if (journal_integrity)
+ tgt->u.integrity.journal_integrity = journal_integrity;
+ if (vk)
+ tgt->u.integrity.vk = vk;
+ if (journal_integrity_key)
+ tgt->u.integrity.journal_integrity_key = journal_integrity_key;
+ if (journal_crypt_key)
+ tgt->u.integrity.journal_crypt_key = journal_crypt_key;
+ return 0;
+err:
+ device_free(cd, data_device);
+ device_free(cd, meta_device);
+ free(integrity);
+ free(journal_crypt);
+ free(journal_integrity);
+ crypt_free_volume_key(vk);
+ crypt_free_volume_key(journal_integrity_key);
+ crypt_free_volume_key(journal_crypt_key);
+ return r;
+}
+
+static int _dm_target_query_linear(struct crypt_device *cd, struct dm_target *tgt,
+ uint32_t get_flags, char *params)
+{
+ uint64_t val64;
+ char *rdevice, *arg;
+ int r;
+ struct device *device = NULL;
+
+ /* device */
+ rdevice = strsep(&params, " ");
+ if (get_flags & DM_ACTIVE_DEVICE) {
+ arg = crypt_lookup_dev(rdevice);
+ r = device_alloc(cd, &device, arg);
+ free(arg);
+ if (r < 0 && r != -ENOTBLK)
+ return r;
+ }
+
+ r = -EINVAL;
+
+	/* offset */
+ if (!params)
+ goto err;
+ val64 = strtoull(params, &params, 10);
+
+ /* params should be empty now */
+ if (*params)
+ goto err;
+
+ tgt->type = DM_LINEAR;
+ tgt->direction = TARGET_QUERY;
+ tgt->data_device = device;
+ tgt->u.linear.offset = val64;
+
+ return 0;
+err:
+ device_free(cd, device);
+ return r;
+}
+
+static int _dm_target_query_error(struct dm_target *tgt)
+{
+ tgt->type = DM_ERROR;
+ tgt->direction = TARGET_QUERY;
+
+ return 0;
+}
+
+static int _dm_target_query_zero(struct dm_target *tgt)
+{
+ tgt->type = DM_ZERO;
+ tgt->direction = TARGET_QUERY;
+
+ return 0;
+}
+
+/*
+ * On error, the return value has to be negative.
+ *
+ * Note that currently none of the _dm_target_query functions performs cleanup on error.
+ */
+static int dm_target_query(struct crypt_device *cd, struct dm_target *tgt, const uint64_t *start,
+ const uint64_t *length, const char *target_type,
+ char *params, uint32_t get_flags, uint32_t *act_flags)
+{
+ int r = -ENOTSUP;
+
+ if (!strcmp(target_type, DM_CRYPT_TARGET))
+ r = _dm_target_query_crypt(cd, get_flags, params, tgt, act_flags);
+ else if (!strcmp(target_type, DM_VERITY_TARGET))
+ r = _dm_target_query_verity(cd, get_flags, params, tgt, act_flags);
+ else if (!strcmp(target_type, DM_INTEGRITY_TARGET))
+ r = _dm_target_query_integrity(cd, get_flags, params, tgt, act_flags);
+ else if (!strcmp(target_type, DM_LINEAR_TARGET))
+ r = _dm_target_query_linear(cd, tgt, get_flags, params);
+ else if (!strcmp(target_type, DM_ERROR_TARGET))
+ r = _dm_target_query_error(tgt);
+ else if (!strcmp(target_type, DM_ZERO_TARGET))
+ r = _dm_target_query_zero(tgt);
+
+ if (!r) {
+ tgt->offset = *start;
+ tgt->size = *length;
+ }
+
+ return r;
+}
+
+static int _dm_query_device(struct crypt_device *cd, const char *name,
+ uint32_t get_flags, struct crypt_dm_active_device *dmd)
+{
+ struct dm_target *t;
+ struct dm_task *dmt;
+ struct dm_info dmi;
+ uint64_t start, length;
+ char *target_type, *params;
+ const char *tmp_uuid;
+ void *next = NULL;
+ int r = -EINVAL;
+
+ t = &dmd->segment;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
+ return r;
+ if (!dm_task_secure_data(dmt))
+ goto out;
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+ r = -ENODEV;
+ if (!dm_task_run(dmt))
+ goto out;
+
+ r = -EINVAL;
+ if (!dm_task_get_info(dmt, &dmi))
+ goto out;
+
+ if (!dmi.exists) {
+ r = -ENODEV;
+ goto out;
+ }
+
+ if (dmi.target_count <= 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+	/* Never allow returning an empty key */
+ if ((get_flags & DM_ACTIVE_CRYPT_KEY) && dmi.suspended) {
+ log_dbg(cd, "Cannot read volume key while suspended.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = dm_targets_allocate(&dmd->segment, dmi.target_count);
+ if (r)
+ goto out;
+
+ do {
+ next = dm_get_next_target(dmt, next, &start, &length,
+ &target_type, &params);
+
+ r = dm_target_query(cd, t, &start, &length, target_type, params, get_flags, &dmd->flags);
+ if (!r && t->type == DM_VERITY) {
+ r = _dm_status_verity_ok(cd, name);
+ if (r == 0)
+ dmd->flags |= CRYPT_ACTIVATE_CORRUPTED;
+ }
+
+ if (r < 0) {
+ if (r != -ENOTSUP)
+ log_err(cd, _("Failed to query dm-%s segment."), target_type);
+ goto out;
+ }
+
+ dmd->size += length;
+ t = t->next;
+ } while (next && t);
+
+ if (dmi.read_only)
+ dmd->flags |= CRYPT_ACTIVATE_READONLY;
+
+ if (dmi.suspended)
+ dmd->flags |= CRYPT_ACTIVATE_SUSPENDED;
+
+ tmp_uuid = dm_task_get_uuid(dmt);
+ if (!tmp_uuid)
+ dmd->flags |= CRYPT_ACTIVATE_NO_UUID;
+ else if (get_flags & DM_ACTIVE_UUID) {
+ if (!strncmp(tmp_uuid, DM_UUID_PREFIX, DM_UUID_PREFIX_LEN))
+ dmd->uuid = strdup(tmp_uuid + DM_UUID_PREFIX_LEN);
+ }
+
+ dmd->holders = 0;
+#if (HAVE_DECL_DM_DEVICE_HAS_HOLDERS && HAVE_DECL_DM_DEVICE_HAS_MOUNTED_FS)
+ if (get_flags & DM_ACTIVE_HOLDERS)
+ dmd->holders = (dm_device_has_mounted_fs(dmi.major, dmi.minor) ||
+ dm_device_has_holders(dmi.major, dmi.minor));
+#endif
+
+ r = (dmi.open_count > 0);
+out:
+ dm_task_destroy(dmt);
+
+ if (r < 0)
+ dm_targets_free(cd, dmd);
+
+ return r;
+}
+
+int dm_query_device(struct crypt_device *cd, const char *name,
+ uint32_t get_flags, struct crypt_dm_active_device *dmd)
+{
+ int r;
+
+ if (!dmd)
+ return -EINVAL;
+
+ memset(dmd, 0, sizeof(*dmd));
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+
+ r = _dm_query_device(cd, name, get_flags, dmd);
+
+ dm_exit_context();
+ return r;
+}
+
+static int _process_deps(struct crypt_device *cd, const char *prefix, struct dm_deps *deps,
+ char **names, size_t names_offset, size_t names_length)
+{
+#if HAVE_DECL_DM_DEVICE_GET_NAME
+ struct crypt_dm_active_device dmd;
+ char dmname[PATH_MAX];
+ unsigned i;
+ int r, major, minor, count = 0;
+
+ if (!prefix || !deps)
+ return -EINVAL;
+
+ for (i = 0; i < deps->count; i++) {
+ major = major(deps->device[i]);
+ if (!dm_is_dm_major(major))
+ continue;
+
+ minor = minor(deps->device[i]);
+ if (!dm_device_get_name(major, minor, 0, dmname, PATH_MAX))
+ return -EINVAL;
+
+ memset(&dmd, 0, sizeof(dmd));
+ r = _dm_query_device(cd, dmname, DM_ACTIVE_UUID, &dmd);
+ if (r < 0)
+ continue;
+
+ if (!dmd.uuid ||
+ strncmp(prefix, dmd.uuid, strlen(prefix)) ||
+ crypt_string_in(dmname, names, names_length))
+ *dmname = '\0';
+
+ dm_targets_free(cd, &dmd);
+ free(CONST_CAST(void*)dmd.uuid);
+
+ if ((size_t)count >= (names_length - names_offset))
+ return -ENOMEM;
+
+ if (*dmname && !(names[names_offset + count++] = strdup(dmname)))
+ return -ENOMEM;
+ }
+
+ return count;
+#else
+ return -EINVAL;
+#endif
+}
+
+int dm_device_deps(struct crypt_device *cd, const char *name, const char *prefix,
+ char **names, size_t names_length)
+{
+ struct dm_task *dmt;
+ struct dm_info dmi;
+ struct dm_deps *deps;
+ int r = -EINVAL;
+ size_t i, last = 0, offset = 0;
+
+ if (!name || !names_length || !names)
+ return -EINVAL;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+
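+	/* Breadth-first walk: names[] serves both as the result list and as the queue of devices left to inspect */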
+ while (name) {
+ if (!(dmt = dm_task_create(DM_DEVICE_DEPS)))
+ goto out;
+ if (!dm_task_set_name(dmt, name))
+ goto out;
+
+ r = -ENODEV;
+ if (!dm_task_run(dmt))
+ goto out;
+
+ r = -EINVAL;
+ if (!dm_task_get_info(dmt, &dmi))
+ goto out;
+ if (!(deps = dm_task_get_deps(dmt)))
+ goto out;
+
+ r = -ENODEV;
+ if (!dmi.exists)
+ goto out;
+
+ r = _process_deps(cd, prefix, deps, names, offset, names_length - 1);
+ if (r < 0)
+ goto out;
+
+ dm_task_destroy(dmt);
+ dmt = NULL;
+
+ offset += r;
+ name = names[last++];
+ }
+
+ r = 0;
+out:
+ if (r < 0) {
+ for (i = 0; i < names_length - 1; i++)
+ free(names[i]);
+ *names = NULL;
+ }
+
+ if (dmt)
+ dm_task_destroy(dmt);
+
+ dm_exit_context();
+ return r;
+}
+
+static int _dm_message(const char *name, const char *msg)
+{
+ int r = 0;
+ struct dm_task *dmt;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
+ return 0;
+
+ if (!dm_task_secure_data(dmt))
+ goto out;
+
+ if (name && !dm_task_set_name(dmt, name))
+ goto out;
+
+ if (!dm_task_set_sector(dmt, (uint64_t) 0))
+ goto out;
+
+ if (!dm_task_set_message(dmt, msg))
+ goto out;
+
+ r = dm_task_run(dmt);
+out:
+ dm_task_destroy(dmt);
+ return r;
+}
+
+int dm_suspend_device(struct crypt_device *cd, const char *name, uint32_t dmflags)
+{
+ uint32_t dmt_flags;
+ int r = -ENOTSUP;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return r;
+
+ if (dmflags & DM_SUSPEND_WIPE_KEY) {
+ if (dm_flags(cd, DM_CRYPT, &dmt_flags))
+ goto out;
+
+ if (!(dmt_flags & DM_KEY_WIPE_SUPPORTED))
+ goto out;
+ }
+
+ r = -EINVAL;
+
+ if (!_dm_simple(DM_DEVICE_SUSPEND, name, dmflags))
+ goto out;
+
+ if (dmflags & DM_SUSPEND_WIPE_KEY) {
+ if (!_dm_message(name, "key wipe")) {
+ _dm_resume_device(name, 0);
+ goto out;
+ }
+ }
+
+ r = 0;
+out:
+ dm_exit_context();
+ return r;
+}
+
+int dm_resume_device(struct crypt_device *cd, const char *name, uint32_t dmflags)
+{
+ int r;
+
+ if (dm_init_context(cd, DM_UNKNOWN))
+ return -ENOTSUP;
+
+ r = _dm_resume_device(name, dmflags);
+
+ dm_exit_context();
+
+ return r;
+}
+
+int dm_resume_and_reinstate_key(struct crypt_device *cd, const char *name,
+ const struct volume_key *vk)
+{
+ uint32_t dmt_flags;
+ int msg_size;
+ char *msg = NULL, *key = NULL;
+ int r = -ENOTSUP;
+
+ if (dm_init_context(cd, DM_CRYPT) || dm_flags(cd, DM_CRYPT, &dmt_flags))
+ return -ENOTSUP;
+
+ if (!(dmt_flags & DM_KEY_WIPE_SUPPORTED))
+ goto out;
+
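+	/* Message is "key set <hex_key>" or, for a keyring key, "key set :<size>:logon:<description>" */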
+ if (!vk->keylength)
+ msg_size = 11; // key set -
+ else if (vk->key_description)
+ msg_size = strlen(vk->key_description) + int_log10(vk->keylength) + 18;
+ else
+ msg_size = vk->keylength * 2 + 10; // key set <key>
+
+ msg = crypt_safe_alloc(msg_size);
+ if (!msg) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (vk->key_description) {
+ r = snprintf(msg, msg_size, "key set :%zu:logon:%s", vk->keylength, vk->key_description);
+ } else {
+ key = crypt_bytes_to_hex(vk->keylength, vk->key);
+ if (!key) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = snprintf(msg, msg_size, "key set %s", key);
+ }
+ if (r < 0 || r >= msg_size) {
+ r = -EINVAL;
+ goto out;
+ }
+ if (!_dm_message(name, msg) ||
+ _dm_resume_device(name, 0)) {
+ r = -EINVAL;
+ goto out;
+ }
+ r = 0;
+out:
+ crypt_safe_free(msg);
+ crypt_safe_free(key);
+ dm_exit_context();
+ return r;
+}
+
+int dm_cancel_deferred_removal(const char *name)
+{
+ return _dm_message(name, "@cancel_deferred_remove") ? 0 : -ENOTSUP;
+}
+
+const char *dm_get_dir(void)
+{
+ return dm_dir();
+}
+
+int dm_is_dm_device(int major)
+{
+ return dm_is_dm_major((uint32_t)major);
+}
+
+int dm_is_dm_kernel_name(const char *name)
+{
+ return strncmp(name, "dm-", 3) ? 0 : 1;
+}
+
+int dm_crypt_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *data_device, struct volume_key *vk, const char *cipher,
+ uint64_t iv_offset, uint64_t data_offset, const char *integrity, uint32_t tag_size,
+ uint32_t sector_size)
+{
+ char *dm_integrity = NULL;
+
+ if (tag_size) {
+ /* Space for IV metadata only */
+ dm_integrity = strdup(integrity ?: "none");
+ if (!dm_integrity)
+ return -ENOMEM;
+ }
+
+ tgt->data_device = data_device;
+
+ tgt->type = DM_CRYPT;
+ tgt->direction = TARGET_SET;
+ tgt->u.crypt.vk = vk;
+ tgt->offset = seg_offset;
+ tgt->size = seg_size;
+
+ tgt->u.crypt.cipher = cipher;
+ tgt->u.crypt.integrity = dm_integrity;
+ tgt->u.crypt.iv_offset = iv_offset;
+ tgt->u.crypt.offset = data_offset;
+ tgt->u.crypt.tag_size = tag_size;
+ tgt->u.crypt.sector_size = sector_size;
+
+ return 0;
+}
+
+int dm_verity_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *data_device, struct device *hash_device, struct device *fec_device,
+ const char *root_hash, uint32_t root_hash_size, const char* root_hash_sig_key_desc,
+ uint64_t hash_offset_block, uint64_t fec_blocks, struct crypt_params_verity *vp)
+{
+ if (!data_device || !hash_device || !vp)
+ return -EINVAL;
+
+ tgt->type = DM_VERITY;
+ tgt->direction = TARGET_SET;
+ tgt->offset = seg_offset;
+ tgt->size = seg_size;
+ tgt->data_device = data_device;
+
+ tgt->u.verity.hash_device = hash_device;
+ tgt->u.verity.fec_device = fec_device;
+ tgt->u.verity.root_hash = root_hash;
+ tgt->u.verity.root_hash_size = root_hash_size;
+ tgt->u.verity.root_hash_sig_key_desc = root_hash_sig_key_desc;
+ tgt->u.verity.hash_offset = hash_offset_block;
+ tgt->u.verity.fec_offset = vp->fec_area_offset / vp->hash_block_size;
+ tgt->u.verity.fec_blocks = fec_blocks;
+ tgt->u.verity.vp = vp;
+
+ return 0;
+}
+
+int dm_integrity_target_set(struct crypt_device *cd,
+ struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *meta_device,
+ struct device *data_device, uint64_t tag_size, uint64_t offset,
+ uint32_t sector_size, struct volume_key *vk,
+ struct volume_key *journal_crypt_key, struct volume_key *journal_mac_key,
+ const struct crypt_params_integrity *ip)
+{
+ uint32_t dmi_flags;
+
+ if (!data_device)
+ return -EINVAL;
+
+ _dm_check_versions(cd, DM_INTEGRITY);
+
+ tgt->type = DM_INTEGRITY;
+ tgt->direction = TARGET_SET;
+ tgt->offset = seg_offset;
+ tgt->size = seg_size;
+ tgt->data_device = data_device;
+ if (meta_device != data_device)
+ tgt->u.integrity.meta_device = meta_device;
+ tgt->u.integrity.tag_size = tag_size;
+ tgt->u.integrity.offset = offset;
+ tgt->u.integrity.sector_size = sector_size;
+
+ tgt->u.integrity.vk = vk;
+ tgt->u.integrity.journal_crypt_key = journal_crypt_key;
+ tgt->u.integrity.journal_integrity_key = journal_mac_key;
+
+ if (!dm_flags(cd, DM_INTEGRITY, &dmi_flags) &&
+ (dmi_flags & DM_INTEGRITY_FIX_PADDING_SUPPORTED) &&
+ !(crypt_get_compatibility(cd) & CRYPT_COMPAT_LEGACY_INTEGRITY_PADDING))
+ tgt->u.integrity.fix_padding = true;
+
+ if (!dm_flags(cd, DM_INTEGRITY, &dmi_flags) &&
+ (dmi_flags & DM_INTEGRITY_FIX_HMAC_SUPPORTED) &&
+ !(crypt_get_compatibility(cd) & CRYPT_COMPAT_LEGACY_INTEGRITY_HMAC))
+ tgt->u.integrity.fix_hmac = true;
+
+ /* This flag can be backported, just try to set it always */
+ if (crypt_get_compatibility(cd) & CRYPT_COMPAT_LEGACY_INTEGRITY_RECALC)
+ tgt->u.integrity.legacy_recalc = true;
+
+ if (ip) {
+ tgt->u.integrity.journal_size = ip->journal_size;
+ tgt->u.integrity.journal_watermark = ip->journal_watermark;
+ tgt->u.integrity.journal_commit_time = ip->journal_commit_time;
+ tgt->u.integrity.interleave_sectors = ip->interleave_sectors;
+ tgt->u.integrity.buffer_sectors = ip->buffer_sectors;
+ tgt->u.integrity.journal_integrity = ip->journal_integrity;
+ tgt->u.integrity.journal_crypt = ip->journal_crypt;
+ tgt->u.integrity.integrity = ip->integrity;
+ }
+
+ return 0;
+}
+
+int dm_linear_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *data_device, uint64_t data_offset)
+{
+ if (!data_device)
+ return -EINVAL;
+
+ tgt->type = DM_LINEAR;
+ tgt->direction = TARGET_SET;
+ tgt->offset = seg_offset;
+ tgt->size = seg_size;
+ tgt->data_device = data_device;
+
+ tgt->u.linear.offset = data_offset;
+
+ return 0;
+}
+
+int dm_zero_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size)
+{
+ tgt->type = DM_ZERO;
+ tgt->direction = TARGET_SET;
+ tgt->offset = seg_offset;
+ tgt->size = seg_size;
+
+ return 0;
+}
diff --git a/lib/loopaes/loopaes.c b/lib/loopaes/loopaes.c
new file mode 100644
index 0000000..224d3d0
--- /dev/null
+++ b/lib/loopaes/loopaes.c
@@ -0,0 +1,253 @@
+/*
+ * loop-AES compatible volume handling
+ *
+ * Copyright (C) 2011-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "libcryptsetup.h"
+#include "loopaes.h"
+#include "internal.h"
+
+static const char *get_hash(unsigned int key_size)
+{
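+	/* loop-AES convention: the hash is selected by key size; its digest is truncated to the key length in hash_key() */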
+ const char *hash;
+
+ switch (key_size) {
+ case 16: hash = "sha256"; break;
+ case 24: hash = "sha384"; break;
+ case 32: hash = "sha512"; break;
+ default: hash = NULL;
+ }
+
+ return hash;
+}
+
+static unsigned char get_tweak(unsigned int keys_count)
+{
+ switch (keys_count) {
+ case 64: return 0x55;
+ case 65: return 0xF4;
+ default: break;
+ }
+ return 0x00;
+}
+
+static int hash_key(const char *src, size_t src_len,
+ char *dst, size_t dst_len,
+ const char *hash_name)
+{
+ struct crypt_hash *hd = NULL;
+ int r;
+
+ if (crypt_hash_init(&hd, hash_name))
+ return -EINVAL;
+
+ r = crypt_hash_write(hd, src, src_len);
+ if (!r)
+ r = crypt_hash_final(hd, dst, dst_len);
+
+ crypt_hash_destroy(hd);
+ return r;
+}
+
+static int hash_keys(struct crypt_device *cd,
+ struct volume_key **vk,
+ const char *hash_override,
+ const char **input_keys,
+ unsigned int keys_count,
+ unsigned int key_len_output,
+ unsigned int key_len_input)
+{
+ const char *hash_name;
+ char tweak, *key_ptr;
+ unsigned int i;
+ int r = 0;
+
+ hash_name = hash_override ?: get_hash(key_len_output);
+ tweak = get_tweak(keys_count);
+
+ if (!keys_count || !key_len_output || !hash_name || !key_len_input) {
+ log_err(cd, _("Key processing error (using hash %s)."),
+ hash_name ?: "[none]");
+ return -EINVAL;
+ }
+
+ *vk = crypt_alloc_volume_key((size_t)key_len_output * keys_count, NULL);
+ if (!*vk)
+ return -ENOMEM;
+
+ for (i = 0; i < keys_count; i++) {
+ key_ptr = &(*vk)->key[i * key_len_output];
+ r = hash_key(input_keys[i], key_len_input, key_ptr,
+ key_len_output, hash_name);
+ if (r < 0)
+ break;
+
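+		/* loop-AES compatibility: XOR a fixed tweak byte into the first byte of each hashed key */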
+ key_ptr[0] ^= tweak;
+ }
+
+ if (r < 0 && *vk) {
+ crypt_free_volume_key(*vk);
+ *vk = NULL;
+ }
+ return r;
+}
+
+static int keyfile_is_gpg(char *buffer, size_t buffer_len)
+{
+ int r = 0;
+ int index = buffer_len < 100 ? buffer_len - 1 : 100;
+ char eos = buffer[index];
+
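+	/* Inspect only the first ~100 bytes; temporarily NUL-terminate so strstr() stays bounded */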
+ buffer[index] = '\0';
+ if (strstr(buffer, "BEGIN PGP MESSAGE"))
+ r = 1;
+ buffer[index] = eos;
+ return r;
+}
+
+int LOOPAES_parse_keyfile(struct crypt_device *cd,
+ struct volume_key **vk,
+ const char *hash,
+ unsigned int *keys_count,
+ char *buffer,
+ size_t buffer_len)
+{
+ const char *keys[LOOPAES_KEYS_MAX];
+ unsigned int key_lengths[LOOPAES_KEYS_MAX];
+ unsigned int i, key_index, key_len, offset;
+
+ log_dbg(cd, "Parsing loop-AES keyfile of size %zu.", buffer_len);
+
+ if (!buffer_len)
+ return -EINVAL;
+
+ if (keyfile_is_gpg(buffer, buffer_len)) {
+ log_err(cd, _("Detected not yet supported GPG encrypted keyfile."));
+ log_std(cd, _("Please use gpg --decrypt <KEYFILE> | cryptsetup --keyfile=- ...\n"));
+ return -EINVAL;
+ }
+
+	/* Replace EOL characters in buffer with NUL terminators */
+ for (i = 0; i < buffer_len; i++)
+ if (buffer[i] == '\n' || buffer[i] == '\r')
+ buffer[i] = '\0';
+
+ offset = 0;
+ key_index = 0;
+ key_lengths[0] = 0;
+ while (offset < buffer_len && key_index < LOOPAES_KEYS_MAX) {
+ keys[key_index] = &buffer[offset];
+		key_lengths[key_index] = 0;
+ while (offset < buffer_len && buffer[offset]) {
+ offset++;
+ key_lengths[key_index]++;
+ }
+ if (offset == buffer_len) {
+ log_dbg(cd, "Unterminated key #%d in keyfile.", key_index);
+ log_err(cd, _("Incompatible loop-AES keyfile detected."));
+ return -EINVAL;
+ }
+ while (offset < buffer_len && !buffer[offset])
+ offset++;
+ key_index++;
+ }
+
+ /* All keys must be the same length */
+ key_len = key_lengths[0];
+ for (i = 0; i < key_index; i++)
+ if (!key_lengths[i] || (key_lengths[i] != key_len)) {
+ log_dbg(cd, "Unexpected length %d of key #%d (should be %d).",
+ key_lengths[i], i, key_len);
+ key_len = 0;
+ break;
+ }
+
+ if (offset != buffer_len || key_len == 0 ||
+	    (key_index != 1 && key_index != 64 && key_index != 65)) {
+ log_err(cd, _("Incompatible loop-AES keyfile detected."));
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Keyfile: %d keys of length %d.", key_index, key_len);
+
+ *keys_count = key_index;
+ return hash_keys(cd, vk, hash, keys, key_index,
+ crypt_get_volume_key_size(cd), key_len);
+}
+
+int LOOPAES_activate(struct crypt_device *cd,
+ const char *name,
+ const char *base_cipher,
+ unsigned int keys_count,
+ struct volume_key *vk,
+ uint32_t flags)
+{
+ int r;
+ uint32_t req_flags, dmc_flags;
+ char *cipher = NULL;
+ struct crypt_dm_active_device dmd = {
+ .flags = flags,
+ };
+
+ r = device_block_adjust(cd, crypt_data_device(cd), DEV_EXCL,
+ crypt_get_data_offset(cd), &dmd.size, &dmd.flags);
+ if (r)
+ return r;
+
+ if (keys_count == 1) {
+ req_flags = DM_PLAIN64_SUPPORTED;
+ r = asprintf(&cipher, "%s-%s", base_cipher, "cbc-plain64");
+ } else {
+ req_flags = DM_LMK_SUPPORTED;
+ r = asprintf(&cipher, "%s:%d-%s", base_cipher, 64, "cbc-lmk");
+ }
+ if (r < 0)
+ return -ENOMEM;
+
+ r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
+ vk, cipher, crypt_get_iv_offset(cd),
+ crypt_get_data_offset(cd), crypt_get_integrity(cd),
+ crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
+
+ if (r) {
+ free(cipher);
+ return r;
+ }
+
+ log_dbg(cd, "Trying to activate loop-AES device %s using cipher %s.",
+ name, cipher);
+
+ r = dm_create_device(cd, name, CRYPT_LOOPAES, &dmd);
+
+ if (r < 0 && !dm_flags(cd, DM_CRYPT, &dmc_flags) &&
+ (dmc_flags & req_flags) != req_flags) {
+ log_err(cd, _("Kernel does not support loop-AES compatible mapping."));
+ r = -ENOTSUP;
+ }
+
+ dm_targets_free(cd, &dmd);
+ free(cipher);
+
+ return r;
+}
diff --git a/lib/loopaes/loopaes.h b/lib/loopaes/loopaes.h
new file mode 100644
index 0000000..a921694
--- /dev/null
+++ b/lib/loopaes/loopaes.h
@@ -0,0 +1,46 @@
+/*
+ * loop-AES compatible volume handling
+ *
+ * Copyright (C) 2011-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _LOOPAES_H
+#define _LOOPAES_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+struct crypt_device;
+struct volume_key;
+
+#define LOOPAES_KEYS_MAX 65
+
+int LOOPAES_parse_keyfile(struct crypt_device *cd,
+ struct volume_key **vk,
+ const char *hash,
+ unsigned int *keys_count,
+ char *buffer,
+ size_t buffer_len);
+
+int LOOPAES_activate(struct crypt_device *cd,
+ const char *name,
+ const char *base_cipher,
+ unsigned int keys_count,
+ struct volume_key *vk,
+ uint32_t flags);
+#endif
diff --git a/lib/luks1/af.c b/lib/luks1/af.c
new file mode 100644
index 0000000..76afeac
--- /dev/null
+++ b/lib/luks1/af.c
@@ -0,0 +1,170 @@
+/*
+ * AFsplitter - Anti forensic information splitter
+ *
+ * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ *
+ * AFsplitter diffuses information over a large stripe of data,
+ * therefore supporting secure data destruction.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "internal.h"
+#include "af.h"
+
+static void XORblock(const char *src1, const char *src2, char *dst, size_t n)
+{
+ size_t j;
+
+ for (j = 0; j < n; j++)
+ dst[j] = src1[j] ^ src2[j];
+}
+
+static int hash_buf(const char *src, char *dst, uint32_t iv,
+ size_t len, const char *hash_name)
+{
+ struct crypt_hash *hd = NULL;
+ char *iv_char = (char *)&iv;
+ int r;
+
+ iv = be32_to_cpu(iv);
+ if (crypt_hash_init(&hd, hash_name))
+ return -EINVAL;
+
+ if ((r = crypt_hash_write(hd, iv_char, sizeof(uint32_t))))
+ goto out;
+
+ if ((r = crypt_hash_write(hd, src, len)))
+ goto out;
+
+ r = crypt_hash_final(hd, dst, len);
+out:
+ crypt_hash_destroy(hd);
+ return r;
+}
+
+/*
+ * diffuse: Information spreading over the whole dataset with
+ * the help of a hash function: output block i is the hash of
+ * the big-endian block index i concatenated with input block i.
+ */
+static int diffuse(char *src, char *dst, size_t size, const char *hash_name)
+{
+ int r, hash_size = crypt_hash_size(hash_name);
+ unsigned int digest_size;
+ unsigned int i, blocks, padding;
+
+ if (hash_size <= 0)
+ return -EINVAL;
+ digest_size = hash_size;
+
+ blocks = size / digest_size;
+ padding = size % digest_size;
+
+ for (i = 0; i < blocks; i++) {
+ r = hash_buf(src + digest_size * i,
+ dst + digest_size * i,
+ i, (size_t)digest_size, hash_name);
+ if (r < 0)
+ return r;
+ }
+
+ if (padding) {
+ r = hash_buf(src + digest_size * i,
+ dst + digest_size * i,
+ i, (size_t)padding, hash_name);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Information splitting. The amount of data is multiplied by
+ * blocknumbers. The same blocksize and blocknumbers values
+ * must be supplied to AF_merge to recover information.
+ */
+int AF_split(struct crypt_device *ctx, const char *src, char *dst,
+ size_t blocksize, unsigned int blocknumbers, const char *hash)
+{
+ unsigned int i;
+ char *bufblock;
+ int r;
+
+ bufblock = crypt_safe_alloc(blocksize);
+ if (!bufblock)
+ return -ENOMEM;
+
+ /* process everything except the last block */
+ for (i = 0; i < blocknumbers - 1; i++) {
+ r = crypt_random_get(ctx, dst + blocksize * i, blocksize, CRYPT_RND_NORMAL);
+ if (r < 0)
+ goto out;
+
+ XORblock(dst + blocksize * i, bufblock, bufblock, blocksize);
+ r = diffuse(bufblock, bufblock, blocksize, hash);
+ if (r < 0)
+ goto out;
+ }
+ /* the last block is computed */
+ XORblock(src, bufblock, dst + blocksize * i, blocksize);
+ r = 0;
+out:
+ crypt_safe_free(bufblock);
+ return r;
+}
+
+int AF_merge(const char *src, char *dst,
+ size_t blocksize, unsigned int blocknumbers, const char *hash)
+{
+ unsigned int i;
+ char *bufblock;
+ int r;
+
+ bufblock = crypt_safe_alloc(blocksize);
+ if (!bufblock)
+ return -ENOMEM;
+
+ for (i = 0; i < blocknumbers - 1; i++) {
+ XORblock(src + blocksize * i, bufblock, bufblock, blocksize);
+ r = diffuse(bufblock, bufblock, blocksize, hash);
+ if (r < 0)
+ goto out;
+ }
+ XORblock(src + blocksize * i, bufblock, dst, blocksize);
+ r = 0;
+out:
+ crypt_safe_free(bufblock);
+ return r;
+}
+
+/* Size of the final split data, in sectors, including sector alignment */
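+/* e.g. a 32-byte volume key with 4000 stripes: 128000 bytes, i.e. 250 sectors of 512 bytes */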
+size_t AF_split_sectors(size_t blocksize, unsigned int blocknumbers)
+{
+ size_t af_size;
+
+ /* data material * stripes */
+ af_size = blocksize * blocknumbers;
+
+ /* round up to sector */
+ af_size = (af_size + (SECTOR_SIZE - 1)) / SECTOR_SIZE;
+
+ return af_size;
+}
diff --git a/lib/luks1/af.h b/lib/luks1/af.h
new file mode 100644
index 0000000..8a2bceb
--- /dev/null
+++ b/lib/luks1/af.h
@@ -0,0 +1,67 @@
+/*
+ * AFsplitter - Anti forensic information splitter
+ *
+ * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ *
+ * AFsplitter diffuses information over a large stripe of data,
+ * therefore supporting secure data destruction.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef INCLUDED_CRYPTSETUP_LUKS_AF_H
+#define INCLUDED_CRYPTSETUP_LUKS_AF_H
+
+#include <stddef.h>
+
+struct crypt_device;
+struct volume_key;
+
+/*
+ * AF_split operates on src and produces information split data in
+ * dst. src is assumed to be of the length blocksize. The data stripe
+ * dst points to must be capable of storing blocksize*blocknumbers.
+ * blocknumbers is the data multiplication factor.
+ *
+ * AF_merge does just the opposite: reproduces the information stored in
+ * src of the length blocksize*blocknumbers into dst of the length
+ * blocksize.
+ *
+ * On error, both functions return a negative errno value, 0 otherwise.
+ */
+
+int AF_split(struct crypt_device *ctx, const char *src, char *dst,
+ size_t blocksize, unsigned int blocknumbers, const char *hash);
+int AF_merge(const char *src, char *dst, size_t blocksize,
+ unsigned int blocknumbers, const char *hash);
+size_t AF_split_sectors(size_t blocksize, unsigned int blocknumbers);
+
+int LUKS_encrypt_to_storage(
+ char *src, size_t srcLength,
+ const char *cipher,
+ const char *cipher_mode,
+ struct volume_key *vk,
+ unsigned int sector,
+ struct crypt_device *ctx);
+
+int LUKS_decrypt_from_storage(
+ char *dst, size_t dstLength,
+ const char *cipher,
+ const char *cipher_mode,
+ struct volume_key *vk,
+ unsigned int sector,
+ struct crypt_device *ctx);
+
+#endif
diff --git a/lib/luks1/keyencryption.c b/lib/luks1/keyencryption.c
new file mode 100644
index 0000000..c1c8201
--- /dev/null
+++ b/lib/luks1/keyencryption.c
@@ -0,0 +1,268 @@
+/*
+ * LUKS - Linux Unified Key Setup
+ *
+ * Copyright (C) 2004-2006 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2012-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include "luks.h"
+#include "af.h"
+#include "internal.h"
+
+static void _error_hint(struct crypt_device *ctx, const char *device,
+ const char *cipher, const char *mode, size_t keyLength)
+{
+ char *c, cipher_spec[MAX_CIPHER_LEN * 3];
+
+ if (snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", cipher, mode) < 0)
+ return;
+
+ log_err(ctx, _("Failed to setup dm-crypt key mapping for device %s.\n"
+ "Check that kernel supports %s cipher (check syslog for more info)."),
+ device, cipher_spec);
+
+ if (!strncmp(mode, "xts", 3) && (keyLength != 256 && keyLength != 512))
+ log_err(ctx, _("Key size in XTS mode must be 256 or 512 bits."));
+ else if (!(c = strchr(mode, '-')) || strlen(c) < 4)
+ log_err(ctx, _("Cipher specification should be in [cipher]-[mode]-[iv] format."));
+}
+
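+/*
+ * Note: callers use this as a fallback when the userspace crypto backend
+ * cannot handle the cipher; the keyslot area is then mapped through a
+ * temporary dm-crypt device and accessed via the blockwise callback func.
+ */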
+static int LUKS_endec_template(char *src, size_t srcLength,
+ const char *cipher, const char *cipher_mode,
+ struct volume_key *vk,
+ unsigned int sector,
+ ssize_t (*func)(int, size_t, size_t, void *, size_t),
+ int mode,
+ struct crypt_device *ctx)
+{
+ char name[PATH_MAX], path[PATH_MAX];
+ char cipher_spec[MAX_CIPHER_LEN * 3];
+ struct crypt_dm_active_device dmd = {
+ .flags = CRYPT_ACTIVATE_PRIVATE,
+ };
+ int r, devfd = -1, remove_dev = 0;
+ size_t bsize, keyslot_alignment, alignment;
+
+ log_dbg(ctx, "Using dmcrypt to access keyslot area.");
+
+ bsize = device_block_size(ctx, crypt_metadata_device(ctx));
+ alignment = device_alignment(crypt_metadata_device(ctx));
+ if (!bsize || !alignment)
+ return -EINVAL;
+
+ if (bsize > LUKS_ALIGN_KEYSLOTS)
+ keyslot_alignment = LUKS_ALIGN_KEYSLOTS;
+ else
+ keyslot_alignment = bsize;
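+	/*
+	 * Illustrative example: for srcLength == 128000 bytes (a 32-byte key
+	 * split into 4000 stripes) and 4096-byte keyslot alignment, the next
+	 * line rounds up to 131072 bytes, i.e. dmd.size == 256 sectors.
+	 */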
+ dmd.size = size_round_up(srcLength, keyslot_alignment) / SECTOR_SIZE;
+
+ if (mode == O_RDONLY)
+ dmd.flags |= CRYPT_ACTIVATE_READONLY;
+
+ if (snprintf(name, sizeof(name), "temporary-cryptsetup-%d", getpid()) < 0)
+ return -ENOMEM;
+ if (snprintf(path, sizeof(path), "%s/%s", dm_get_dir(), name) < 0)
+ return -ENOMEM;
+ if (snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", cipher, cipher_mode) < 0)
+ return -ENOMEM;
+
+ r = device_block_adjust(ctx, crypt_metadata_device(ctx), DEV_OK,
+ sector, &dmd.size, &dmd.flags);
+ if (r < 0) {
+ log_err(ctx, _("Device %s does not exist or access denied."),
+ device_path(crypt_metadata_device(ctx)));
+ return -EIO;
+ }
+
+ if (mode != O_RDONLY && dmd.flags & CRYPT_ACTIVATE_READONLY) {
+ log_err(ctx, _("Cannot write to device %s, permission denied."),
+ device_path(crypt_metadata_device(ctx)));
+ return -EACCES;
+ }
+
+ r = dm_crypt_target_set(&dmd.segment, 0, dmd.size,
+ crypt_metadata_device(ctx), vk, cipher_spec, 0, sector,
+ NULL, 0, SECTOR_SIZE);
+ if (r)
+ goto out;
+
+ r = dm_create_device(ctx, name, "TEMP", &dmd);
+ if (r < 0) {
+ if (r != -EACCES && r != -ENOTSUP)
+ _error_hint(ctx, device_path(crypt_metadata_device(ctx)),
+ cipher, cipher_mode, vk->keylength * 8);
+ r = -EIO;
+ goto out;
+ }
+ remove_dev = 1;
+
+ devfd = open(path, mode | O_DIRECT | O_SYNC);
+ if (devfd == -1) {
+ log_err(ctx, _("Failed to open temporary keystore device."));
+ r = -EIO;
+ goto out;
+ }
+
+ r = func(devfd, bsize, alignment, src, srcLength);
+ if (r < 0) {
+ log_err(ctx, _("Failed to access temporary keystore device."));
+ r = -EIO;
+ } else
+ r = 0;
+out:
+ dm_targets_free(ctx, &dmd);
+ if (devfd != -1)
+ close(devfd);
+ if (remove_dev)
+ dm_remove_device(ctx, name, CRYPT_DEACTIVATE_FORCE);
+ return r;
+}
+
+int LUKS_encrypt_to_storage(char *src, size_t srcLength,
+ const char *cipher,
+ const char *cipher_mode,
+ struct volume_key *vk,
+ unsigned int sector,
+ struct crypt_device *ctx)
+{
+ struct device *device = crypt_metadata_device(ctx);
+ struct crypt_storage *s;
+ int devfd, r = 0;
+
+	/* Only whole-sector writes are supported */
+ if (MISALIGNED_512(srcLength))
+ return -EINVAL;
+
+ /* Encrypt buffer */
+ r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, vk->key, vk->keylength, false);
+
+ if (r)
+ log_dbg(ctx, "Userspace crypto wrapper cannot use %s-%s (%d).",
+ cipher, cipher_mode, r);
+
+ /* Fallback to old temporary dmcrypt device */
+ if (r == -ENOTSUP || r == -ENOENT)
+ return LUKS_endec_template(src, srcLength, cipher, cipher_mode,
+ vk, sector, write_blockwise, O_RDWR, ctx);
+
+ if (r) {
+ _error_hint(ctx, device_path(device), cipher, cipher_mode,
+ vk->keylength * 8);
+ return r;
+ }
+
+ log_dbg(ctx, "Using userspace crypto wrapper to access keyslot area.");
+
+ r = crypt_storage_encrypt(s, 0, srcLength, src);
+ crypt_storage_destroy(s);
+
+ if (r)
+ return r;
+
+ r = -EIO;
+
+ /* Write buffer to device */
+ if (device_is_locked(device))
+ devfd = device_open_locked(ctx, device, O_RDWR);
+ else
+ devfd = device_open(ctx, device, O_RDWR);
+ if (devfd < 0)
+ goto out;
+
+ if (write_lseek_blockwise(devfd, device_block_size(ctx, device),
+ device_alignment(device), src, srcLength,
+ sector * SECTOR_SIZE) < 0)
+ goto out;
+
+ r = 0;
+out:
+ device_sync(ctx, device);
+ if (r)
+ log_err(ctx, _("IO error while encrypting keyslot."));
+
+ return r;
+}
+
+int LUKS_decrypt_from_storage(char *dst, size_t dstLength,
+ const char *cipher,
+ const char *cipher_mode,
+ struct volume_key *vk,
+ unsigned int sector,
+ struct crypt_device *ctx)
+{
+ struct device *device = crypt_metadata_device(ctx);
+ struct crypt_storage *s;
+ struct stat st;
+ int devfd, r = 0;
+
+	/* Only whole-sector reads are supported */
+ if (MISALIGNED_512(dstLength))
+ return -EINVAL;
+
+ r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, vk->key, vk->keylength, false);
+
+ if (r)
+ log_dbg(ctx, "Userspace crypto wrapper cannot use %s-%s (%d).",
+ cipher, cipher_mode, r);
+
+ /* Fallback to old temporary dmcrypt device */
+ if (r == -ENOTSUP || r == -ENOENT)
+ return LUKS_endec_template(dst, dstLength, cipher, cipher_mode,
+ vk, sector, read_blockwise, O_RDONLY, ctx);
+
+ if (r) {
+ _error_hint(ctx, device_path(device), cipher, cipher_mode,
+ vk->keylength * 8);
+ return r;
+ }
+
+ log_dbg(ctx, "Using userspace crypto wrapper to access keyslot area.");
+
+ /* Read buffer from device */
+ if (device_is_locked(device))
+ devfd = device_open_locked(ctx, device, O_RDONLY);
+ else
+ devfd = device_open(ctx, device, O_RDONLY);
+ if (devfd < 0) {
+ log_err(ctx, _("Cannot open device %s."), device_path(device));
+ crypt_storage_destroy(s);
+ return -EIO;
+ }
+
+ if (read_lseek_blockwise(devfd, device_block_size(ctx, device),
+ device_alignment(device), dst, dstLength,
+ sector * SECTOR_SIZE) < 0) {
+ if (!fstat(devfd, &st) && (st.st_size < (off_t)dstLength))
+ log_err(ctx, _("Device %s is too small."), device_path(device));
+ else
+ log_err(ctx, _("IO error while decrypting keyslot."));
+
+ crypt_storage_destroy(s);
+ return -EIO;
+ }
+
+ /* Decrypt buffer */
+ r = crypt_storage_decrypt(s, 0, dstLength, dst);
+ crypt_storage_destroy(s);
+
+ return r;
+}
diff --git a/lib/luks1/keymanage.c b/lib/luks1/keymanage.c
new file mode 100644
index 0000000..fe49a00
--- /dev/null
+++ b/lib/luks1/keymanage.c
@@ -0,0 +1,1300 @@
+/*
+ * LUKS - Linux Unified Key Setup
+ *
+ * Copyright (C) 2004-2006 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <uuid/uuid.h>
+#include <limits.h>
+
+#include "luks.h"
+#include "af.h"
+#include "internal.h"
+
+int LUKS_keyslot_area(const struct luks_phdr *hdr,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length)
+{
+ if(keyslot >= LUKS_NUMKEYS || keyslot < 0)
+ return -EINVAL;
+
+ *offset = (uint64_t)hdr->keyblock[keyslot].keyMaterialOffset * SECTOR_SIZE;
+ *length = AF_split_sectors(hdr->keyBytes, LUKS_STRIPES) * SECTOR_SIZE;
+
+ return 0;
+}
+
+/* Insertion sort: the array has only 8 elements and is usually almost sorted. */
+static void LUKS_sort_keyslots(const struct luks_phdr *hdr, int *array)
+{
+ int i, j, x;
+
+ for (i = 1; i < LUKS_NUMKEYS; i++) {
+ j = i;
+ while (j > 0 && hdr->keyblock[array[j-1]].keyMaterialOffset > hdr->keyblock[array[j]].keyMaterialOffset) {
+ x = array[j];
+ array[j] = array[j-1];
+ array[j-1] = x;
+ j--;
+ }
+ }
+}
+
+static int _is_not_lower(char *str, unsigned max_len)
+{
+ for(; *str && max_len; str++, max_len--)
+ if (isupper(*str))
+ return 1;
+ return 0;
+}
+
+static int _to_lower(char *str, unsigned max_len)
+{
+ int r = 0;
+
+ for(; *str && max_len; str++, max_len--)
+ if (isupper(*str)) {
+ *str = tolower(*str);
+ r = 1;
+ }
+
+ return r;
+}
+
+size_t LUKS_device_sectors(const struct luks_phdr *hdr)
+{
+ int sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+ LUKS_sort_keyslots(hdr, sorted_areas);
+
+ return hdr->keyblock[sorted_areas[LUKS_NUMKEYS-1]].keyMaterialOffset + AF_split_sectors(hdr->keyBytes, LUKS_STRIPES);
+}
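+
+/*
+ * Illustrative layout for a 32-byte volume key: each keyslot needs
+ * AF_split_sectors(32, 4000) == 250 sectors, keyslots start at 4 KiB
+ * (8-sector) boundaries, so the slot offsets are 8, 264, 520, ... and
+ * the whole header occupies 8 + 7*256 + 250 == 2050 sectors.
+ */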
+
+size_t LUKS_keyslots_offset(const struct luks_phdr *hdr)
+{
+ int sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+ LUKS_sort_keyslots(hdr, sorted_areas);
+
+ return hdr->keyblock[sorted_areas[0]].keyMaterialOffset;
+}
+
+static int LUKS_check_device_size(struct crypt_device *ctx, const struct luks_phdr *hdr, int falloc)
+{
+ struct device *device = crypt_metadata_device(ctx);
+ uint64_t dev_sectors, hdr_sectors;
+
+ if (!hdr->keyBytes)
+ return -EINVAL;
+
+ if (device_size(device, &dev_sectors)) {
+ log_dbg(ctx, "Cannot get device size for device %s.", device_path(device));
+ return -EIO;
+ }
+
+ dev_sectors >>= SECTOR_SHIFT;
+ hdr_sectors = LUKS_device_sectors(hdr);
+ log_dbg(ctx, "Key length %u, device size %" PRIu64 " sectors, header size %"
+ PRIu64 " sectors.", hdr->keyBytes, dev_sectors, hdr_sectors);
+
+ if (hdr_sectors > dev_sectors) {
+		/* If it is a header file, increase its size */
+ if (falloc && !device_fallocate(device, hdr_sectors << SECTOR_SHIFT))
+ return 0;
+
+ log_err(ctx, _("Device %s is too small. (LUKS1 requires at least %" PRIu64 " bytes.)"),
+ device_path(device), hdr_sectors * SECTOR_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int LUKS_check_keyslots(struct crypt_device *ctx, const struct luks_phdr *phdr)
+{
+ int i, prev, next, sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ uint32_t secs_per_stripes = AF_split_sectors(phdr->keyBytes, LUKS_STRIPES);
+
+ LUKS_sort_keyslots(phdr, sorted_areas);
+
+ /* Check keyslot to prevent access outside of header and keyslot area */
+ for (i = 0; i < LUKS_NUMKEYS; i++) {
+ /* enforce stripes == 4000 */
+ if (phdr->keyblock[i].stripes != LUKS_STRIPES) {
+ log_dbg(ctx, "Invalid stripes count %u in keyslot %u.",
+ phdr->keyblock[i].stripes, i);
+ log_err(ctx, _("LUKS keyslot %u is invalid."), i);
+ return -1;
+ }
+
+		/* The first sectors contain the header itself */
+ if (phdr->keyblock[i].keyMaterialOffset * SECTOR_SIZE < sizeof(*phdr)) {
+ log_dbg(ctx, "Invalid offset %u in keyslot %u.",
+ phdr->keyblock[i].keyMaterialOffset, i);
+ log_err(ctx, _("LUKS keyslot %u is invalid."), i);
+ return -1;
+ }
+
+		/* Ignore the following check for a detached header, where the offset can be zero. */
+ if (phdr->payloadOffset == 0)
+ continue;
+
+ if (phdr->payloadOffset <= phdr->keyblock[i].keyMaterialOffset) {
+ log_dbg(ctx, "Invalid offset %u in keyslot %u (beyond data area offset %u).",
+ phdr->keyblock[i].keyMaterialOffset, i,
+ phdr->payloadOffset);
+ log_err(ctx, _("LUKS keyslot %u is invalid."), i);
+ return -1;
+ }
+
+ if (phdr->payloadOffset < (phdr->keyblock[i].keyMaterialOffset + secs_per_stripes)) {
+ log_dbg(ctx, "Invalid keyslot size %u (offset %u, stripes %u) in "
+ "keyslot %u (beyond data area offset %u).",
+ secs_per_stripes,
+ phdr->keyblock[i].keyMaterialOffset,
+ phdr->keyblock[i].stripes,
+ i, phdr->payloadOffset);
+ log_err(ctx, _("LUKS keyslot %u is invalid."), i);
+ return -1;
+ }
+ }
+
+ /* check no keyslot overlaps with each other */
+ for (i = 1; i < LUKS_NUMKEYS; i++) {
+ prev = sorted_areas[i-1];
+ next = sorted_areas[i];
+ if (phdr->keyblock[next].keyMaterialOffset <
+ (phdr->keyblock[prev].keyMaterialOffset + secs_per_stripes)) {
+ log_dbg(ctx, "Not enough space in LUKS keyslot %d.", prev);
+ log_err(ctx, _("LUKS keyslot %u is invalid."), prev);
+ return -1;
+ }
+ }
+	/* The last keyslot is intentionally not checked here; it is covered by the device size check */
+
+ return 0;
+}
+
+static const char *dbg_slot_state(crypt_keyslot_info ki)
+{
+ switch(ki) {
+ case CRYPT_SLOT_INACTIVE:
+ return "INACTIVE";
+ case CRYPT_SLOT_ACTIVE:
+ return "ACTIVE";
+ case CRYPT_SLOT_ACTIVE_LAST:
+ return "ACTIVE_LAST";
+ case CRYPT_SLOT_INVALID:
+ default:
+ return "INVALID";
+ }
+}
+
+int LUKS_hdr_backup(const char *backup_file, struct crypt_device *ctx)
+{
+ struct device *device = crypt_metadata_device(ctx);
+ struct luks_phdr hdr;
+ int fd, devfd, r = 0;
+ size_t hdr_size;
+ size_t buffer_size;
+ ssize_t ret;
+ char *buffer = NULL;
+
+ r = LUKS_read_phdr(&hdr, 1, 0, ctx);
+ if (r)
+ return r;
+
+ hdr_size = LUKS_device_sectors(&hdr) << SECTOR_SHIFT;
+ buffer_size = size_round_up(hdr_size, crypt_getpagesize());
+
+ buffer = malloc(buffer_size);
+ if (!buffer || hdr_size < LUKS_ALIGN_KEYSLOTS || hdr_size > buffer_size) {
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(buffer, 0, buffer_size);
+
+ log_dbg(ctx, "Storing backup of header (%zu bytes) and keyslot area (%zu bytes).",
+ sizeof(hdr), hdr_size - LUKS_ALIGN_KEYSLOTS);
+
+ log_dbg(ctx, "Output backup file size: %zu bytes.", buffer_size);
+
+ devfd = device_open(ctx, device, O_RDONLY);
+ if (devfd < 0) {
+ log_err(ctx, _("Device %s is not a valid LUKS device."), device_path(device));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (read_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
+ buffer, hdr_size, 0) < (ssize_t)hdr_size) {
+ r = -EIO;
+ goto out;
+ }
+
+	/* Wipe the unused area, so the backup cannot contain old signatures */
+ if (hdr.keyblock[0].keyMaterialOffset * SECTOR_SIZE == LUKS_ALIGN_KEYSLOTS)
+ memset(buffer + sizeof(hdr), 0, LUKS_ALIGN_KEYSLOTS - sizeof(hdr));
+
+ fd = open(backup_file, O_CREAT|O_EXCL|O_WRONLY, S_IRUSR);
+ if (fd == -1) {
+ if (errno == EEXIST)
+ log_err(ctx, _("Requested header backup file %s already exists."), backup_file);
+ else
+ log_err(ctx, _("Cannot create header backup file %s."), backup_file);
+ r = -EINVAL;
+ goto out;
+ }
+ ret = write_buffer(fd, buffer, buffer_size);
+ close(fd);
+ if (ret < (ssize_t)buffer_size) {
+ log_err(ctx, _("Cannot write header backup file %s."), backup_file);
+ r = -EIO;
+ goto out;
+ }
+
+ r = 0;
+out:
+ crypt_safe_memzero(&hdr, sizeof(hdr));
+ crypt_safe_memzero(buffer, buffer_size);
+ free(buffer);
+ return r;
+}
+
+int LUKS_hdr_restore(
+ const char *backup_file,
+ struct luks_phdr *hdr,
+ struct crypt_device *ctx)
+{
+ struct device *device = crypt_metadata_device(ctx);
+ int fd, r = 0, devfd = -1, diff_uuid = 0;
+ ssize_t ret, buffer_size = 0;
+ char *buffer = NULL, msg[200];
+ struct luks_phdr hdr_file;
+
+ r = LUKS_read_phdr_backup(backup_file, &hdr_file, 0, ctx);
+ if (r == -ENOENT)
+ return r;
+
+ if (!r)
+ buffer_size = LUKS_device_sectors(&hdr_file) << SECTOR_SHIFT;
+
+ if (r || buffer_size < LUKS_ALIGN_KEYSLOTS) {
+ log_err(ctx, _("Backup file does not contain valid LUKS header."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ buffer = malloc(buffer_size);
+ if (!buffer) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ fd = open(backup_file, O_RDONLY);
+ if (fd == -1) {
+ log_err(ctx, _("Cannot open header backup file %s."), backup_file);
+ r = -EINVAL;
+ goto out;
+ }
+
+ ret = read_buffer(fd, buffer, buffer_size);
+ close(fd);
+ if (ret < buffer_size) {
+ log_err(ctx, _("Cannot read header backup file %s."), backup_file);
+ r = -EIO;
+ goto out;
+ }
+
+ r = LUKS_read_phdr(hdr, 0, 0, ctx);
+ if (r == 0) {
+ log_dbg(ctx, "Device %s already contains LUKS header, checking UUID and offset.", device_path(device));
+ if(hdr->payloadOffset != hdr_file.payloadOffset ||
+ hdr->keyBytes != hdr_file.keyBytes) {
+ log_err(ctx, _("Data offset or key size differs on device and backup, restore failed."));
+ r = -EINVAL;
+ goto out;
+ }
+ if (memcmp(hdr->uuid, hdr_file.uuid, UUID_STRING_L))
+ diff_uuid = 1;
+ }
+
+ if (snprintf(msg, sizeof(msg), _("Device %s %s%s"), device_path(device),
+ r ? _("does not contain LUKS header. Replacing header can destroy data on that device.") :
+ _("already contains LUKS header. Replacing header will destroy existing keyslots."),
+ diff_uuid ? _("\nWARNING: real device header has different UUID than backup!") : "") < 0) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (!crypt_confirm(ctx, msg)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ log_dbg(ctx, "Storing backup of header (%zu bytes) and keyslot area (%zu bytes) to device %s.",
+ sizeof(*hdr), buffer_size - LUKS_ALIGN_KEYSLOTS, device_path(device));
+
+ devfd = device_open(ctx, device, O_RDWR);
+ if (devfd < 0) {
+ if (errno == EACCES)
+ log_err(ctx, _("Cannot write to device %s, permission denied."),
+ device_path(device));
+ else
+ log_err(ctx, _("Cannot open device %s."), device_path(device));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (write_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
+ buffer, buffer_size, 0) < buffer_size) {
+ r = -EIO;
+ goto out;
+ }
+
+ /* Be sure to reload new data */
+ r = LUKS_read_phdr(hdr, 1, 0, ctx);
+out:
+ device_sync(ctx, device);
+ crypt_safe_memzero(buffer, buffer_size);
+ free(buffer);
+ return r;
+}
+
+/* This routine does just some basic recovery for known problems. */
+static int _keyslot_repair(struct luks_phdr *phdr, struct crypt_device *ctx)
+{
+ struct luks_phdr temp_phdr;
+ const unsigned char *sector = (const unsigned char*)phdr;
+ struct volume_key *vk;
+ int i, bad, r, need_write = 0;
+
+ if (phdr->keyBytes != 16 && phdr->keyBytes != 32 && phdr->keyBytes != 64) {
+ log_err(ctx, _("Non standard key size, manual repair required."));
+ return -EINVAL;
+ }
+
+ /*
+	 * cryptsetup 1.0 did not align keyslots to 4k; that cannot be repaired here.
+	 * Also, we cannot trust possibly broken keyslot metadata obtained through
+	 * LUKS_keyslots_offset(). Expect the first keyslot to be aligned; if it
+	 * is not, manual repair is necessary.
+ */
+ if (phdr->keyblock[0].keyMaterialOffset < (LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE)) {
+ log_err(ctx, _("Non standard keyslots alignment, manual repair required."));
+ return -EINVAL;
+ }
+
+ /*
+	 * ECB mode does not use an IV, but legacy dmcrypt silently allowed it.
+	 * Such a device cannot be activated today anyway, so we need to fix it here.
+ */
+ if (!strncmp(phdr->cipherMode, "ecb-", 4)) {
+ log_err(ctx, _("Cipher mode repaired (%s -> %s)."), phdr->cipherMode, "ecb");
+ memset(phdr->cipherMode, 0, LUKS_CIPHERMODE_L);
+ strcpy(phdr->cipherMode, "ecb");
+ need_write = 1;
+ }
+
+ /*
+	 * Old cryptsetup expects "sha1"; gcrypt allows case-insensitive names,
+	 * so always convert the hash to lower case in the header.
+ */
+ if (_to_lower(phdr->hashSpec, LUKS_HASHSPEC_L)) {
+ log_err(ctx, _("Cipher hash repaired to lowercase (%s)."), phdr->hashSpec);
+ if (crypt_hmac_size(phdr->hashSpec) < LUKS_DIGESTSIZE) {
+ log_err(ctx, _("Requested LUKS hash %s is not supported."), phdr->hashSpec);
+ return -EINVAL;
+ }
+ need_write = 1;
+ }
+
+ r = LUKS_check_cipher(ctx, phdr->keyBytes, phdr->cipherName, phdr->cipherMode);
+ if (r < 0)
+ return -EINVAL;
+
+ vk = crypt_alloc_volume_key(phdr->keyBytes, NULL);
+ if (!vk)
+ return -ENOMEM;
+
+ log_verbose(ctx, _("Repairing keyslots."));
+
+ log_dbg(ctx, "Generating second header with the same parameters for check.");
+ /* cipherName, cipherMode, hashSpec, uuid are already null terminated */
+ /* payloadOffset - cannot check */
+ r = LUKS_generate_phdr(&temp_phdr, vk, phdr->cipherName, phdr->cipherMode,
+ phdr->hashSpec, phdr->uuid,
+ phdr->payloadOffset * SECTOR_SIZE, 0, 0, ctx);
+ if (r < 0)
+ goto out;
+
+ for(i = 0; i < LUKS_NUMKEYS; ++i) {
+ if (phdr->keyblock[i].active == LUKS_KEY_ENABLED) {
+ log_dbg(ctx, "Skipping repair for active keyslot %i.", i);
+ continue;
+ }
+
+ bad = 0;
+ if (phdr->keyblock[i].keyMaterialOffset != temp_phdr.keyblock[i].keyMaterialOffset) {
+ log_err(ctx, _("Keyslot %i: offset repaired (%u -> %u)."), i,
+ (unsigned)phdr->keyblock[i].keyMaterialOffset,
+ (unsigned)temp_phdr.keyblock[i].keyMaterialOffset);
+ phdr->keyblock[i].keyMaterialOffset = temp_phdr.keyblock[i].keyMaterialOffset;
+ bad = 1;
+ }
+
+ if (phdr->keyblock[i].stripes != temp_phdr.keyblock[i].stripes) {
+ log_err(ctx, _("Keyslot %i: stripes repaired (%u -> %u)."), i,
+ (unsigned)phdr->keyblock[i].stripes,
+ (unsigned)temp_phdr.keyblock[i].stripes);
+ phdr->keyblock[i].stripes = temp_phdr.keyblock[i].stripes;
+ bad = 1;
+ }
+
+ /* Known case - MSDOS partition table signature */
+ if (i == 6 && sector[0x1fe] == 0x55 && sector[0x1ff] == 0xaa) {
+ log_err(ctx, _("Keyslot %i: bogus partition signature."), i);
+ bad = 1;
+ }
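+		/*
+		 * (Bytes 0x1fe-0x1ff of the on-disk header fall inside the
+		 * salt of keyslot 6's record, which is why only slot 6 is
+		 * checked for a stray MBR boot signature.)
+		 */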
+
+		if (bad) {
+			log_err(ctx, _("Keyslot %i: salt wiped."), i);
+			phdr->keyblock[i].active = LUKS_KEY_DISABLED;
+			memset(&phdr->keyblock[i].passwordSalt, 0x00, LUKS_SALTSIZE);
+			phdr->keyblock[i].passwordIterations = 0;
+			need_write = 1;
+		}
+ }
+
+ /*
+	 * Check the repair result before writing: repair cannot fix out-of-order
+	 * keyslot offsets and writing would corrupt the header again.
+ */
+ if (LUKS_check_keyslots(ctx, phdr))
+ r = -EINVAL;
+ else if (need_write) {
+ log_verbose(ctx, _("Writing LUKS header to disk."));
+ r = LUKS_write_phdr(phdr, ctx);
+ }
+out:
+ if (r)
+ log_err(ctx, _("Repair failed."));
+ crypt_free_volume_key(vk);
+ crypt_safe_memzero(&temp_phdr, sizeof(temp_phdr));
+ return r;
+}
+
+static int _check_and_convert_hdr(const char *device,
+ struct luks_phdr *hdr,
+ int require_luks_device,
+ int repair,
+ struct crypt_device *ctx)
+{
+ int r = 0;
+ unsigned int i;
+ char luksMagic[] = LUKS_MAGIC;
+
+ hdr->version = be16_to_cpu(hdr->version);
+ if (memcmp(hdr->magic, luksMagic, LUKS_MAGIC_L)) { /* Check magic */
+ log_dbg(ctx, "LUKS header not detected.");
+ if (require_luks_device)
+ log_err(ctx, _("Device %s is not a valid LUKS device."), device);
+ return -EINVAL;
+ } else if (hdr->version != 1) {
+ log_err(ctx, _("Unsupported LUKS version %d."), hdr->version);
+ return -EINVAL;
+ }
+
+ hdr->hashSpec[LUKS_HASHSPEC_L - 1] = '\0';
+ if (crypt_hmac_size(hdr->hashSpec) < LUKS_DIGESTSIZE) {
+ log_err(ctx, _("Requested LUKS hash %s is not supported."), hdr->hashSpec);
+ r = -EINVAL;
+ }
+
+ /* Header detected */
+ hdr->payloadOffset = be32_to_cpu(hdr->payloadOffset);
+ hdr->keyBytes = be32_to_cpu(hdr->keyBytes);
+ hdr->mkDigestIterations = be32_to_cpu(hdr->mkDigestIterations);
+
+ for (i = 0; i < LUKS_NUMKEYS; ++i) {
+ hdr->keyblock[i].active = be32_to_cpu(hdr->keyblock[i].active);
+ hdr->keyblock[i].passwordIterations = be32_to_cpu(hdr->keyblock[i].passwordIterations);
+ hdr->keyblock[i].keyMaterialOffset = be32_to_cpu(hdr->keyblock[i].keyMaterialOffset);
+ hdr->keyblock[i].stripes = be32_to_cpu(hdr->keyblock[i].stripes);
+ }
+
+ if (LUKS_check_keyslots(ctx, hdr))
+ r = -EINVAL;
+
+ /* Avoid unterminated strings */
+ hdr->cipherName[LUKS_CIPHERNAME_L - 1] = '\0';
+ hdr->cipherMode[LUKS_CIPHERMODE_L - 1] = '\0';
+ hdr->uuid[UUID_STRING_L - 1] = '\0';
+
+ if (repair) {
+ if (!strncmp(hdr->cipherMode, "ecb-", 4)) {
+ log_err(ctx, _("LUKS cipher mode %s is invalid."), hdr->cipherMode);
+ r = -EINVAL;
+ }
+
+ if (_is_not_lower(hdr->hashSpec, LUKS_HASHSPEC_L)) {
+ log_err(ctx, _("LUKS hash %s is invalid."), hdr->hashSpec);
+ r = -EINVAL;
+ }
+
+ if (r == -EINVAL)
+ r = _keyslot_repair(hdr, ctx);
+ else
+ log_verbose(ctx, _("No known problems detected for LUKS header."));
+ }
+
+ return r;
+}
+
+int LUKS_read_phdr_backup(const char *backup_file,
+ struct luks_phdr *hdr,
+ int require_luks_device,
+ struct crypt_device *ctx)
+{
+ ssize_t hdr_size = sizeof(struct luks_phdr);
+ int devfd = 0, r = 0;
+
+ log_dbg(ctx, "Reading LUKS header of size %d from backup file %s",
+ (int)hdr_size, backup_file);
+
+ devfd = open(backup_file, O_RDONLY);
+ if (devfd == -1) {
+ log_err(ctx, _("Cannot open header backup file %s."), backup_file);
+ return -ENOENT;
+ }
+
+ if (read_buffer(devfd, hdr, hdr_size) < hdr_size)
+ r = -EIO;
+ else
+ r = _check_and_convert_hdr(backup_file, hdr,
+ require_luks_device, 0, ctx);
+
+ close(devfd);
+ return r;
+}
+
+int LUKS_read_phdr(struct luks_phdr *hdr,
+ int require_luks_device,
+ int repair,
+ struct crypt_device *ctx)
+{
+ int devfd, r = 0;
+ struct device *device = crypt_metadata_device(ctx);
+ ssize_t hdr_size = sizeof(struct luks_phdr);
+
+	/* LUKS header starts at offset 0, first keyslot at LUKS_ALIGN_KEYSLOTS */
+ assert(sizeof(struct luks_phdr) <= LUKS_ALIGN_KEYSLOTS);
+
+ /* Stripes count cannot be changed without additional code fixes yet */
+ assert(LUKS_STRIPES == 4000);
+
+ if (repair && !require_luks_device)
+ return -EINVAL;
+
+ log_dbg(ctx, "Reading LUKS header of size %zu from device %s",
+ hdr_size, device_path(device));
+
+ devfd = device_open(ctx, device, O_RDONLY);
+ if (devfd < 0) {
+ log_err(ctx, _("Cannot open device %s."), device_path(device));
+ return -EINVAL;
+ }
+
+ if (read_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
+ hdr, hdr_size, 0) < hdr_size)
+ r = -EIO;
+ else
+ r = _check_and_convert_hdr(device_path(device), hdr, require_luks_device,
+ repair, ctx);
+
+ if (!r)
+ r = LUKS_check_device_size(ctx, hdr, 0);
+
+ /*
+	 * Cryptsetup 1.0.0 did not align keyslots to 4k (a very rare version).
+	 * Disable direct-io to avoid possible IO errors if the underlying
+	 * device has a bigger sector size.
+ */
+ if (!r && hdr->keyblock[0].keyMaterialOffset * SECTOR_SIZE < LUKS_ALIGN_KEYSLOTS) {
+ log_dbg(ctx, "Old unaligned LUKS keyslot detected, disabling direct-io.");
+ device_disable_direct_io(device);
+ }
+
+ return r;
+}
+
+int LUKS_write_phdr(struct luks_phdr *hdr,
+ struct crypt_device *ctx)
+{
+ struct device *device = crypt_metadata_device(ctx);
+ ssize_t hdr_size = sizeof(struct luks_phdr);
+ int devfd = 0;
+ unsigned int i;
+ struct luks_phdr convHdr;
+ int r;
+
+ log_dbg(ctx, "Updating LUKS header of size %zu on device %s",
+ sizeof(struct luks_phdr), device_path(device));
+
+ r = LUKS_check_device_size(ctx, hdr, 1);
+ if (r)
+ return r;
+
+ devfd = device_open(ctx, device, O_RDWR);
+ if (devfd < 0) {
+ if (errno == EACCES)
+ log_err(ctx, _("Cannot write to device %s, permission denied."),
+ device_path(device));
+ else
+ log_err(ctx, _("Cannot open device %s."), device_path(device));
+ return -EINVAL;
+ }
+
+ memcpy(&convHdr, hdr, hdr_size);
+ memset(&convHdr._padding, 0, sizeof(convHdr._padding));
+
+ /* Convert every uint16/32_t item to network byte order */
+ convHdr.version = cpu_to_be16(hdr->version);
+ convHdr.payloadOffset = cpu_to_be32(hdr->payloadOffset);
+ convHdr.keyBytes = cpu_to_be32(hdr->keyBytes);
+ convHdr.mkDigestIterations = cpu_to_be32(hdr->mkDigestIterations);
+ for(i = 0; i < LUKS_NUMKEYS; ++i) {
+ convHdr.keyblock[i].active = cpu_to_be32(hdr->keyblock[i].active);
+ convHdr.keyblock[i].passwordIterations = cpu_to_be32(hdr->keyblock[i].passwordIterations);
+ convHdr.keyblock[i].keyMaterialOffset = cpu_to_be32(hdr->keyblock[i].keyMaterialOffset);
+ convHdr.keyblock[i].stripes = cpu_to_be32(hdr->keyblock[i].stripes);
+ }
+
+ r = write_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
+ &convHdr, hdr_size, 0) < hdr_size ? -EIO : 0;
+ if (r)
+ log_err(ctx, _("Error during update of LUKS header on device %s."), device_path(device));
+
+ device_sync(ctx, device);
+
+ /* Re-read header from disk to be sure that in-memory and on-disk data are the same. */
+ if (!r) {
+ r = LUKS_read_phdr(hdr, 1, 0, ctx);
+ if (r)
+ log_err(ctx, _("Error re-reading LUKS header after update on device %s."),
+ device_path(device));
+ }
+
+ return r;
+}
+
+/* Check that kernel supports requested cipher by decryption of one sector */
+int LUKS_check_cipher(struct crypt_device *ctx, size_t keylength, const char *cipher, const char *cipher_mode)
+{
+ int r;
+ struct volume_key *empty_key;
+ char buf[SECTOR_SIZE];
+
+ log_dbg(ctx, "Checking if cipher %s-%s is usable.", cipher, cipher_mode);
+
+ empty_key = crypt_alloc_volume_key(keylength, NULL);
+ if (!empty_key)
+ return -ENOMEM;
+
+	/* No need for key-quality randomness here, but known weak keys must be avoided. */
+ r = crypt_random_get(ctx, empty_key->key, empty_key->keylength, CRYPT_RND_NORMAL);
+ if (!r)
+ r = LUKS_decrypt_from_storage(buf, sizeof(buf), cipher, cipher_mode, empty_key, 0, ctx);
+
+ crypt_free_volume_key(empty_key);
+ crypt_safe_memzero(buf, sizeof(buf));
+ return r;
+}
+
+int LUKS_generate_phdr(struct luks_phdr *header,
+ const struct volume_key *vk,
+ const char *cipherName,
+ const char *cipherMode,
+ const char *hashSpec,
+ const char *uuid,
+ uint64_t data_offset, /* in bytes */
+ uint64_t align_offset, /* in bytes */
+ uint64_t required_alignment, /* in bytes */
+ struct crypt_device *ctx)
+{
+ int i, r;
+ size_t keyslot_sectors, header_sectors;
+ uuid_t partitionUuid;
+ struct crypt_pbkdf_type *pbkdf;
+ double PBKDF2_temp;
+ char luksMagic[] = LUKS_MAGIC;
+
+ if (data_offset % SECTOR_SIZE || align_offset % SECTOR_SIZE ||
+ required_alignment % SECTOR_SIZE)
+ return -EINVAL;
+
+ memset(header, 0, sizeof(struct luks_phdr));
+
+ keyslot_sectors = AF_split_sectors(vk->keylength, LUKS_STRIPES);
+ header_sectors = LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE;
+
+ for (i = 0; i < LUKS_NUMKEYS; i++) {
+ header->keyblock[i].active = LUKS_KEY_DISABLED;
+ header->keyblock[i].keyMaterialOffset = header_sectors;
+ header->keyblock[i].stripes = LUKS_STRIPES;
+ header_sectors = size_round_up(header_sectors + keyslot_sectors,
+ LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE);
+ }
+	/* header_sectors now holds the total keyslot material space, in sectors */
+
+ /* Data offset has priority */
+ if (data_offset)
+ header->payloadOffset = data_offset / SECTOR_SIZE;
+ else if (required_alignment) {
+ header->payloadOffset = size_round_up(header_sectors, (required_alignment / SECTOR_SIZE));
+ header->payloadOffset += (align_offset / SECTOR_SIZE);
+ } else
+ header->payloadOffset = 0;
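+
+	/*
+	 * Illustrative numbers: with a 32-byte key, header_sectors ends up
+	 * as 2056; with required_alignment of 1 MiB (2048 sectors) the
+	 * payload offset is rounded up to 4096 sectors, i.e. data at 2 MiB.
+	 */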
+
+ if (header->payloadOffset && header->payloadOffset < header_sectors) {
+ log_err(ctx, _("Data offset for LUKS header must be "
+ "either 0 or higher than header size."));
+ return -EINVAL;
+ }
+
+ if (crypt_hmac_size(hashSpec) < LUKS_DIGESTSIZE) {
+ log_err(ctx, _("Requested LUKS hash %s is not supported."), hashSpec);
+ return -EINVAL;
+ }
+
+ if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
+ log_err(ctx, _("Wrong LUKS UUID format provided."));
+ return -EINVAL;
+ }
+ if (!uuid)
+ uuid_generate(partitionUuid);
+
+	/* Set magic */
+	memcpy(header->magic, luksMagic, LUKS_MAGIC_L);
+	header->version = 1;
+	strncpy(header->cipherName, cipherName, LUKS_CIPHERNAME_L - 1);
+	strncpy(header->cipherMode, cipherMode, LUKS_CIPHERMODE_L - 1);
+	strncpy(header->hashSpec, hashSpec, LUKS_HASHSPEC_L - 1);
+	_to_lower(header->hashSpec, LUKS_HASHSPEC_L);
+
+	header->keyBytes = vk->keylength;
+
+ log_dbg(ctx, "Generating LUKS header version %d using hash %s, %s, %s, MK %d bytes",
+		header->version, header->hashSpec, header->cipherName, header->cipherMode,
+ header->keyBytes);
+
+ r = crypt_random_get(ctx, header->mkDigestSalt, LUKS_SALTSIZE, CRYPT_RND_SALT);
+ if(r < 0) {
+ log_err(ctx, _("Cannot create LUKS header: reading random salt failed."));
+ return r;
+ }
+
+ /* Compute volume key digest */
+ pbkdf = crypt_get_pbkdf(ctx);
+ r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, vk->keylength);
+ if (r < 0)
+ return r;
+ assert(pbkdf->iterations);
+
+ if (pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK && pbkdf->time_ms == 0)
+ PBKDF2_temp = LUKS_MKD_ITERATIONS_MIN;
+ else /* iterations per ms * LUKS_MKD_ITERATIONS_MS */
+ PBKDF2_temp = (double)pbkdf->iterations * LUKS_MKD_ITERATIONS_MS / pbkdf->time_ms;
+
+ if (PBKDF2_temp > (double)UINT32_MAX)
+ return -EINVAL;
+ header->mkDigestIterations = AT_LEAST((uint32_t)PBKDF2_temp, LUKS_MKD_ITERATIONS_MIN);
+ assert(header->mkDigestIterations);
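+
+	/*
+	 * Illustrative arithmetic: a benchmark of 500000 iterations in
+	 * 500 ms gives 1000 iterations/ms, so the digest gets
+	 * 1000 * LUKS_MKD_ITERATIONS_MS == 125000 iterations (never less
+	 * than LUKS_MKD_ITERATIONS_MIN).
+	 */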
+
+	r = crypt_pbkdf(CRYPT_KDF_PBKDF2, header->hashSpec, vk->key, vk->keylength,
+			header->mkDigestSalt, LUKS_SALTSIZE,
+			header->mkDigest, LUKS_DIGESTSIZE,
+			header->mkDigestIterations, 0, 0);
+ if (r < 0) {
+ log_err(ctx, _("Cannot create LUKS header: header digest failed (using hash %s)."),
+ header->hashSpec);
+ return r;
+ }
+
+ uuid_unparse(partitionUuid, header->uuid);
+
+ log_dbg(ctx, "Data offset %d, UUID %s, digest iterations %" PRIu32,
+ header->payloadOffset, header->uuid, header->mkDigestIterations);
+
+ return 0;
+}
+
+int LUKS_hdr_uuid_set(
+ struct luks_phdr *hdr,
+ const char *uuid,
+ struct crypt_device *ctx)
+{
+ uuid_t partitionUuid;
+
+ if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
+ log_err(ctx, _("Wrong LUKS UUID format provided."));
+ return -EINVAL;
+ }
+ if (!uuid)
+ uuid_generate(partitionUuid);
+
+ uuid_unparse(partitionUuid, hdr->uuid);
+
+ return LUKS_write_phdr(hdr, ctx);
+}
+
+int LUKS_set_key(unsigned int keyIndex,
+ const char *password, size_t passwordLen,
+ struct luks_phdr *hdr, struct volume_key *vk,
+ struct crypt_device *ctx)
+{
+ struct volume_key *derived_key;
+ char *AfKey = NULL;
+ size_t AFEKSize;
+ struct crypt_pbkdf_type *pbkdf;
+ int r;
+
+ if(hdr->keyblock[keyIndex].active != LUKS_KEY_DISABLED) {
+ log_err(ctx, _("Key slot %d active, purge first."), keyIndex);
+ return -EINVAL;
+ }
+
+	/* A LUKS keyslot always has at least 4000 stripes according to the specification */
+ if(hdr->keyblock[keyIndex].stripes < 4000) {
+ log_err(ctx, _("Key slot %d material includes too few stripes. Header manipulation?"),
+ keyIndex);
+ return -EINVAL;
+ }
+
+ log_dbg(ctx, "Calculating data for key slot %d", keyIndex);
+ pbkdf = crypt_get_pbkdf(ctx);
+ r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, vk->keylength);
+ if (r < 0)
+ return r;
+ assert(pbkdf->iterations);
+
+ /*
+ * Final iteration count is at least LUKS_SLOT_ITERATIONS_MIN
+ */
+ hdr->keyblock[keyIndex].passwordIterations =
+ AT_LEAST(pbkdf->iterations, LUKS_SLOT_ITERATIONS_MIN);
+ log_dbg(ctx, "Key slot %d use %" PRIu32 " password iterations.", keyIndex,
+ hdr->keyblock[keyIndex].passwordIterations);
+
+ derived_key = crypt_alloc_volume_key(hdr->keyBytes, NULL);
+ if (!derived_key)
+ return -ENOMEM;
+
+ r = crypt_random_get(ctx, hdr->keyblock[keyIndex].passwordSalt,
+ LUKS_SALTSIZE, CRYPT_RND_SALT);
+ if (r < 0)
+ goto out;
+
+ r = crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, password, passwordLen,
+ hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE,
+ derived_key->key, hdr->keyBytes,
+ hdr->keyblock[keyIndex].passwordIterations, 0, 0);
+ if (r < 0) {
+ if ((crypt_backend_flags() & CRYPT_BACKEND_PBKDF2_INT) &&
+ hdr->keyblock[keyIndex].passwordIterations > INT_MAX)
+ log_err(ctx, _("PBKDF2 iteration value overflow."));
+ goto out;
+ }
+
+ /*
+	 * AF splitting: the volume key stored in vk->key is split into AfKey
+ */
+ assert(vk->keylength == hdr->keyBytes);
+ AFEKSize = AF_split_sectors(vk->keylength, hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE;
+ AfKey = crypt_safe_alloc(AFEKSize);
+ if (!AfKey) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ log_dbg(ctx, "Using hash %s for AF in key slot %d, %d stripes",
+ hdr->hashSpec, keyIndex, hdr->keyblock[keyIndex].stripes);
+ r = AF_split(ctx, vk->key, AfKey, vk->keylength, hdr->keyblock[keyIndex].stripes, hdr->hashSpec);
+ if (r < 0)
+ goto out;
+
+ log_dbg(ctx, "Updating key slot %d [0x%04x] area.", keyIndex,
+ hdr->keyblock[keyIndex].keyMaterialOffset << 9);
+ /* Encryption via dm */
+ r = LUKS_encrypt_to_storage(AfKey,
+ AFEKSize,
+ hdr->cipherName, hdr->cipherMode,
+ derived_key,
+ hdr->keyblock[keyIndex].keyMaterialOffset,
+ ctx);
+ if (r < 0)
+ goto out;
+
+ /* Mark the key as active in phdr */
+ r = LUKS_keyslot_set(hdr, (int)keyIndex, 1, ctx);
+ if (r < 0)
+ goto out;
+
+ r = LUKS_write_phdr(hdr, ctx);
+ if (r < 0)
+ goto out;
+
+ r = 0;
+out:
+ crypt_safe_free(AfKey);
+ crypt_free_volume_key(derived_key);
+ return r;
+}
+
+/* Check the volume key against the header digest (returns 0 on match, -EPERM on mismatch). */
+int LUKS_verify_volume_key(const struct luks_phdr *hdr,
+ const struct volume_key *vk)
+{
+ char checkHashBuf[LUKS_DIGESTSIZE];
+
+ if (crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, vk->key, vk->keylength,
+ hdr->mkDigestSalt, LUKS_SALTSIZE,
+ checkHashBuf, LUKS_DIGESTSIZE,
+ hdr->mkDigestIterations, 0, 0) < 0)
+ return -EINVAL;
+
+ if (crypt_backend_memeq(checkHashBuf, hdr->mkDigest, LUKS_DIGESTSIZE))
+ return -EPERM;
+
+ return 0;
+}
+
+/* Try to open a particular key slot */
+static int LUKS_open_key(unsigned int keyIndex,
+ const char *password,
+ size_t passwordLen,
+ struct luks_phdr *hdr,
+ struct volume_key **vk,
+ struct crypt_device *ctx)
+{
+ crypt_keyslot_info ki = LUKS_keyslot_info(hdr, keyIndex);
+ struct volume_key *derived_key;
+ char *AfKey = NULL;
+ size_t AFEKSize;
+ int r;
+
+ log_dbg(ctx, "Trying to open key slot %d [%s].", keyIndex,
+ dbg_slot_state(ki));
+
+ if (ki < CRYPT_SLOT_ACTIVE)
+ return -ENOENT;
+
+ derived_key = crypt_alloc_volume_key(hdr->keyBytes, NULL);
+ if (!derived_key)
+ return -ENOMEM;
+
+ *vk = crypt_alloc_volume_key(hdr->keyBytes, NULL);
+ if (!*vk) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ AFEKSize = AF_split_sectors(hdr->keyBytes, hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE;
+ AfKey = crypt_safe_alloc(AFEKSize);
+ if (!AfKey) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, password, passwordLen,
+ hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE,
+ derived_key->key, hdr->keyBytes,
+ hdr->keyblock[keyIndex].passwordIterations, 0, 0);
+ if (r < 0) {
+ log_err(ctx, _("Cannot open keyslot (using hash %s)."), hdr->hashSpec);
+ goto out;
+ }
+
+ log_dbg(ctx, "Reading key slot %d area.", keyIndex);
+ r = LUKS_decrypt_from_storage(AfKey,
+ AFEKSize,
+ hdr->cipherName, hdr->cipherMode,
+ derived_key,
+ hdr->keyblock[keyIndex].keyMaterialOffset,
+ ctx);
+ if (r < 0)
+ goto out;
+
+ r = AF_merge(AfKey, (*vk)->key, (*vk)->keylength, hdr->keyblock[keyIndex].stripes, hdr->hashSpec);
+ if (r < 0)
+ goto out;
+
+ r = LUKS_verify_volume_key(hdr, *vk);
+
+ /* Allow only empty passphrase with null cipher */
+ if (!r && crypt_is_cipher_null(hdr->cipherName) && passwordLen)
+ r = -EPERM;
+out:
+ if (r < 0) {
+ crypt_free_volume_key(*vk);
+ *vk = NULL;
+ }
+ crypt_safe_free(AfKey);
+ crypt_free_volume_key(derived_key);
+ return r;
+}
+
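+/*
+ * Return value summary: the opened keyslot number on success, -EPERM when at
+ * least one active keyslot rejected the passphrase, -ENOENT when no keyslot
+ * was active; other errors are passed through from LUKS_open_key().
+ */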
+int LUKS_open_key_with_hdr(int keyIndex,
+ const char *password,
+ size_t passwordLen,
+ struct luks_phdr *hdr,
+ struct volume_key **vk,
+ struct crypt_device *ctx)
+{
+ unsigned int i, tried = 0;
+ int r;
+
+ if (keyIndex >= 0) {
+ r = LUKS_open_key(keyIndex, password, passwordLen, hdr, vk, ctx);
+ return (r < 0) ? r : keyIndex;
+ }
+
+ for (i = 0; i < LUKS_NUMKEYS; i++) {
+ r = LUKS_open_key(i, password, passwordLen, hdr, vk, ctx);
+ if (r == 0)
+ return i;
+
+		/* Do not retry for errors that are not -EPERM or -ENOENT,
+		   the former meaning a wrong password, the latter an inactive key slot */
+ if ((r != -EPERM) && (r != -ENOENT))
+ return r;
+ if (r == -EPERM)
+ tried++;
+ }
+ /* Warning, early returns above */
+ return tried ? -EPERM : -ENOENT;
+}
+
+int LUKS_del_key(unsigned int keyIndex,
+ struct luks_phdr *hdr,
+ struct crypt_device *ctx)
+{
+ struct device *device = crypt_metadata_device(ctx);
+ unsigned int startOffset, endOffset;
+ int r;
+
+ r = LUKS_read_phdr(hdr, 1, 0, ctx);
+ if (r)
+ return r;
+
+ r = LUKS_keyslot_set(hdr, keyIndex, 0, ctx);
+ if (r) {
+ log_err(ctx, _("Key slot %d is invalid, please select keyslot between 0 and %d."),
+ keyIndex, LUKS_NUMKEYS - 1);
+ return r;
+ }
+
+ /* secure deletion of key material */
+ startOffset = hdr->keyblock[keyIndex].keyMaterialOffset;
+ endOffset = startOffset + AF_split_sectors(hdr->keyBytes, hdr->keyblock[keyIndex].stripes);
+
+ r = crypt_wipe_device(ctx, device, CRYPT_WIPE_SPECIAL, startOffset * SECTOR_SIZE,
+ (endOffset - startOffset) * SECTOR_SIZE,
+ (endOffset - startOffset) * SECTOR_SIZE, NULL, NULL);
+ if (r) {
+ if (r == -EACCES) {
+ log_err(ctx, _("Cannot write to device %s, permission denied."),
+ device_path(device));
+ r = -EINVAL;
+ } else
+ log_err(ctx, _("Cannot wipe device %s."),
+ device_path(device));
+ return r;
+ }
+
+ /* Wipe keyslot info */
+ memset(&hdr->keyblock[keyIndex].passwordSalt, 0, LUKS_SALTSIZE);
+ hdr->keyblock[keyIndex].passwordIterations = 0;
+
+ r = LUKS_write_phdr(hdr, ctx);
+
+ return r;
+}
+
+crypt_keyslot_info LUKS_keyslot_info(struct luks_phdr *hdr, int keyslot)
+{
+ int i;
+
+ if(keyslot >= LUKS_NUMKEYS || keyslot < 0)
+ return CRYPT_SLOT_INVALID;
+
+ if (hdr->keyblock[keyslot].active == LUKS_KEY_DISABLED)
+ return CRYPT_SLOT_INACTIVE;
+
+ if (hdr->keyblock[keyslot].active != LUKS_KEY_ENABLED)
+ return CRYPT_SLOT_INVALID;
+
+ for(i = 0; i < LUKS_NUMKEYS; i++)
+ if(i != keyslot && hdr->keyblock[i].active == LUKS_KEY_ENABLED)
+ return CRYPT_SLOT_ACTIVE;
+
+ return CRYPT_SLOT_ACTIVE_LAST;
+}
+
+int LUKS_keyslot_find_empty(struct luks_phdr *hdr)
+{
+ int i;
+
+ for (i = 0; i < LUKS_NUMKEYS; i++)
+ if(hdr->keyblock[i].active == LUKS_KEY_DISABLED)
+ break;
+
+ if (i == LUKS_NUMKEYS)
+ return -EINVAL;
+
+ return i;
+}
+
+int LUKS_keyslot_active_count(struct luks_phdr *hdr)
+{
+ int i, num = 0;
+
+ for (i = 0; i < LUKS_NUMKEYS; i++)
+ if(hdr->keyblock[i].active == LUKS_KEY_ENABLED)
+ num++;
+
+ return num;
+}
+
+int LUKS_keyslot_set(struct luks_phdr *hdr, int keyslot, int enable, struct crypt_device *ctx)
+{
+ crypt_keyslot_info ki = LUKS_keyslot_info(hdr, keyslot);
+
+ if (ki == CRYPT_SLOT_INVALID)
+ return -EINVAL;
+
+ hdr->keyblock[keyslot].active = enable ? LUKS_KEY_ENABLED : LUKS_KEY_DISABLED;
+ log_dbg(ctx, "Key slot %d was %s in LUKS header.", keyslot, enable ? "enabled" : "disabled");
+ return 0;
+}
+
+int LUKS1_activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vk,
+ uint32_t flags)
+{
+ int r;
+ struct crypt_dm_active_device dmd = {
+ .flags = flags,
+ .uuid = crypt_get_uuid(cd),
+ };
+
+ r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
+ vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
+ crypt_get_data_offset(cd), crypt_get_integrity(cd),
+ crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
+ if (!r)
+ r = create_or_reload_device(cd, name, CRYPT_LUKS1, &dmd);
+
+ dm_targets_free(cd, &dmd);
+
+ return r;
+}
+
+int LUKS_wipe_header_areas(struct luks_phdr *hdr,
+ struct crypt_device *ctx)
+{
+ int i, r;
+ uint64_t offset, length;
+ size_t wipe_block;
+
+ r = LUKS_check_device_size(ctx, hdr, 1);
+ if (r)
+ return r;
+
+ /* Wipe complete header, keyslots and padding areas with zeroes. */
+ offset = 0;
+ length = (uint64_t)hdr->payloadOffset * SECTOR_SIZE;
+ wipe_block = 1024 * 1024;
+
+	/* For a detached or bogus header, wipe at least the first 4k */
+ if (length == 0 || length > (LUKS_MAX_KEYSLOT_SIZE * LUKS_NUMKEYS)) {
+ length = 4096;
+ wipe_block = 4096;
+ }
+
+ log_dbg(ctx, "Wiping LUKS areas (0x%06" PRIx64 " - 0x%06" PRIx64") with zeroes.",
+ offset, length + offset);
+
+ r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_ZERO,
+ offset, length, wipe_block, NULL, NULL);
+ if (r < 0)
+ return r;
+
+ /* Wipe keyslots areas */
+ wipe_block = 1024 * 1024;
+ for (i = 0; i < LUKS_NUMKEYS; i++) {
+ r = LUKS_keyslot_area(hdr, i, &offset, &length);
+ if (r < 0)
+ return r;
+
+ /* Ignore too big LUKS1 keyslots here */
+ if (length > LUKS_MAX_KEYSLOT_SIZE ||
+ offset > (LUKS_MAX_KEYSLOT_SIZE - length))
+ continue;
+
+ if (length == 0 || offset < 4096)
+ return -EINVAL;
+
+ log_dbg(ctx, "Wiping keyslot %i area (0x%06" PRIx64 " - 0x%06" PRIx64") with random data.",
+ i, offset, length + offset);
+
+ r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_RANDOM,
+ offset, length, wipe_block, NULL, NULL);
+ if (r < 0)
+ return r;
+ }
+
+ return r;
+}
+
+int LUKS_keyslot_pbkdf(struct luks_phdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf)
+{
+ if (LUKS_keyslot_info(hdr, keyslot) < CRYPT_SLOT_ACTIVE)
+ return -EINVAL;
+
+ pbkdf->type = CRYPT_KDF_PBKDF2;
+ pbkdf->hash = hdr->hashSpec;
+ pbkdf->iterations = hdr->keyblock[keyslot].passwordIterations;
+ pbkdf->max_memory_kb = 0;
+ pbkdf->parallel_threads = 0;
+ pbkdf->time_ms = 0;
+ pbkdf->flags = 0;
+ return 0;
+}
diff --git a/lib/luks1/luks.h b/lib/luks1/luks.h
new file mode 100644
index 0000000..9c3f386
--- /dev/null
+++ b/lib/luks1/luks.h
@@ -0,0 +1,194 @@
+/*
+ * LUKS - Linux Unified Key Setup
+ *
+ * Copyright (C) 2004-2006 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef INCLUDED_CRYPTSETUP_LUKS_LUKS_H
+#define INCLUDED_CRYPTSETUP_LUKS_LUKS_H
+
+/*
+ * LUKS partition header
+ */
+
+#include "libcryptsetup.h"
+
+#define LUKS_CIPHERNAME_L 32
+#define LUKS_CIPHERMODE_L 32
+#define LUKS_HASHSPEC_L 32
+#define LUKS_DIGESTSIZE 20 // SHA1 digest size
+#define LUKS_HMACSIZE 32
+#define LUKS_SALTSIZE 32
+#define LUKS_NUMKEYS 8
+
+// Minimum number of iterations
+#define LUKS_MKD_ITERATIONS_MIN 1000
+#define LUKS_SLOT_ITERATIONS_MIN 1000
+
+// Iteration time for digest in ms
+#define LUKS_MKD_ITERATIONS_MS 125
+
+#define LUKS_KEY_DISABLED_OLD 0
+#define LUKS_KEY_ENABLED_OLD 0xCAFE
+
+#define LUKS_KEY_DISABLED 0x0000DEAD
+#define LUKS_KEY_ENABLED 0x00AC71F3
+
+#define LUKS_STRIPES 4000
+
+// partition header starts with magic
+#define LUKS_MAGIC {'L','U','K','S', 0xba, 0xbe}
+#define LUKS_MAGIC_L 6
+
+/* Actually we need only 37 bytes, but we don't want struct auto-alignment to kick in */
+#define UUID_STRING_L 40
+
+/* Offset to keyslot area [in bytes] */
+#define LUKS_ALIGN_KEYSLOTS 4096
+
+/* Maximal LUKS keyslot size, used to limit header wipe [in bytes] */
+#define LUKS_MAX_KEYSLOT_SIZE 0x1000000 /* 16 MB, up to 32768 bits key */
+
+/* All integer values are stored on disk in network byte order
+ * and must be converted */
+
+struct volume_key;
+struct device_backend;
+
+struct luks_phdr {
+ char magic[LUKS_MAGIC_L];
+ uint16_t version;
+ char cipherName[LUKS_CIPHERNAME_L];
+ char cipherMode[LUKS_CIPHERMODE_L];
+ char hashSpec[LUKS_HASHSPEC_L];
+ uint32_t payloadOffset;
+ uint32_t keyBytes;
+ char mkDigest[LUKS_DIGESTSIZE];
+ char mkDigestSalt[LUKS_SALTSIZE];
+ uint32_t mkDigestIterations;
+ char uuid[UUID_STRING_L];
+
+ struct {
+ uint32_t active;
+
+ /* parameters used for password processing */
+ uint32_t passwordIterations;
+ char passwordSalt[LUKS_SALTSIZE];
+
+ /* parameters used for AF store/load */
+ uint32_t keyMaterialOffset;
+ uint32_t stripes;
+ } keyblock[LUKS_NUMKEYS];
+
+ /* Align it to 512 sector size */
+ char _padding[432];
+};
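+
+/*
+ * With the field sizes above, the structure adds up to exactly 1024 bytes
+ * (two 512-byte sectors). A build-time sanity check could look like
+ * (a sketch, not present upstream):
+ *
+ *	_Static_assert(sizeof(struct luks_phdr) == 1024, "LUKS1 phdr size");
+ */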
+
+int LUKS_verify_volume_key(const struct luks_phdr *hdr,
+ const struct volume_key *vk);
+
+int LUKS_check_cipher(struct crypt_device *ctx,
+ size_t keylength,
+ const char *cipher,
+ const char *cipher_mode);
+
+int LUKS_generate_phdr(struct luks_phdr *header,
+ const struct volume_key *vk,
+ const char *cipherName,
+ const char *cipherMode,
+ const char *hashSpec,
+ const char *uuid,
+ uint64_t data_offset,
+ uint64_t align_offset,
+ uint64_t required_alignment,
+ struct crypt_device *ctx);
+
+int LUKS_read_phdr(
+ struct luks_phdr *hdr,
+ int require_luks_device,
+ int repair,
+ struct crypt_device *ctx);
+
+int LUKS_read_phdr_backup(
+ const char *backup_file,
+ struct luks_phdr *hdr,
+ int require_luks_device,
+ struct crypt_device *ctx);
+
+int LUKS_hdr_uuid_set(
+ struct luks_phdr *hdr,
+ const char *uuid,
+ struct crypt_device *ctx);
+
+int LUKS_hdr_backup(
+ const char *backup_file,
+ struct crypt_device *ctx);
+
+int LUKS_hdr_restore(
+ const char *backup_file,
+ struct luks_phdr *hdr,
+ struct crypt_device *ctx);
+
+int LUKS_write_phdr(
+ struct luks_phdr *hdr,
+ struct crypt_device *ctx);
+
+int LUKS_set_key(
+ unsigned int keyIndex,
+ const char *password,
+ size_t passwordLen,
+ struct luks_phdr *hdr,
+ struct volume_key *vk,
+ struct crypt_device *ctx);
+
+int LUKS_open_key_with_hdr(
+ int keyIndex,
+ const char *password,
+ size_t passwordLen,
+ struct luks_phdr *hdr,
+ struct volume_key **vk,
+ struct crypt_device *ctx);
+
+int LUKS_del_key(
+ unsigned int keyIndex,
+ struct luks_phdr *hdr,
+ struct crypt_device *ctx);
+
+int LUKS_wipe_header_areas(struct luks_phdr *hdr,
+ struct crypt_device *ctx);
+
+crypt_keyslot_info LUKS_keyslot_info(struct luks_phdr *hdr, int keyslot);
+int LUKS_keyslot_find_empty(struct luks_phdr *hdr);
+int LUKS_keyslot_active_count(struct luks_phdr *hdr);
+int LUKS_keyslot_set(struct luks_phdr *hdr, int keyslot, int enable,
+ struct crypt_device *ctx);
+int LUKS_keyslot_area(const struct luks_phdr *hdr,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length);
+size_t LUKS_device_sectors(const struct luks_phdr *hdr);
+size_t LUKS_keyslots_offset(const struct luks_phdr *hdr);
+int LUKS_keyslot_pbkdf(struct luks_phdr *hdr, int keyslot,
+ struct crypt_pbkdf_type *pbkdf);
+
+int LUKS1_activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vk,
+ uint32_t flags);
+
+#endif
diff --git a/lib/luks2/luks2.h b/lib/luks2/luks2.h
new file mode 100644
index 0000000..dfccf02
--- /dev/null
+++ b/lib/luks2/luks2.h
@@ -0,0 +1,497 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_LUKS2_ONDISK_H
+#define _CRYPTSETUP_LUKS2_ONDISK_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "libcryptsetup.h"
+
+#define LUKS2_MAGIC_1ST "LUKS\xba\xbe"
+#define LUKS2_MAGIC_2ND "SKUL\xba\xbe"
+#define LUKS2_MAGIC_L 6
+#define LUKS2_UUID_L 40
+#define LUKS2_LABEL_L 48
+#define LUKS2_SALT_L 64
+#define LUKS2_CHECKSUM_ALG_L 32
+#define LUKS2_CHECKSUM_L 64
+
+#define LUKS2_KEYSLOTS_MAX 32
+#define LUKS2_TOKENS_MAX 32
+#define LUKS2_SEGMENT_MAX 32
+
+#define LUKS2_BUILTIN_TOKEN_PREFIX "luks2-"
+#define LUKS2_BUILTIN_TOKEN_PREFIX_LEN 6
+
+#define LUKS2_TOKEN_NAME_MAX 64
+
+#define LUKS2_TOKEN_KEYRING LUKS2_BUILTIN_TOKEN_PREFIX "keyring"
+
+#define LUKS2_DIGEST_MAX 8
+
+#define CRYPT_ANY_SEGMENT -1
+#define CRYPT_DEFAULT_SEGMENT -2
+#define CRYPT_ONE_SEGMENT -3
+
+#define CRYPT_ANY_DIGEST -1
+
+/* 20 MiB */
+#define LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH 0x1400000
+
+/* 1 GiB */
+#define LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH 0x40000000
+
+/* supported reencryption requirement versions */
+#define LUKS2_REENCRYPT_REQ_VERSION UINT8_C(2)
+#define LUKS2_DECRYPT_DATASHIFT_REQ_VERSION UINT8_C(3)
+
+/* see reencrypt_assembly_verification_data() in luks2_reencrypt_digest.c */
+/* LUKS2_REENCRYPT_MAX_VERSION UINT8_C(207) */
+
+struct device;
+struct luks2_reencrypt;
+struct reenc_protection;
+struct crypt_lock_handle;
+struct crypt_dm_active_device;
+struct luks_phdr; /* LUKS1 for conversion */
+
+/*
+ * LUKS2 header on-disk.
+ *
+ * Binary header is followed by JSON area.
+ * JSON area is followed by keyslot area and data area,
+ * these are described in JSON metadata.
+ *
+ * Note: uuid and csum_alg are intentionally at the same offsets as in LUKS1
+ * (the checksum algorithm replaces the hash field of LUKS1)
+ *
+ * String (char) fields should be zero-terminated.
+ * Padding should be wiped.
+ * Checksum is calculated with csum zeroed (+ full JSON area).
+ */
+struct luks2_hdr_disk {
+ char magic[LUKS2_MAGIC_L];
+ uint16_t version; /* Version 2 */
+ uint64_t hdr_size; /* in bytes, including JSON area */
+ uint64_t seqid; /* increased on every update */
+ char label[LUKS2_LABEL_L];
+ char checksum_alg[LUKS2_CHECKSUM_ALG_L];
+ uint8_t salt[LUKS2_SALT_L]; /* unique for every header/offset */
+ char uuid[LUKS2_UUID_L];
+ char subsystem[LUKS2_LABEL_L]; /* owner subsystem label */
+ uint64_t hdr_offset; /* offset from device start in bytes */
+ char _padding[184];
+ uint8_t csum[LUKS2_CHECKSUM_L];
+ char _padding4096[7*512];
+ /* JSON area starts here */
+} __attribute__ ((packed));
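+
+/*
+ * The packed binary header above is exactly 4096 bytes; the JSON area that
+ * follows fills the remainder of hdr_size (e.g. 0x4000 - 4096 bytes for the
+ * smallest supported header size).
+ */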
+
+/*
+ * LUKS2 header in-memory.
+ */
+struct luks2_hdr {
+ size_t hdr_size;
+ uint64_t seqid;
+ unsigned int version;
+ char label[LUKS2_LABEL_L];
+ char subsystem[LUKS2_LABEL_L];
+ char checksum_alg[LUKS2_CHECKSUM_ALG_L];
+ uint8_t salt1[LUKS2_SALT_L];
+ uint8_t salt2[LUKS2_SALT_L];
+ char uuid[LUKS2_UUID_L];
+ void *jobj;
+ void *jobj_rollback;
+};
+
+struct luks2_keyslot_params {
+ enum { LUKS2_KEYSLOT_AF_LUKS1 = 0 } af_type;
+ enum { LUKS2_KEYSLOT_AREA_RAW = 0 } area_type;
+
+ union {
+ struct {
+ char hash[LUKS2_CHECKSUM_ALG_L]; // or include luks.h
+ unsigned int stripes;
+ } luks1;
+ } af;
+
+ union {
+ struct {
+ char encryption[65]; // or include utils_crypt.h
+ size_t key_size;
+ } raw;
+ } area;
+};
+
+/*
+ * Supportable header sizes (hdr_disk + JSON area)
+ * Also used as offset for the 2nd header.
+ */
+#define LUKS2_HDR_16K_LEN 0x4000
+
+#define LUKS2_HDR_BIN_LEN sizeof(struct luks2_hdr_disk)
+
+//#define LUKS2_DEFAULT_HDR_SIZE 0x400000 /* 4 MiB */
+#define LUKS2_DEFAULT_HDR_SIZE 0x1000000 /* 16 MiB */
+
+#define LUKS2_MAX_KEYSLOTS_SIZE 0x8000000 /* 128 MiB */
+
+#define LUKS2_HDR_OFFSET_MAX 0x400000 /* 4 MiB */
+
+/* Offsets for secondary header (for scan if primary header is corrupted). */
+#define LUKS2_HDR2_OFFSETS { 0x04000, 0x008000, 0x010000, 0x020000, \
+ 0x40000, 0x080000, 0x100000, 0x200000, LUKS2_HDR_OFFSET_MAX }
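+
+/* The secondary header starts where the primary one ends, so its offset
+ * equals the primary hdr_size; recovery code probes each candidate above. */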
+
+int LUKS2_hdr_version_unlocked(struct crypt_device *cd,
+ const char *backup_file);
+
+int LUKS2_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr, int repair);
+int LUKS2_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_write_force(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_rollback(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_dump(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_dump_json(struct crypt_device *cd, struct luks2_hdr *hdr, const char **json);
+
+int LUKS2_hdr_uuid(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *uuid);
+
+int LUKS2_hdr_labels(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *label,
+ const char *subsystem,
+ int commit);
+
+void LUKS2_hdr_free(struct crypt_device *cd, struct luks2_hdr *hdr);
+
+int LUKS2_hdr_backup(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *backup_file);
+int LUKS2_hdr_restore(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *backup_file);
+
+uint64_t LUKS2_hdr_and_areas_size(struct luks2_hdr *hdr);
+uint64_t LUKS2_keyslots_size(struct luks2_hdr *hdr);
+uint64_t LUKS2_metadata_size(struct luks2_hdr *hdr);
+
+int LUKS2_keyslot_cipher_incompatible(struct crypt_device *cd, const char *cipher_spec);
+
+/*
+ * Generic LUKS2 keyslot
+ */
+int LUKS2_keyslot_open(struct crypt_device *cd,
+ int keyslot,
+ int segment,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk);
+
+int LUKS2_keyslot_open_all_segments(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vks);
+
+int LUKS2_keyslot_store(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const char *password,
+ size_t password_len,
+ const struct volume_key *vk,
+ const struct luks2_keyslot_params *params);
+
+int LUKS2_keyslot_wipe(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int wipe_area_only);
+
+crypt_keyslot_priority LUKS2_keyslot_priority_get(struct luks2_hdr *hdr, int keyslot);
+
+int LUKS2_keyslot_priority_set(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ crypt_keyslot_priority priority,
+ int commit);
+
+int LUKS2_keyslot_swap(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int keyslot2);
+
+/*
+ * Generic LUKS2 token
+ */
+int LUKS2_token_json_get(struct luks2_hdr *hdr,
+ int token,
+ const char **json);
+
+int LUKS2_token_assign(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int token,
+ int assign,
+ int commit);
+
+int LUKS2_token_is_assigned(struct luks2_hdr *hdr,
+ int keyslot,
+ int token);
+
+int LUKS2_token_assignment_copy(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot_from,
+ int keyslot_to,
+ int commit);
+
+int LUKS2_token_create(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *json,
+ int commit);
+
+crypt_token_info LUKS2_token_status(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char **type);
+
+int LUKS2_token_open_and_activate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *name,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ uint32_t flags,
+ void *usrptr);
+
+int LUKS2_token_unlock_key(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ int segment,
+ void *usrptr,
+ struct volume_key **vk);
+
+int LUKS2_token_keyring_get(struct luks2_hdr *hdr,
+ int token,
+ struct crypt_token_params_luks2_keyring *keyring_params);
+
+int LUKS2_token_keyring_json(char *buffer, size_t buffer_size,
+ const struct crypt_token_params_luks2_keyring *keyring_params);
+
+int LUKS2_token_unlock_passphrase(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr,
+ char **passphrase,
+ size_t *passphrase_size);
+
+void crypt_token_unload_external_all(struct crypt_device *cd);
+
+/*
+ * Generic LUKS2 digest
+ */
+int LUKS2_digest_any_matching(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk);
+
+int LUKS2_digest_verify_by_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int segment,
+ const struct volume_key *vk);
+
+int LUKS2_digest_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ int keyslot);
+
+int LUKS2_digest_assign(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int digest,
+ int assign,
+ int commit);
+
+int LUKS2_digest_segment_assign(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int segment,
+ int digest,
+ int assign,
+ int commit);
+
+int LUKS2_digest_by_keyslot(struct luks2_hdr *hdr, int keyslot);
+
+int LUKS2_digest_by_segment(struct luks2_hdr *hdr, int segment);
+
+int LUKS2_digest_create(struct crypt_device *cd,
+ const char *type,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk);
+
+/*
+ * LUKS2 generic
+ */
+int LUKS2_activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vk,
+ uint32_t flags);
+
+int LUKS2_activate_multi(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags);
+
+int LUKS2_deactivate(struct crypt_device *cd,
+ const char *name,
+ struct luks2_hdr *hdr,
+ struct crypt_dm_active_device *dmd,
+ uint32_t flags);
+
+int LUKS2_generate_hdr(
+ struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ const char *cipherName,
+ const char *cipherMode,
+ const char *integrity,
+ const char *uuid,
+ unsigned int sector_size,
+ uint64_t data_offset,
+ uint64_t align_offset,
+ uint64_t required_alignment,
+ uint64_t metadata_size,
+ uint64_t keyslots_size);
+
+int LUKS2_check_metadata_area_size(uint64_t metadata_size);
+int LUKS2_check_keyslots_area_size(uint64_t keyslots_size);
+
+int LUKS2_wipe_header_areas(struct crypt_device *cd,
+ struct luks2_hdr *hdr, bool detached_header);
+
+uint64_t LUKS2_get_data_offset(struct luks2_hdr *hdr);
+int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size, bool *dynamic);
+uint32_t LUKS2_get_sector_size(struct luks2_hdr *hdr);
+const char *LUKS2_get_cipher(struct luks2_hdr *hdr, int segment);
+const char *LUKS2_get_integrity(struct luks2_hdr *hdr, int segment);
+int LUKS2_keyslot_params_default(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct luks2_keyslot_params *params);
+int LUKS2_get_volume_key_size(struct luks2_hdr *hdr, int segment);
+int LUKS2_get_keyslot_stored_key_size(struct luks2_hdr *hdr, int keyslot);
+const char *LUKS2_get_keyslot_cipher(struct luks2_hdr *hdr, int keyslot, size_t *key_size);
+int LUKS2_keyslot_find_empty(struct crypt_device *cd, struct luks2_hdr *hdr, size_t keylength);
+int LUKS2_keyslot_active_count(struct luks2_hdr *hdr, int segment);
+crypt_keyslot_info LUKS2_keyslot_info(struct luks2_hdr *hdr, int keyslot);
+int LUKS2_keyslot_area(struct luks2_hdr *hdr,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length);
+int LUKS2_keyslot_pbkdf(struct luks2_hdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf);
+
+/*
+ * Permanent activation flags stored in header
+ */
+int LUKS2_config_get_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *flags);
+int LUKS2_config_set_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t flags);
+
+/*
+ * Requirements for device activation or header modification
+ */
+int LUKS2_config_get_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *reqs);
+int LUKS2_config_set_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs, bool commit);
+int LUKS2_config_set_requirement_version(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t req_id, uint8_t req_version, bool commit);
+
+int LUKS2_config_get_reencrypt_version(struct luks2_hdr *hdr, uint8_t *version);
+
+bool LUKS2_reencrypt_requirement_candidate(struct luks2_hdr *hdr);
+
+int LUKS2_unmet_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs_mask, int quiet);
+
+int LUKS2_key_description_by_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct volume_key *vk, int segment);
+int LUKS2_volume_key_load_in_keyring_by_keyslot(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct volume_key *vk, int keyslot);
+int LUKS2_volume_key_load_in_keyring_by_digest(struct crypt_device *cd,
+ struct volume_key *vk, int digest);
+
+int LUKS2_luks1_to_luks2(struct crypt_device *cd,
+ struct luks_phdr *hdr1,
+ struct luks2_hdr *hdr2);
+int LUKS2_luks2_to_luks1(struct crypt_device *cd,
+ struct luks2_hdr *hdr2,
+ struct luks_phdr *hdr1);
+
+/*
+ * LUKS2 reencryption
+ */
+int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *passphrase,
+ size_t passphrase_size,
+ struct volume_key **vks);
+
+void LUKS2_reencrypt_free(struct crypt_device *cd,
+ struct luks2_reencrypt *rh);
+
+crypt_reencrypt_info LUKS2_reencrypt_status(struct luks2_hdr *hdr);
+
+crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
+ struct crypt_params_reencrypt *params);
+
+int LUKS2_reencrypt_lock(struct crypt_device *cd,
+ struct crypt_lock_handle **reencrypt_lock);
+
+int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd,
+ const char *dm_uuid,
+ struct crypt_lock_handle **reencrypt_lock);
+
+void LUKS2_reencrypt_unlock(struct crypt_device *cd,
+ struct crypt_lock_handle *reencrypt_lock);
+
+int LUKS2_reencrypt_check_device_size(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint64_t check_size,
+ uint64_t *dev_size,
+ bool activation,
+ bool dynamic);
+
+int LUKS2_reencrypt_digest_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vks);
+
+int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct reenc_protection *rp,
+ int reencrypt_keyslot,
+ uint64_t *r_length);
+
+void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp);
+
+#endif
diff --git a/lib/luks2/luks2_digest.c b/lib/luks2/luks2_digest.c
new file mode 100644
index 0000000..933b059
--- /dev/null
+++ b/lib/luks2/luks2_digest.c
@@ -0,0 +1,455 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, digest handling
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+extern const digest_handler PBKDF2_digest;
+
+static const digest_handler *digest_handlers[LUKS2_DIGEST_MAX] = {
+ &PBKDF2_digest,
+ NULL
+};
+
+static const digest_handler *LUKS2_digest_handler_type(const char *type)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_DIGEST_MAX && digest_handlers[i]; i++) {
+ if (!strcmp(digest_handlers[i]->name, type))
+ return digest_handlers[i];
+ }
+
+ return NULL;
+}
+
+static const digest_handler *LUKS2_digest_handler(struct crypt_device *cd, int digest)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj1, *jobj2;
+
+ if (digest < 0)
+ return NULL;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return NULL;
+
+ if (!(jobj1 = LUKS2_get_digest_jobj(hdr, digest)))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, "type", &jobj2))
+ return NULL;
+
+ return LUKS2_digest_handler_type(json_object_get_string(jobj2));
+}
+
+static int LUKS2_digest_find_free(struct luks2_hdr *hdr)
+{
+ int digest = 0;
+
+ while (LUKS2_get_digest_jobj(hdr, digest) && digest < LUKS2_DIGEST_MAX)
+ digest++;
+
+ return digest < LUKS2_DIGEST_MAX ? digest : -1;
+}
+
+int LUKS2_digest_create(struct crypt_device *cd,
+ const char *type,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk)
+{
+ int digest;
+ const digest_handler *dh;
+
+ dh = LUKS2_digest_handler_type(type);
+ if (!dh)
+ return -EINVAL;
+
+ digest = LUKS2_digest_find_free(hdr);
+ if (digest < 0)
+ return -EINVAL;
+
+ log_dbg(cd, "Creating new digest %d (%s).", digest, type);
+
+ return dh->store(cd, digest, vk->key, vk->keylength) ?: digest;
+}
+
+int LUKS2_digest_by_keyslot(struct luks2_hdr *hdr, int keyslot)
+{
+ char keyslot_name[16];
+ json_object *jobj_digests, *jobj_digest_keyslots;
+
+ if (snprintf(keyslot_name, sizeof(keyslot_name), "%u", keyslot) < 1)
+ return -ENOMEM;
+
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ json_object_object_get_ex(val, "keyslots", &jobj_digest_keyslots);
+ if (LUKS2_array_jobj(jobj_digest_keyslots, keyslot_name))
+ return atoi(key);
+ }
+
+ return -ENOENT;
+}
+
+int LUKS2_digest_verify_by_digest(struct crypt_device *cd,
+ int digest,
+ const struct volume_key *vk)
+{
+ const digest_handler *h;
+ int r;
+
+ h = LUKS2_digest_handler(cd, digest);
+ if (!h)
+ return -EINVAL;
+
+ r = h->verify(cd, digest, vk->key, vk->keylength);
+ if (r < 0) {
+ log_dbg(cd, "Digest %d (%s) verify failed with %d.", digest, h->name, r);
+ return r;
+ }
+
+ return digest;
+}
+
+int LUKS2_digest_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ int keyslot)
+{
+ int digest;
+
+ digest = LUKS2_digest_by_keyslot(hdr, keyslot);
+ if (digest < 0)
+ return digest;
+
+ log_dbg(cd, "Verifying key from keyslot %d, digest %d.", keyslot, digest);
+
+ return LUKS2_digest_verify_by_digest(cd, digest, vk);
+}
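+
+/*
+ * Typical call sequence (a sketch): after a keyslot handler recovers a
+ * candidate volume key, the caller verifies it against the digest bound
+ * to that keyslot before trusting it:
+ *
+ *   r = LUKS2_digest_verify(cd, hdr, vk, keyslot);
+ *   if (r < 0)
+ *           ; // no digest assigned (-ENOENT) or key mismatch
+ *   else
+ *           ; // r is the id of the digest that matched
+ */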
+
+int LUKS2_digest_dump(struct crypt_device *cd, int digest)
+{
+ const digest_handler *h;
+
+ if (!(h = LUKS2_digest_handler(cd, digest)))
+ return -EINVAL;
+
+ return h->dump(cd, digest);
+}
+
+int LUKS2_digest_any_matching(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk)
+{
+ int digest;
+
+ for (digest = 0; digest < LUKS2_DIGEST_MAX; digest++)
+ if (LUKS2_digest_verify_by_digest(cd, digest, vk) == digest)
+ return digest;
+
+ return -ENOENT;
+}
+
+int LUKS2_digest_verify_by_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int segment,
+ const struct volume_key *vk)
+{
+ return LUKS2_digest_verify_by_digest(cd, LUKS2_digest_by_segment(hdr, segment), vk);
+}
+
+/* FIXME: a segment can have multiple digests */
+int LUKS2_digest_by_segment(struct luks2_hdr *hdr, int segment)
+{
+ char segment_name[16];
+ json_object *jobj_digests, *jobj_digest_segments;
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+
+ if (snprintf(segment_name, sizeof(segment_name), "%u", segment) < 1)
+ return -EINVAL;
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ json_object_object_get_ex(val, "segments", &jobj_digest_segments);
+ if (!LUKS2_array_jobj(jobj_digest_segments, segment_name))
+ continue;
+
+ return atoi(key);
+ }
+
+ return -ENOENT;
+}
+
+static int assign_one_digest(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, int digest, int assign)
+{
+ json_object *jobj1, *jobj_digest, *jobj_digest_keyslots;
+ char num[16];
+
+ log_dbg(cd, "Keyslot %i %s digest %i.", keyslot, assign ? "assigned to" : "unassigned from", digest);
+
+ jobj_digest = LUKS2_get_digest_jobj(hdr, digest);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_digest, "keyslots", &jobj_digest_keyslots);
+ if (!jobj_digest_keyslots)
+ return -EINVAL;
+
+ if (snprintf(num, sizeof(num), "%d", keyslot) < 0)
+ return -EINVAL;
+
+ if (assign) {
+ jobj1 = LUKS2_array_jobj(jobj_digest_keyslots, num);
+ if (!jobj1)
+ json_object_array_add(jobj_digest_keyslots, json_object_new_string(num));
+ } else {
+ jobj1 = LUKS2_array_remove(jobj_digest_keyslots, num);
+ if (jobj1)
+ json_object_object_add(jobj_digest, "keyslots", jobj1);
+ }
+
+ return 0;
+}
+
+int LUKS2_digest_assign(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, int digest, int assign, int commit)
+{
+ json_object *jobj_digests;
+ int r = 0;
+
+ if (digest == CRYPT_ANY_DIGEST) {
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ UNUSED(val);
+ r = assign_one_digest(cd, hdr, keyslot, atoi(key), assign);
+ if (r < 0)
+ break;
+ }
+ } else
+ r = assign_one_digest(cd, hdr, keyslot, digest, assign);
+
+ if (r < 0)
+ return r;
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+static int assign_all_segments(struct luks2_hdr *hdr, int digest, int assign)
+{
+ json_object *jobj1, *jobj_digest, *jobj_digest_segments;
+
+ jobj_digest = LUKS2_get_digest_jobj(hdr, digest);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_digest, "segments", &jobj_digest_segments);
+ if (!jobj_digest_segments)
+ return -EINVAL;
+
+ if (assign) {
+ json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, value) {
+ UNUSED(value);
+ jobj1 = LUKS2_array_jobj(jobj_digest_segments, key);
+ if (!jobj1)
+ json_object_array_add(jobj_digest_segments, json_object_new_string(key));
+ }
+ } else {
+ jobj1 = json_object_new_array();
+ if (!jobj1)
+ return -ENOMEM;
+ json_object_object_add(jobj_digest, "segments", jobj1);
+ }
+
+ return 0;
+}
+
+static int assign_one_segment(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int segment, int digest, int assign)
+{
+ json_object *jobj1, *jobj_digest, *jobj_digest_segments;
+ char num[16];
+
+ log_dbg(cd, "Segment %i %s digest %i.", segment, assign ? "assigned to" : "unassigned from", digest);
+
+ jobj_digest = LUKS2_get_digest_jobj(hdr, digest);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_digest, "segments", &jobj_digest_segments);
+ if (!jobj_digest_segments)
+ return -EINVAL;
+
+ if (snprintf(num, sizeof(num), "%d", segment) < 0)
+ return -EINVAL;
+
+ if (assign) {
+ jobj1 = LUKS2_array_jobj(jobj_digest_segments, num);
+ if (!jobj1)
+ json_object_array_add(jobj_digest_segments, json_object_new_string(num));
+ } else {
+ jobj1 = LUKS2_array_remove(jobj_digest_segments, num);
+ if (jobj1)
+ json_object_object_add(jobj_digest, "segments", jobj1);
+ }
+
+ return 0;
+}
+
+int LUKS2_digest_segment_assign(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int segment, int digest, int assign, int commit)
+{
+ json_object *jobj_digests;
+ int r = 0;
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ if (digest == CRYPT_ANY_DIGEST) {
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ UNUSED(val);
+ if (segment == CRYPT_ANY_SEGMENT)
+ r = assign_all_segments(hdr, atoi(key), assign);
+ else
+ r = assign_one_segment(cd, hdr, segment, atoi(key), assign);
+ if (r < 0)
+ break;
+ }
+ } else {
+ if (segment == CRYPT_ANY_SEGMENT)
+ r = assign_all_segments(hdr, digest, assign);
+ else
+ r = assign_one_segment(cd, hdr, segment, digest, assign);
+ }
+
+ if (r < 0)
+ return r;
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+static int digest_unused(json_object *jobj_digest)
+{
+ json_object *jobj;
+
+ json_object_object_get_ex(jobj_digest, "segments", &jobj);
+ if (!jobj || !json_object_is_type(jobj, json_type_array) || json_object_array_length(jobj) > 0)
+ return 0;
+
+ json_object_object_get_ex(jobj_digest, "keyslots", &jobj);
+ if (!jobj || !json_object_is_type(jobj, json_type_array))
+ return 0;
+
+ return json_object_array_length(jobj) > 0 ? 0 : 1;
+}
+
+void LUKS2_digests_erase_unused(struct crypt_device *cd,
+ struct luks2_hdr *hdr)
+{
+ json_object *jobj_digests;
+
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+ if (!jobj_digests || !json_object_is_type(jobj_digests, json_type_object))
+ return;
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ if (digest_unused(val)) {
+ log_dbg(cd, "Erasing unused digest %d.", atoi(key));
+ json_object_object_del(jobj_digests, key);
+ }
+ }
+}
+
+/* Key description helpers */
+static char *get_key_description_by_digest(struct crypt_device *cd, int digest)
+{
+ char *desc, digest_str[3];
+ int r;
+ size_t len;
+
+ if (!crypt_get_uuid(cd))
+ return NULL;
+
+ r = snprintf(digest_str, sizeof(digest_str), "d%u", digest);
+ if (r < 0 || (size_t)r >= sizeof(digest_str))
+ return NULL;
+
+ /* "cryptsetup:<uuid>-<digest_str>" + \0 */
+ len = strlen(crypt_get_uuid(cd)) + strlen(digest_str) + 13;
+
+ desc = malloc(len);
+ if (!desc)
+ return NULL;
+
+ r = snprintf(desc, len, "%s:%s-%s", "cryptsetup", crypt_get_uuid(cd), digest_str);
+ if (r < 0 || (size_t)r >= len) {
+ free(desc);
+ return NULL;
+ }
+
+ return desc;
+}
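+
+/*
+ * Example of the resulting keyring description (a sketch): for digest 0
+ * the function builds
+ *
+ *   "cryptsetup:<device uuid>-d0"
+ */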
+
+int LUKS2_key_description_by_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct volume_key *vk, int segment)
+{
+ char *desc = get_key_description_by_digest(cd, LUKS2_digest_by_segment(hdr, segment));
+ int r;
+
+ r = crypt_volume_key_set_description(vk, desc);
+ free(desc);
+ return r;
+}
+
+int LUKS2_volume_key_load_in_keyring_by_keyslot(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct volume_key *vk, int keyslot)
+{
+ char *desc = get_key_description_by_digest(cd, LUKS2_digest_by_keyslot(hdr, keyslot));
+ int r;
+
+ r = crypt_volume_key_set_description(vk, desc);
+ if (!r)
+ r = crypt_volume_key_load_in_keyring(cd, vk);
+
+ free(desc);
+ return r;
+}
+
+int LUKS2_volume_key_load_in_keyring_by_digest(struct crypt_device *cd,
+ struct volume_key *vk, int digest)
+{
+ char *desc = get_key_description_by_digest(cd, digest);
+ int r;
+
+ r = crypt_volume_key_set_description(vk, desc);
+ if (!r)
+ r = crypt_volume_key_load_in_keyring(cd, vk);
+
+ free(desc);
+ return r;
+}
diff --git a/lib/luks2/luks2_digest_pbkdf2.c b/lib/luks2/luks2_digest_pbkdf2.c
new file mode 100644
index 0000000..1009cfb
--- /dev/null
+++ b/lib/luks2/luks2_digest_pbkdf2.c
@@ -0,0 +1,210 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, PBKDF2 digest handler (LUKS1 compatible)
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+#define LUKS_DIGESTSIZE 20 // SHA1 output size (LUKS1 legacy)
+#define LUKS_SALTSIZE 32
+#define LUKS_MKD_ITERATIONS_MS 125
+
+static int PBKDF2_digest_verify(struct crypt_device *cd,
+ int digest,
+ const char *volume_key,
+ size_t volume_key_len)
+{
+ char checkHashBuf[64];
+ json_object *jobj_digest, *jobj1;
+ const char *hashSpec;
+ char *mkDigest = NULL, *mkDigestSalt = NULL;
+ unsigned int mkDigestIterations;
+ size_t len;
+ int r = -EINVAL;
+
+ /* This can be done only for internally linked digests */
+ jobj_digest = LUKS2_get_digest_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), digest);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_digest, "hash", &jobj1))
+ return -EINVAL;
+ hashSpec = json_object_get_string(jobj1);
+
+ if (!json_object_object_get_ex(jobj_digest, "iterations", &jobj1))
+ return -EINVAL;
+ mkDigestIterations = json_object_get_int64(jobj1);
+
+ if (!json_object_object_get_ex(jobj_digest, "salt", &jobj1))
+ return -EINVAL;
+ r = crypt_base64_decode(&mkDigestSalt, &len, json_object_get_string(jobj1),
+ json_object_get_string_len(jobj1));
+ if (r < 0)
+ goto out;
+ if (len != LUKS_SALTSIZE)
+ goto out;
+
+ if (!json_object_object_get_ex(jobj_digest, "digest", &jobj1))
+ goto out;
+ r = crypt_base64_decode(&mkDigest, &len, json_object_get_string(jobj1),
+ json_object_get_string_len(jobj1));
+ if (r < 0)
+ goto out;
+ if (len < LUKS_DIGESTSIZE ||
+ len > sizeof(checkHashBuf) ||
+ (len != LUKS_DIGESTSIZE && len != (size_t)crypt_hash_size(hashSpec)))
+ goto out;
+
+ r = -EPERM;
+ if (crypt_pbkdf(CRYPT_KDF_PBKDF2, hashSpec, volume_key, volume_key_len,
+ mkDigestSalt, LUKS_SALTSIZE,
+ checkHashBuf, len,
+ mkDigestIterations, 0, 0) < 0) {
+ r = -EINVAL;
+ } else {
+ if (crypt_backend_memeq(checkHashBuf, mkDigest, len) == 0)
+ r = 0;
+ }
+out:
+ free(mkDigest);
+ free(mkDigestSalt);
+ return r;
+}
+
+static int PBKDF2_digest_store(struct crypt_device *cd,
+ int digest,
+ const char *volume_key,
+ size_t volume_key_len)
+{
+ json_object *jobj_digest, *jobj_digests;
+ char salt[LUKS_SALTSIZE], digest_raw[128];
+ int hmac_size, r;
+ char *base64_str;
+ struct luks2_hdr *hdr;
+ struct crypt_pbkdf_limits pbkdf_limits;
+ const struct crypt_pbkdf_type *pbkdf_cd;
+ struct crypt_pbkdf_type pbkdf = {
+ .type = CRYPT_KDF_PBKDF2,
+ .time_ms = LUKS_MKD_ITERATIONS_MS,
+ };
+
+ /* Inherit hash from PBKDF setting */
+ pbkdf_cd = crypt_get_pbkdf_type(cd);
+ if (pbkdf_cd)
+ pbkdf.hash = pbkdf_cd->hash;
+ if (!pbkdf.hash)
+ pbkdf.hash = DEFAULT_LUKS1_HASH;
+
+ log_dbg(cd, "Setting PBKDF2 type key digest %d.", digest);
+
+ r = crypt_random_get(cd, salt, LUKS_SALTSIZE, CRYPT_RND_SALT);
+ if (r < 0)
+ return r;
+
+ r = crypt_pbkdf_get_limits(CRYPT_KDF_PBKDF2, &pbkdf_limits);
+ if (r < 0)
+ return r;
+
+ if (crypt_get_pbkdf(cd)->flags & CRYPT_PBKDF_NO_BENCHMARK)
+ pbkdf.iterations = pbkdf_limits.min_iterations;
+ else {
+ r = crypt_benchmark_pbkdf_internal(cd, &pbkdf, volume_key_len);
+ if (r < 0)
+ return r;
+ }
+
+ hmac_size = crypt_hmac_size(pbkdf.hash);
+ if (hmac_size < 0 || hmac_size > (int)sizeof(digest_raw))
+ return -EINVAL;
+
+ r = crypt_pbkdf(CRYPT_KDF_PBKDF2, pbkdf.hash, volume_key, volume_key_len,
+ salt, LUKS_SALTSIZE, digest_raw, hmac_size,
+ pbkdf.iterations, 0, 0);
+ if (r < 0)
+ return r;
+
+ jobj_digest = LUKS2_get_digest_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), digest);
+ jobj_digests = NULL;
+ if (!jobj_digest) {
+ hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ jobj_digest = json_object_new_object();
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+ }
+
+ json_object_object_add(jobj_digest, "type", json_object_new_string("pbkdf2"));
+ json_object_object_add(jobj_digest, "keyslots", json_object_new_array());
+ json_object_object_add(jobj_digest, "segments", json_object_new_array());
+ json_object_object_add(jobj_digest, "hash", json_object_new_string(pbkdf.hash));
+ json_object_object_add(jobj_digest, "iterations", json_object_new_int(pbkdf.iterations));
+
+ r = crypt_base64_encode(&base64_str, NULL, salt, LUKS_SALTSIZE);
+ if (r < 0) {
+ json_object_put(jobj_digest);
+ return r;
+ }
+ json_object_object_add(jobj_digest, "salt", json_object_new_string(base64_str));
+ free(base64_str);
+
+ r = crypt_base64_encode(&base64_str, NULL, digest_raw, hmac_size);
+ if (r < 0) {
+ json_object_put(jobj_digest);
+ return r;
+ }
+ json_object_object_add(jobj_digest, "digest", json_object_new_string(base64_str));
+ free(base64_str);
+
+ if (jobj_digests)
+ json_object_object_add_by_uint(jobj_digests, digest, jobj_digest);
+
+ JSON_DBG(cd, jobj_digest, "Digest JSON:");
+ return 0;
+}
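+
+/*
+ * The stored digest object has this shape (a sketch; base64 payloads and
+ * the iteration count are illustrative):
+ *
+ *   "0": {
+ *           "type": "pbkdf2",
+ *           "keyslots": [],
+ *           "segments": [],
+ *           "hash": "sha256",
+ *           "iterations": 100000,
+ *           "salt": "<base64>",
+ *           "digest": "<base64>"
+ *   }
+ */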
+
+static int PBKDF2_digest_dump(struct crypt_device *cd, int digest)
+{
+ json_object *jobj_digest, *jobj1;
+
+ /* This can be done only for internally linked digests */
+ jobj_digest = LUKS2_get_digest_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), digest);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_digest, "hash", &jobj1);
+ log_std(cd, "\tHash: %s\n", json_object_get_string(jobj1));
+
+ json_object_object_get_ex(jobj_digest, "iterations", &jobj1);
+ log_std(cd, "\tIterations: %" PRIu64 "\n", json_object_get_int64(jobj1));
+
+ json_object_object_get_ex(jobj_digest, "salt", &jobj1);
+ log_std(cd, "\tSalt: ");
+ hexprint_base64(cd, jobj1, " ", " ");
+
+ json_object_object_get_ex(jobj_digest, "digest", &jobj1);
+ log_std(cd, "\tDigest: ");
+ hexprint_base64(cd, jobj1, " ", " ");
+
+ return 0;
+}
+
+const digest_handler PBKDF2_digest = {
+ .name = "pbkdf2",
+ .verify = PBKDF2_digest_verify,
+ .store = PBKDF2_digest_store,
+ .dump = PBKDF2_digest_dump,
+};
diff --git a/lib/luks2/luks2_disk_metadata.c b/lib/luks2/luks2_disk_metadata.c
new file mode 100644
index 0000000..e995959
--- /dev/null
+++ b/lib/luks2/luks2_disk_metadata.c
@@ -0,0 +1,811 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+/*
+ * Helper functions
+ */
+static json_object *parse_json_len(struct crypt_device *cd, const char *json_area,
+ uint64_t max_length, int *json_len)
+{
+ json_object *jobj;
+ struct json_tokener *jtok;
+
+ /* INT32_MAX is internal (json-c) json_tokener_parse_ex() limit */
+ if (!json_area || max_length > INT32_MAX)
+ return NULL;
+
+ jtok = json_tokener_new();
+ if (!jtok) {
+ log_dbg(cd, "ERROR: Failed to init json tokener");
+ return NULL;
+ }
+
+ jobj = json_tokener_parse_ex(jtok, json_area, max_length);
+ if (!jobj)
+ log_dbg(cd, "ERROR: Failed to parse json data (%d): %s",
+ json_tokener_get_error(jtok),
+ json_tokener_error_desc(json_tokener_get_error(jtok)));
+ else
+ *json_len = jtok->char_offset;
+
+ json_tokener_free(jtok);
+
+ return jobj;
+}
+
+static void log_dbg_checksum(struct crypt_device *cd,
+ const uint8_t *csum, const char *csum_alg, const char *info)
+{
+ char csum_txt[2*LUKS2_CHECKSUM_L+1];
+ int i;
+
+ for (i = 0; i < crypt_hash_size(csum_alg); i++)
+ if (snprintf(&csum_txt[i*2], 3, "%02hhx", (const char)csum[i]) != 2)
+ return;
+
+ log_dbg(cd, "Checksum:%s (%s)", &csum_txt[0], info);
+}
+
+/*
+ * Calculate hash (checksum) of |LUKS2_bin|LUKS2_JSON_area| from in-memory structs.
+ * The LUKS2 on-disk header contains a unique salt for both the primary and secondary header.
+ * Checksum is always calculated with zeroed checksum field in binary header.
+ */
+static int hdr_checksum_calculate(const char *alg, struct luks2_hdr_disk *hdr_disk,
+ const char *json_area, size_t json_len)
+{
+ struct crypt_hash *hd = NULL;
+ int hash_size, r;
+
+ hash_size = crypt_hash_size(alg);
+ if (hash_size <= 0 || crypt_hash_init(&hd, alg))
+ return -EINVAL;
+
+ /* Binary header, csum zeroed. */
+ r = crypt_hash_write(hd, (char*)hdr_disk, LUKS2_HDR_BIN_LEN);
+
+ /* JSON area (including unused space) */
+ if (!r)
+ r = crypt_hash_write(hd, json_area, json_len);
+
+ if (!r)
+ r = crypt_hash_final(hd, (char*)hdr_disk->csum, (size_t)hash_size);
+
+ crypt_hash_destroy(hd);
+ return r;
+}
+
+/*
+ * Compare hash (checksum) of on-disk and in-memory header.
+ */
+static int hdr_checksum_check(struct crypt_device *cd,
+ const char *alg, struct luks2_hdr_disk *hdr_disk,
+ const char *json_area, size_t json_len)
+{
+ struct luks2_hdr_disk hdr_tmp;
+ int hash_size, r;
+
+ hash_size = crypt_hash_size(alg);
+ if (hash_size <= 0)
+ return -EINVAL;
+
+ /* Copy header and zero checksum. */
+ memcpy(&hdr_tmp, hdr_disk, LUKS2_HDR_BIN_LEN);
+ memset(&hdr_tmp.csum, 0, sizeof(hdr_tmp.csum));
+
+ r = hdr_checksum_calculate(alg, &hdr_tmp, json_area, json_len);
+ if (r < 0)
+ return r;
+
+ log_dbg_checksum(cd, hdr_disk->csum, alg, "on-disk");
+ log_dbg_checksum(cd, hdr_tmp.csum, alg, "in-memory");
+
+ if (memcmp(hdr_tmp.csum, hdr_disk->csum, (size_t)hash_size))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Convert header from on-disk format to in-memory struct
+ */
+static void hdr_from_disk(struct luks2_hdr_disk *hdr_disk1,
+ struct luks2_hdr_disk *hdr_disk2,
+ struct luks2_hdr *hdr,
+ int secondary)
+{
+ hdr->version = be16_to_cpu(hdr_disk1->version);
+ hdr->hdr_size = be64_to_cpu(hdr_disk1->hdr_size);
+ hdr->seqid = be64_to_cpu(hdr_disk1->seqid);
+
+ memcpy(hdr->label, hdr_disk1->label, LUKS2_LABEL_L);
+ hdr->label[LUKS2_LABEL_L - 1] = '\0';
+ memcpy(hdr->subsystem, hdr_disk1->subsystem, LUKS2_LABEL_L);
+ hdr->subsystem[LUKS2_LABEL_L - 1] = '\0';
+ memcpy(hdr->checksum_alg, hdr_disk1->checksum_alg, LUKS2_CHECKSUM_ALG_L);
+ hdr->checksum_alg[LUKS2_CHECKSUM_ALG_L - 1] = '\0';
+ memcpy(hdr->uuid, hdr_disk1->uuid, LUKS2_UUID_L);
+ hdr->uuid[LUKS2_UUID_L - 1] = '\0';
+
+ if (secondary) {
+ memcpy(hdr->salt1, hdr_disk2->salt, LUKS2_SALT_L);
+ memcpy(hdr->salt2, hdr_disk1->salt, LUKS2_SALT_L);
+ } else {
+ memcpy(hdr->salt1, hdr_disk1->salt, LUKS2_SALT_L);
+ memcpy(hdr->salt2, hdr_disk2->salt, LUKS2_SALT_L);
+ }
+}
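+
+/* Note: whichever copy is passed as hdr_disk1, the in-memory header always
+ * ends up with salt1 = primary header salt and salt2 = secondary header
+ * salt; the 'secondary' flag swaps them accordingly. */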
+
+/*
+ * Convert header from in-memory struct to on-disk format
+ */
+static void hdr_to_disk(struct luks2_hdr *hdr,
+ struct luks2_hdr_disk *hdr_disk,
+ int secondary, uint64_t offset)
+{
+ assert(((char*)&(hdr_disk->_padding4096) - (char*)&(hdr_disk->magic)) == 512);
+
+ memset(hdr_disk, 0, LUKS2_HDR_BIN_LEN);
+
+ memcpy(&hdr_disk->magic, secondary ? LUKS2_MAGIC_2ND : LUKS2_MAGIC_1ST, LUKS2_MAGIC_L);
+ hdr_disk->version = cpu_to_be16(hdr->version);
+ hdr_disk->hdr_size = cpu_to_be64(hdr->hdr_size);
+ hdr_disk->hdr_offset = cpu_to_be64(offset);
+ hdr_disk->seqid = cpu_to_be64(hdr->seqid);
+
+ memcpy(hdr_disk->label, hdr->label, MIN(strlen(hdr->label), LUKS2_LABEL_L));
+ hdr_disk->label[LUKS2_LABEL_L - 1] = '\0';
+ memcpy(hdr_disk->subsystem, hdr->subsystem, MIN(strlen(hdr->subsystem), LUKS2_LABEL_L));
+ hdr_disk->subsystem[LUKS2_LABEL_L - 1] = '\0';
+ memcpy(hdr_disk->checksum_alg, hdr->checksum_alg, MIN(strlen(hdr->checksum_alg), LUKS2_CHECKSUM_ALG_L));
+ hdr_disk->checksum_alg[LUKS2_CHECKSUM_ALG_L - 1] = '\0';
+ memcpy(hdr_disk->uuid, hdr->uuid, MIN(strlen(hdr->uuid), LUKS2_UUID_L));
+ hdr_disk->uuid[LUKS2_UUID_L - 1] = '\0';
+
+ memcpy(hdr_disk->salt, secondary ? hdr->salt2 : hdr->salt1, LUKS2_SALT_L);
+}
+
+/*
+ * Sanity checks before checksum is validated
+ */
+static int hdr_disk_sanity_check_pre(struct crypt_device *cd,
+ struct luks2_hdr_disk *hdr,
+ size_t *hdr_json_size, int secondary,
+ uint64_t offset)
+{
+ uint64_t hdr_size;
+
+ if (memcmp(hdr->magic, secondary ? LUKS2_MAGIC_2ND : LUKS2_MAGIC_1ST, LUKS2_MAGIC_L))
+ return -EINVAL;
+
+ if (be16_to_cpu(hdr->version) != 2) {
+ log_dbg(cd, "Unsupported LUKS2 header version %u.", be16_to_cpu(hdr->version));
+ return -EINVAL;
+ }
+
+ if (offset != be64_to_cpu(hdr->hdr_offset)) {
+ log_dbg(cd, "LUKS2 offset 0x%04" PRIx64 " on device differs to expected offset 0x%04" PRIx64 ".",
+ be64_to_cpu(hdr->hdr_offset), offset);
+ return -EINVAL;
+ }
+
+ hdr_size = be64_to_cpu(hdr->hdr_size);
+
+ if (hdr_size < LUKS2_HDR_16K_LEN || hdr_size > LUKS2_HDR_OFFSET_MAX) {
+ log_dbg(cd, "LUKS2 header has bogus size 0x%04" PRIx64 ".", hdr_size);
+ return -EINVAL;
+ }
+
+ if (secondary && (offset != hdr_size)) {
+ log_dbg(cd, "LUKS2 offset 0x%04" PRIx64 " in secondary header does not match size 0x%04" PRIx64 ".",
+ offset, hdr_size);
+ return -EINVAL;
+ }
+
+ /* FIXME: sanity check checksum alg. */
+
+ log_dbg(cd, "LUKS2 header version %u of size %" PRIu64 " bytes, checksum %s.",
+ be16_to_cpu(hdr->version), hdr_size,
+ hdr->checksum_alg);
+
+ *hdr_json_size = hdr_size - LUKS2_HDR_BIN_LEN;
+ return 0;
+}
+
+/*
+ * Read LUKS2 header from disk at specific offset.
+ */
+static int hdr_read_disk(struct crypt_device *cd,
+ struct device *device, struct luks2_hdr_disk *hdr_disk,
+ char **json_area, uint64_t offset, int secondary)
+{
+ size_t hdr_json_size = 0;
+ int devfd, r;
+
+ log_dbg(cd, "Trying to read %s LUKS2 header at offset 0x%" PRIx64 ".",
+ secondary ? "secondary" : "primary", offset);
+
+ devfd = device_open_locked(cd, device, O_RDONLY);
+ if (devfd < 0)
+ return devfd == -1 ? -EIO : devfd;
+
+ /*
+ * Read binary header and run sanity check before reading
+ * JSON area and validating checksum.
+ */
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), hdr_disk,
+ LUKS2_HDR_BIN_LEN, offset) != LUKS2_HDR_BIN_LEN) {
+ return -EIO;
+ }
+
+ /*
+ * hdr_json_size is validated if this call succeeds
+ */
+ r = hdr_disk_sanity_check_pre(cd, hdr_disk, &hdr_json_size, secondary, offset);
+ if (r < 0)
+ return r;
+
+ /*
+ * Allocate and read the JSON area. The whole area must always be read.
+ */
+ *json_area = malloc(hdr_json_size);
+ if (!*json_area)
+ return -ENOMEM;
+
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), *json_area, hdr_json_size,
+ offset + LUKS2_HDR_BIN_LEN) != (ssize_t)hdr_json_size) {
+ free(*json_area);
+ *json_area = NULL;
+ return -EIO;
+ }
+
+ /*
+ * Calculate and validate checksum and zero it afterwards.
+ */
+ if (hdr_checksum_check(cd, hdr_disk->checksum_alg, hdr_disk,
+ *json_area, hdr_json_size)) {
+ log_dbg(cd, "LUKS2 header checksum error (offset %" PRIu64 ").", offset);
+ free(*json_area);
+ *json_area = NULL;
+ r = -EINVAL;
+ }
+ memset(hdr_disk->csum, 0, LUKS2_CHECKSUM_L);
+
+ return r;
+}
+
+/*
+ * Write LUKS2 header to disk at specific offset.
+ */
+static int hdr_write_disk(struct crypt_device *cd,
+ struct device *device, struct luks2_hdr *hdr,
+ const char *json_area, int secondary)
+{
+ struct luks2_hdr_disk hdr_disk;
+ uint64_t offset = secondary ? hdr->hdr_size : 0;
+ size_t hdr_json_len;
+ int devfd, r;
+
+ log_dbg(cd, "Trying to write LUKS2 header (%zu bytes) at offset %" PRIu64 ".",
+ hdr->hdr_size, offset);
+
+ devfd = device_open_locked(cd, device, O_RDWR);
+ if (devfd < 0)
+ return devfd == -1 ? -EINVAL : devfd;
+
+ hdr_json_len = hdr->hdr_size - LUKS2_HDR_BIN_LEN;
+
+ hdr_to_disk(hdr, &hdr_disk, secondary, offset);
+
+ /*
+ * Write header without checksum but with proper seqid.
+ */
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), (char *)&hdr_disk,
+ LUKS2_HDR_BIN_LEN, offset) < (ssize_t)LUKS2_HDR_BIN_LEN) {
+ return -EIO;
+ }
+
+ /*
+ * Write json area.
+ */
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device),
+ CONST_CAST(char*)json_area, hdr_json_len,
+ LUKS2_HDR_BIN_LEN + offset) < (ssize_t)hdr_json_len) {
+ return -EIO;
+ }
+
+ /*
+ * Calculate checksum and write header with checksum.
+ */
+ r = hdr_checksum_calculate(hdr_disk.checksum_alg, &hdr_disk,
+ json_area, hdr_json_len);
+ if (r < 0) {
+ return r;
+ }
+ log_dbg_checksum(cd, hdr_disk.csum, hdr_disk.checksum_alg, "in-memory");
+
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), (char *)&hdr_disk,
+ LUKS2_HDR_BIN_LEN, offset) < (ssize_t)LUKS2_HDR_BIN_LEN)
+ r = -EIO;
+
+ device_sync(cd, device);
+ return r;
+}
+
+static int LUKS2_check_sequence_id(struct crypt_device *cd, struct luks2_hdr *hdr, struct device *device)
+{
+ int devfd;
+ struct luks2_hdr_disk dhdr;
+
+ if (!hdr)
+ return -EINVAL;
+
+ devfd = device_open_locked(cd, device, O_RDONLY);
+ if (devfd < 0)
+ return devfd == -1 ? -EINVAL : devfd;
+
+ /* we need only the first 512 bytes, see the luks2_hdr_disk structure */
+ if ((read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), &dhdr, 512, 0) != 512))
+ return -EIO;
+
+ /* there's nothing to check if there's no LUKS2 header */
+ if ((be16_to_cpu(dhdr.version) != 2) ||
+ memcmp(dhdr.magic, LUKS2_MAGIC_1ST, LUKS2_MAGIC_L) ||
+ strcmp(dhdr.uuid, hdr->uuid))
+ return 0;
+
+ return hdr->seqid != be64_to_cpu(dhdr.seqid);
+}
+
+int LUKS2_device_write_lock(struct crypt_device *cd, struct luks2_hdr *hdr, struct device *device)
+{
+ int r = device_write_lock(cd, device);
+
+ if (r < 0) {
+ log_err(cd, _("Failed to acquire write lock on device %s."), device_path(device));
+ return r;
+ }
+
+ /* run the sequence id check only on the first write lock (r == 1) and without LUKS2 reencryption in progress */
+ if (r == 1 && !crypt_get_luks2_reencrypt(cd)) {
+ log_dbg(cd, "Checking context sequence id matches value stored on disk.");
+ if (LUKS2_check_sequence_id(cd, hdr, device)) {
+ device_write_unlock(cd, device);
+ log_err(cd, _("Detected attempt for concurrent LUKS2 metadata update. Aborting operation."));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Convert in-memory LUKS2 header and write it to disk.
+ * This will increase sequence id, write both header copies and calculate checksum.
+ */
+int LUKS2_disk_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr, struct device *device, bool seqid_check)
+{
+ char *json_area;
+ const char *json_text;
+ size_t json_area_len;
+ int r;
+
+ if (hdr->version != 2) {
+ log_dbg(cd, "Unsupported LUKS2 header version (%u).", hdr->version);
+ return -EINVAL;
+ }
+
+ r = device_check_size(cd, crypt_metadata_device(cd), LUKS2_hdr_and_areas_size(hdr), 1);
+ if (r)
+ return r;
+
+ /*
+ * Allocate and zero JSON area (of proper header size).
+ */
+ json_area_len = hdr->hdr_size - LUKS2_HDR_BIN_LEN;
+ json_area = crypt_zalloc(json_area_len);
+ if (!json_area)
+ return -ENOMEM;
+
+ /*
+ * Generate a space-efficient text JSON representation into the json area.
+ */
+ json_text = json_object_to_json_string_ext(hdr->jobj,
+ JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
+ if (!json_text || !*json_text) {
+ log_dbg(cd, "Cannot parse JSON object to text representation.");
+ free(json_area);
+ return -ENOMEM;
+ }
+ if (strlen(json_text) > (json_area_len - 1)) {
+ log_dbg(cd, "JSON is too large (%zu > %zu).", strlen(json_text), json_area_len);
+ free(json_area);
+ return -EINVAL;
+ }
+ strncpy(json_area, json_text, json_area_len);
+
+ if (seqid_check)
+ r = LUKS2_device_write_lock(cd, hdr, device);
+ else
+ r = device_write_lock(cd, device);
+ if (r < 0) {
+ free(json_area);
+ return r;
+ }
+
+ /* Increase sequence id before writing it to disk. */
+ hdr->seqid++;
+
+ /* Write primary and secondary header */
+ r = hdr_write_disk(cd, device, hdr, json_area, 0);
+ if (!r)
+ r = hdr_write_disk(cd, device, hdr, json_area, 1);
+
+ if (r)
+ log_dbg(cd, "LUKS2 header write failed (%d).", r);
+
+ device_write_unlock(cd, device);
+
+ free(json_area);
+ return r;
+}
+
+static int validate_json_area(struct crypt_device *cd, const char *json_area,
+ uint64_t json_len, uint64_t max_length)
+{
+ char c;
+
+ /* Enforce there are no needless opening bytes */
+ if (*json_area != '{') {
+ log_dbg(cd, "ERROR: Opening character must be left curly bracket: '{'.");
+ return -EINVAL;
+ }
+
+ if (json_len >= max_length) {
+ log_dbg(cd, "ERROR: Missing trailing null byte beyond parsed json data string.");
+ return -EINVAL;
+ }
+
+ /*
+ * TODO:
+ * validate there are legal json format characters between
+ * 'json_area' and 'json_area + json_len'
+ */
+
+ do {
+ c = *(json_area + json_len);
+ if (c != '\0') {
+ log_dbg(cd, "ERROR: Forbidden ascii code 0x%02hhx found beyond json data string at offset %" PRIu64,
+ c, json_len);
+ return -EINVAL;
+ }
+ } while (++json_len < max_length);
+
+ return 0;
+}
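+
+/*
+ * A valid JSON area therefore looks like this (sketch):
+ *
+ *   { ... json text ... }\0\0\0 ... \0
+ *
+ * The text starts with '{' and every byte after the parsed object, up to
+ * the end of the area, must be zero.
+ */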
+
+static int validate_luks2_json_object(struct crypt_device *cd, json_object *jobj_hdr, uint64_t length)
+{
+ int r;
+
+ /* we require top level object to be of json_type_object */
+ r = !json_object_is_type(jobj_hdr, json_type_object);
+ if (r) {
+ log_dbg(cd, "ERROR: Resulting object is not a json object type");
+ return r;
+ }
+
+ r = LUKS2_hdr_validate(cd, jobj_hdr, length);
+ if (r) {
+ log_dbg(cd, "Repairing JSON metadata.");
+ /* try to correct known glitches */
+ LUKS2_hdr_repair(cd, jobj_hdr);
+
+ /* run validation again */
+ r = LUKS2_hdr_validate(cd, jobj_hdr, length);
+ }
+
+ if (r)
+ log_dbg(cd, "ERROR: LUKS2 validation failed");
+
+ return r;
+}
+
+static json_object *parse_and_validate_json(struct crypt_device *cd,
+ const char *json_area, uint64_t max_length)
+{
+ int json_len, r;
+ json_object *jobj = parse_json_len(cd, json_area, max_length, &json_len);
+
+ if (!jobj)
+ return NULL;
+
+ /* successful parse_json_len must not return offset <= 0 */
+ assert(json_len > 0);
+
+ r = validate_json_area(cd, json_area, json_len, max_length);
+ if (!r)
+ r = validate_luks2_json_object(cd, jobj, max_length);
+
+ if (r) {
+ json_object_put(jobj);
+ jobj = NULL;
+ }
+
+ return jobj;
+}
+
+static int detect_device_signatures(struct crypt_device *cd, const char *path)
+{
+ blk_probe_status prb_state;
+ int r;
+ struct blkid_handle *h;
+
+ if (!blk_supported()) {
+ log_dbg(cd, "Blkid probing of device signatures disabled.");
+ return 0;
+ }
+
+ if ((r = blk_init_by_path(&h, path))) {
+ log_dbg(cd, "Failed to initialize blkid_handle by path.");
+ return -EINVAL;
+ }
+
+ /* We don't care about details. Be fast. */
+ blk_set_chains_for_fast_detection(h);
+
+ /* Filter out crypto_LUKS; we don't care about it here */
+ blk_superblocks_filter_luks(h);
+
+ prb_state = blk_safeprobe(h);
+
+ switch (prb_state) {
+ case PRB_AMBIGUOUS:
+ log_dbg(cd, "Blkid probe couldn't decide device type unambiguously.");
+ /* fall through */
+ case PRB_FAIL:
+ log_dbg(cd, "Blkid probe failed.");
+ r = -EINVAL;
+ break;
+ case PRB_OK: /* crypto_LUKS type is filtered out */
+ r = -EINVAL;
+
+ if (blk_is_partition(h))
+ log_dbg(cd, "Blkid probe detected partition type '%s'", blk_get_partition_type(h));
+ else if (blk_is_superblock(h))
+ log_dbg(cd, "blkid probe detected superblock type '%s'", blk_get_superblock_type(h));
+ break;
+ case PRB_EMPTY:
+ log_dbg(cd, "Blkid probe detected no foreign device signature.");
+ }
+ blk_free(h);
+ return r;
+}
+
+/*
+ * Read and convert the on-disk LUKS2 header to its in-memory representation.
+ * Try to do recovery if on-disk state is not consistent.
+ */
+int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct device *device, int do_recovery, int do_blkprobe)
+{
+ enum { HDR_OK, HDR_OBSOLETE, HDR_FAIL, HDR_FAIL_IO } state_hdr1, state_hdr2;
+ struct luks2_hdr_disk hdr_disk1, hdr_disk2;
+ char *json_area1 = NULL, *json_area2 = NULL;
+ json_object *jobj_hdr1 = NULL, *jobj_hdr2 = NULL;
+ unsigned int i;
+ int r;
+ uint64_t hdr_size;
+ uint64_t hdr2_offsets[] = LUKS2_HDR2_OFFSETS;
+
+ /* Skip auto-recovery if locks are disabled and we are not doing explicit LUKS2 repair */
+ if (do_recovery && do_blkprobe && !crypt_metadata_locking_enabled()) {
+ do_recovery = 0;
+ log_dbg(cd, "Disabling header auto-recovery due to locking being disabled.");
+ }
+
+ /*
+ * Read primary LUKS2 header (offset 0).
+ */
+ state_hdr1 = HDR_FAIL;
+ r = hdr_read_disk(cd, device, &hdr_disk1, &json_area1, 0, 0);
+ if (r == 0) {
+ jobj_hdr1 = parse_and_validate_json(cd, json_area1, be64_to_cpu(hdr_disk1.hdr_size) - LUKS2_HDR_BIN_LEN);
+ state_hdr1 = jobj_hdr1 ? HDR_OK : HDR_OBSOLETE;
+ } else if (r == -EIO)
+ state_hdr1 = HDR_FAIL_IO;
+
+ /*
+ * Read secondary LUKS2 header (follows primary).
+ */
+ state_hdr2 = HDR_FAIL;
+ if (state_hdr1 != HDR_FAIL && state_hdr1 != HDR_FAIL_IO) {
+ r = hdr_read_disk(cd, device, &hdr_disk2, &json_area2, be64_to_cpu(hdr_disk1.hdr_size), 1);
+ if (r == 0) {
+ jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
+ state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
+ } else if (r == -EIO)
+ state_hdr2 = HDR_FAIL_IO;
+ } else {
+ /*
+ * No valid primary header (size unknown), so probe all known offsets.
+ */
+ for (r = -EINVAL, i = 0; r < 0 && i < ARRAY_SIZE(hdr2_offsets); i++)
+ r = hdr_read_disk(cd, device, &hdr_disk2, &json_area2, hdr2_offsets[i], 1);
+
+ if (r == 0) {
+ jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
+ state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
+ } else if (r == -EIO)
+ state_hdr2 = HDR_FAIL_IO;
+ }
+
+ /*
+ * Check sequence id if both headers are read correctly.
+ */
+ if (state_hdr1 == HDR_OK && state_hdr2 == HDR_OK) {
+ if (be64_to_cpu(hdr_disk1.seqid) > be64_to_cpu(hdr_disk2.seqid))
+ state_hdr2 = HDR_OBSOLETE;
+ else if (be64_to_cpu(hdr_disk1.seqid) < be64_to_cpu(hdr_disk2.seqid))
+ state_hdr1 = HDR_OBSOLETE;
+ }
+
+ /* check header with keyslots to fit the device */
+ if (state_hdr1 == HDR_OK)
+ hdr_size = LUKS2_hdr_and_areas_size_jobj(jobj_hdr1);
+ else if (state_hdr2 == HDR_OK)
+ hdr_size = LUKS2_hdr_and_areas_size_jobj(jobj_hdr2);
+ else {
+ r = (state_hdr1 == HDR_FAIL_IO && state_hdr2 == HDR_FAIL_IO) ? -EIO : -EINVAL;
+ goto err;
+ }
+
+ r = device_check_size(cd, device, hdr_size, 0);
+ if (r)
+ goto err;
+
+ /*
+ * Try to rewrite (recover) the bad header, always regenerating its salt.
+ */
+ if (state_hdr1 == HDR_OK && state_hdr2 != HDR_OK) {
+ log_dbg(cd, "Secondary LUKS2 header requires recovery.");
+
+ if (do_blkprobe && (r = detect_device_signatures(cd, device_path(device)))) {
+ log_err(cd, _("Device contains ambiguous signatures, cannot auto-recover LUKS2.\n"
+ "Please run \"cryptsetup repair\" for recovery."));
+ goto err;
+ }
+
+ if (do_recovery) {
+ memcpy(&hdr_disk2, &hdr_disk1, LUKS2_HDR_BIN_LEN);
+ r = crypt_random_get(cd, (char*)hdr_disk2.salt, sizeof(hdr_disk2.salt), CRYPT_RND_SALT);
+ if (r)
+ log_dbg(cd, "Cannot generate header salt.");
+ else {
+ hdr_from_disk(&hdr_disk1, &hdr_disk2, hdr, 0);
+ r = hdr_write_disk(cd, device, hdr, json_area1, 1);
+ }
+ if (r)
+ log_dbg(cd, "Secondary LUKS2 header recovery failed.");
+ }
+ } else if (state_hdr1 != HDR_OK && state_hdr2 == HDR_OK) {
+ log_dbg(cd, "Primary LUKS2 header requires recovery.");
+
+ if (do_blkprobe && (r = detect_device_signatures(cd, device_path(device)))) {
+ log_err(cd, _("Device contains ambiguous signatures, cannot auto-recover LUKS2.\n"
+ "Please run \"cryptsetup repair\" for recovery."));
+ goto err;
+ }
+
+ if (do_recovery) {
+ memcpy(&hdr_disk1, &hdr_disk2, LUKS2_HDR_BIN_LEN);
+ r = crypt_random_get(cd, (char*)hdr_disk1.salt, sizeof(hdr_disk1.salt), CRYPT_RND_SALT);
+ if (r)
+ log_dbg(cd, "Cannot generate header salt.");
+ else {
+ hdr_from_disk(&hdr_disk2, &hdr_disk1, hdr, 1);
+ r = hdr_write_disk(cd, device, hdr, json_area2, 0);
+ }
+ if (r)
+ log_dbg(cd, "Primary LUKS2 header recovery failed.");
+ }
+ }
+
+ free(json_area1);
+ json_area1 = NULL;
+ free(json_area2);
+ json_area2 = NULL;
+
+ /* wrong lock for write mode during recovery attempt */
+ if (r == -EAGAIN)
+ goto err;
+
+ /*
+ * Even if a header is in failed state, its salt is still copied into the in-memory header.
+ */
+ if (state_hdr1 == HDR_OK) {
+ hdr_from_disk(&hdr_disk1, &hdr_disk2, hdr, 0);
+ hdr->jobj = jobj_hdr1;
+ json_object_put(jobj_hdr2);
+ } else if (state_hdr2 == HDR_OK) {
+ hdr_from_disk(&hdr_disk2, &hdr_disk1, hdr, 1);
+ hdr->jobj = jobj_hdr2;
+ json_object_put(jobj_hdr1);
+ }
+
+ /*
+ * FIXME: should this fail? At least one header was read correctly.
+ * r = (state_hdr1 == HDR_FAIL_IO || state_hdr2 == HDR_FAIL_IO) ? -EIO : -EINVAL;
+ */
+ return 0;
+err:
+ log_dbg(cd, "LUKS2 header read failed (%d).", r);
+
+ free(json_area1);
+ free(json_area2);
+ json_object_put(jobj_hdr1);
+ json_object_put(jobj_hdr2);
+ hdr->jobj = NULL;
+ return r;
+}
+
+int LUKS2_hdr_version_unlocked(struct crypt_device *cd, const char *backup_file)
+{
+ struct {
+ char magic[LUKS2_MAGIC_L];
+ uint16_t version;
+ } __attribute__ ((packed)) hdr;
+ struct device *device = NULL;
+ int r = 0, devfd = -1, flags;
+
+ if (!backup_file)
+ device = crypt_metadata_device(cd);
+ else if (device_alloc(cd, &device, backup_file) < 0)
+ return 0;
+
+ if (!device)
+ return 0;
+
+ flags = O_RDONLY;
+ if (device_direct_io(device))
+ flags |= O_DIRECT;
+
+ devfd = open(device_path(device), flags);
+ if (devfd != -1 && (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), &hdr, sizeof(hdr), 0) == sizeof(hdr)) &&
+ !memcmp(hdr.magic, LUKS2_MAGIC_1ST, LUKS2_MAGIC_L))
+ r = (int)be16_to_cpu(hdr.version);
+
+ if (devfd != -1)
+ close(devfd);
+
+ if (backup_file)
+ device_free(cd, device);
+
+ return r;
+}
diff --git a/lib/luks2/luks2_internal.h b/lib/luks2/luks2_internal.h
new file mode 100644
index 0000000..b564a48
--- /dev/null
+++ b/lib/luks2/luks2_internal.h
@@ -0,0 +1,388 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_LUKS2_INTERNAL_H
+#define _CRYPTSETUP_LUKS2_INTERNAL_H
+
+#include <stdio.h>
+#include <errno.h>
+#include <json-c/json.h>
+
+#include "internal.h"
+#include "luks2.h"
+
+/* override useless forward slash escape when supported by json-c */
+#ifndef JSON_C_TO_STRING_NOSLASHESCAPE
+#define JSON_C_TO_STRING_NOSLASHESCAPE 0
+#endif
+
+/*
+ * On-disk access function prototypes
+ */
+int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct device *device, int do_recovery, int do_blkprobe);
+int LUKS2_disk_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct device *device, bool seqid_check);
+int LUKS2_device_write_lock(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct device *device);
+
+/*
+ * JSON struct access helpers
+ */
+json_object *LUKS2_get_keyslot_jobj(struct luks2_hdr *hdr, int keyslot);
+json_object *LUKS2_get_token_jobj(struct luks2_hdr *hdr, int token);
+json_object *LUKS2_get_digest_jobj(struct luks2_hdr *hdr, int digest);
+json_object *LUKS2_get_segment_jobj(struct luks2_hdr *hdr, int segment);
+json_object *LUKS2_get_tokens_jobj(struct luks2_hdr *hdr);
+json_object *LUKS2_get_segments_jobj(struct luks2_hdr *hdr);
+
+void hexprint_base64(struct crypt_device *cd, json_object *jobj,
+ const char *sep, const char *line_sep);
+
+uint64_t crypt_jobj_get_uint64(json_object *jobj);
+uint32_t crypt_jobj_get_uint32(json_object *jobj);
+json_object *crypt_jobj_new_uint64(uint64_t value);
+
+int json_object_object_add_by_uint(json_object *jobj, unsigned key, json_object *jobj_val);
+void json_object_object_del_by_uint(json_object *jobj, unsigned key);
+int json_object_copy(json_object *jobj_src, json_object **jobj_dst);
+
+void JSON_DBG(struct crypt_device *cd, json_object *jobj, const char *desc);
+
+/*
+ * LUKS2 JSON validation
+ */
+
+/* validation helper */
+bool validate_json_uint32(json_object *jobj);
+json_object *json_contains(struct crypt_device *cd, json_object *jobj, const char *name,
+ const char *section, const char *key, json_type type);
+json_object *json_contains_string(struct crypt_device *cd, json_object *jobj,
+ const char *name, const char *section, const char *key);
+
+int LUKS2_hdr_validate(struct crypt_device *cd, json_object *hdr_jobj, uint64_t json_size);
+int LUKS2_check_json_size(struct crypt_device *cd, const struct luks2_hdr *hdr);
+int LUKS2_token_validate(struct crypt_device *cd, json_object *hdr_jobj,
+ json_object *jobj_token, const char *key);
+void LUKS2_token_dump(struct crypt_device *cd, int token);
+
+/*
+ * LUKS2 JSON repair for known glitches
+ */
+void LUKS2_hdr_repair(struct crypt_device *cd, json_object *jobj_hdr);
+void LUKS2_keyslots_repair(struct crypt_device *cd, json_object *jobj_hdr);
+
+/*
+ * JSON array helpers
+ */
+json_object *LUKS2_array_jobj(json_object *array, const char *num);
+json_object *LUKS2_array_remove(json_object *array, const char *num);
+
+/*
+ * Plugins API
+ */
+
+/**
+ * LUKS2 keyslots handlers (EXPERIMENTAL)
+ */
+typedef int (*keyslot_alloc_func)(struct crypt_device *cd, int keyslot,
+ size_t volume_key_len,
+ const struct luks2_keyslot_params *params);
+typedef int (*keyslot_update_func)(struct crypt_device *cd, int keyslot,
+ const struct luks2_keyslot_params *params);
+typedef int (*keyslot_open_func) (struct crypt_device *cd, int keyslot,
+ const char *password, size_t password_len,
+ char *volume_key, size_t volume_key_len);
+typedef int (*keyslot_store_func)(struct crypt_device *cd, int keyslot,
+ const char *password, size_t password_len,
+ const char *volume_key, size_t volume_key_len);
+typedef int (*keyslot_wipe_func) (struct crypt_device *cd, int keyslot);
+typedef int (*keyslot_dump_func) (struct crypt_device *cd, int keyslot);
+typedef int (*keyslot_validate_func) (struct crypt_device *cd, json_object *jobj_keyslot);
+typedef void(*keyslot_repair_func) (json_object *jobj_keyslot);
+
+/* see LUKS2_luks2_to_luks1 */
+int placeholder_keyslot_alloc(struct crypt_device *cd,
+ int keyslot,
+ uint64_t area_offset,
+ uint64_t area_length);
+
+/* validate all keyslot implementations in hdr json */
+int LUKS2_keyslots_validate(struct crypt_device *cd, json_object *hdr_jobj);
+
+typedef struct {
+ const char *name;
+ keyslot_alloc_func alloc;
+ keyslot_update_func update;
+ keyslot_open_func open;
+ keyslot_store_func store;
+ keyslot_wipe_func wipe;
+ keyslot_dump_func dump;
+ keyslot_validate_func validate;
+ keyslot_repair_func repair;
+} keyslot_handler;
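+
+/*
+ * An implementation fills in this vtable; e.g. the built-in LUKS2 keyslot
+ * handler is declared along these lines (a sketch, fields abbreviated):
+ *
+ *   const keyslot_handler luks2_keyslot = {
+ *           .name  = "luks2",
+ *           .alloc = luks2_keyslot_alloc,
+ *           .open  = luks2_keyslot_open,
+ *           ...
+ *   };
+ */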
+
+struct reenc_protection {
+ enum { REENC_PROTECTION_NOT_SET = 0,
+ REENC_PROTECTION_NONE,
+ REENC_PROTECTION_CHECKSUM,
+ REENC_PROTECTION_JOURNAL,
+ REENC_PROTECTION_DATASHIFT } type;
+
+ union {
+ struct {
+ char hash[LUKS2_CHECKSUM_ALG_L];
+ struct crypt_hash *ch;
+ size_t hash_size;
+ /* buffer for checksums */
+ void *checksums;
+ size_t checksums_len;
+ size_t block_size;
+ } csum;
+ struct {
+ uint64_t data_shift;
+ } ds;
+ } p;
+};
+
+/**
+ * LUKS2 digest handlers (EXPERIMENTAL)
+ */
+typedef int (*digest_verify_func)(struct crypt_device *cd, int digest,
+ const char *volume_key, size_t volume_key_len);
+typedef int (*digest_store_func) (struct crypt_device *cd, int digest,
+ const char *volume_key, size_t volume_key_len);
+typedef int (*digest_dump_func) (struct crypt_device *cd, int digest);
+
+typedef struct {
+ const char *name;
+ digest_verify_func verify;
+ digest_store_func store;
+ digest_dump_func dump;
+} digest_handler;
+
+int keyring_open(struct crypt_device *cd,
+ int token,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr);
+
+void keyring_dump(struct crypt_device *cd, const char *json);
+
+int keyring_validate(struct crypt_device *cd, const char *json);
+
+void keyring_buffer_free(void *buffer, size_t buffer_size);
+
+struct crypt_token_handler_v2 {
+ const char *name;
+ crypt_token_open_func open;
+ crypt_token_buffer_free_func buffer_free;
+ crypt_token_validate_func validate;
+ crypt_token_dump_func dump;
+
+ /* here ends v1. Do not touch anything above */
+
+ crypt_token_open_pin_func open_pin;
+ crypt_token_version_func version;
+
+ void *dlhandle;
+};
+
+/*
+ * The initial sequence of structure members in union 'u' must always
+ * be identical. Version 4 must fully contain version 3, which in turn
+ * must fully contain version 2, and so on.
+ *
+ * See C standard, section 6.5.2.3, item 5.
+ */
+struct crypt_token_handler_internal {
+ uint32_t version;
+ union {
+ crypt_token_handler v1; /* deprecated public structure */
+ struct crypt_token_handler_v2 v2; /* internal helper v2 structure */
+ } u;
+};
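+
+/*
+ * Illustrative consequence of the rule above (a sketch, assuming the
+ * public crypt_token_handler starts with 'name' exactly as v2 does):
+ * members of the common initial sequence may be read through either
+ * union branch, whichever version was stored:
+ *
+ * static const char *token_name(const struct crypt_token_handler_internal *h)
+ * {
+ *     // 'name' belongs to the common initial sequence of v1 and v2,
+ *     // so this read is well defined even if v1 was assigned.
+ *     return h->u.v2.name;
+ * }
+ */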
+
+int LUKS2_find_area_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
+ size_t keylength, uint64_t *area_offset, uint64_t *area_length);
+int LUKS2_find_area_max_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t *area_offset, uint64_t *area_length);
+
+uint64_t LUKS2_hdr_and_areas_size_jobj(json_object *jobj);
+
+int LUKS2_check_cipher(struct crypt_device *cd,
+ size_t keylength,
+ const char *cipher,
+ const char *cipher_mode);
+
+static inline const char *crypt_reencrypt_mode_to_str(crypt_reencrypt_mode_info mi)
+{
+ if (mi == CRYPT_REENCRYPT_REENCRYPT)
+ return "reencrypt";
+ if (mi == CRYPT_REENCRYPT_ENCRYPT)
+ return "encrypt";
+ if (mi == CRYPT_REENCRYPT_DECRYPT)
+ return "decrypt";
+ return "<unknown>";
+}
+
+/*
+ * Generic LUKS2 keyslot
+ */
+int LUKS2_keyslot_reencrypt_store(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const void *buffer,
+ size_t buffer_length);
+
+int LUKS2_keyslot_reencrypt_allocate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment);
+
+int LUKS2_keyslot_reencrypt_update_needed(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment);
+
+int LUKS2_keyslot_reencrypt_update(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment,
+ struct volume_key *vks);
+
+int LUKS2_keyslot_reencrypt_load(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ struct reenc_protection *rp,
+ bool primary);
+
+int LUKS2_keyslot_reencrypt_digest_create(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint8_t version,
+ struct volume_key *vks);
+
+int LUKS2_keyslot_dump(struct crypt_device *cd,
+ int keyslot);
+
+int LUKS2_keyslot_jobj_area(json_object *jobj_keyslot, uint64_t *offset, uint64_t *length);
+
+/* JSON helpers */
+uint64_t json_segment_get_offset(json_object *jobj_segment, unsigned blockwise);
+const char *json_segment_type(json_object *jobj_segment);
+uint64_t json_segment_get_iv_offset(json_object *jobj_segment);
+uint64_t json_segment_get_size(json_object *jobj_segment, unsigned blockwise);
+const char *json_segment_get_cipher(json_object *jobj_segment);
+uint32_t json_segment_get_sector_size(json_object *jobj_segment);
+bool json_segment_is_backup(json_object *jobj_segment);
+json_object *json_segments_get_segment(json_object *jobj_segments, int segment);
+unsigned json_segments_count(json_object *jobj_segments);
+void json_segment_remove_flag(json_object *jobj_segment, const char *flag);
+uint64_t json_segments_get_minimal_offset(json_object *jobj_segments, unsigned blockwise);
+json_object *json_segment_create_linear(uint64_t offset, const uint64_t *length, unsigned reencryption);
+json_object *json_segment_create_crypt(uint64_t offset, uint64_t iv_offset, const uint64_t *length, const char *cipher, uint32_t sector_size, unsigned reencryption);
+int json_segments_segment_in_reencrypt(json_object *jobj_segments);
+bool json_segment_cmp(json_object *jobj_segment_1, json_object *jobj_segment_2);
+bool json_segment_contains_flag(json_object *jobj_segment, const char *flag_str, size_t len);
+
+int LUKS2_assembly_multisegment_dmd(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vks,
+ json_object *jobj_segments,
+ struct crypt_dm_active_device *dmd);
+
+/*
+ * Generic LUKS2 segment
+ */
+int LUKS2_segments_count(struct luks2_hdr *hdr);
+
+int LUKS2_segment_first_unused_id(struct luks2_hdr *hdr);
+
+int LUKS2_segment_set_flag(json_object *jobj_segment, const char *flag);
+
+json_object *LUKS2_get_segment_by_flag(struct luks2_hdr *hdr, const char *flag);
+
+int LUKS2_get_segment_id_by_flag(struct luks2_hdr *hdr, const char *flag);
+
+int LUKS2_segments_set(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ json_object *jobj_segments,
+ int commit);
+
+uint64_t LUKS2_segment_offset(struct luks2_hdr *hdr,
+ int segment,
+ unsigned blockwise);
+
+uint64_t LUKS2_segment_size(struct luks2_hdr *hdr,
+ int segment,
+ unsigned blockwise);
+
+int LUKS2_segment_is_type(struct luks2_hdr *hdr,
+ int segment,
+ const char *type);
+
+int LUKS2_segment_by_type(struct luks2_hdr *hdr,
+ const char *type);
+
+int LUKS2_last_segment_by_type(struct luks2_hdr *hdr,
+ const char *type);
+
+int LUKS2_get_default_segment(struct luks2_hdr *hdr);
+
+int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr);
+int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr);
+int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise);
+
+/*
+ * Generic LUKS2 digest
+ */
+int LUKS2_digest_verify_by_digest(struct crypt_device *cd,
+ int digest,
+ const struct volume_key *vk);
+
+void LUKS2_digests_erase_unused(struct crypt_device *cd,
+ struct luks2_hdr *hdr);
+
+int LUKS2_digest_dump(struct crypt_device *cd,
+ int digest);
+
+/*
+ * Generic LUKS2 token
+ */
+int LUKS2_tokens_count(struct luks2_hdr *hdr);
+
+/*
+ * LUKS2 generic
+ */
+int LUKS2_reload(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags);
+
+int LUKS2_keyslot_for_segment(struct luks2_hdr *hdr, int keyslot, int segment);
+int LUKS2_find_keyslot(struct luks2_hdr *hdr, const char *type);
+int LUKS2_set_keyslots_size(struct luks2_hdr *hdr, uint64_t data_offset);
+
+#endif
diff --git a/lib/luks2/luks2_json_format.c b/lib/luks2/luks2_json_format.c
new file mode 100644
index 0000000..4456358
--- /dev/null
+++ b/lib/luks2/luks2_json_format.c
@@ -0,0 +1,411 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, LUKS2 header format code
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include <uuid/uuid.h>
+
+struct area {
+ uint64_t offset;
+ uint64_t length;
+};
+
+static size_t get_area_size(size_t keylength)
+{
+ /* for now it is AF_split_sectors */
+ return size_round_up(keylength * 4000, 4096);
+}
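+
+/*
+ * Worked example for the formula above: a 64-byte volume key expands
+ * to 64 * 4000 = 256000 stripe bytes after the anti-forensic split,
+ * which size_round_up() pads to the next 4096-byte boundary,
+ * 63 * 4096 = 258048 bytes per keyslot area.
+ */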
+
+static size_t get_min_offset(struct luks2_hdr *hdr)
+{
+ return 2 * hdr->hdr_size;
+}
+
+static size_t get_max_offset(struct luks2_hdr *hdr)
+{
+ return LUKS2_hdr_and_areas_size(hdr);
+}
+
+int LUKS2_find_area_max_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t *area_offset, uint64_t *area_length)
+{
+ struct area areas[LUKS2_KEYSLOTS_MAX], sorted_areas[LUKS2_KEYSLOTS_MAX+1] = {};
+ int i, j, k, area_i;
+ size_t valid_offset, offset, length;
+
+ /* fill area offset + length table */
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ if (!LUKS2_keyslot_area(hdr, i, &areas[i].offset, &areas[i].length))
+ continue;
+ areas[i].length = 0;
+ areas[i].offset = 0;
+ }
+
+ /* sort table */
+ k = 0; /* index in sorted table */
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ offset = get_max_offset(hdr) ?: UINT64_MAX;
+ area_i = -1;
+ /* search for the smallest offset in table */
+ for (j = 0; j < LUKS2_KEYSLOTS_MAX; j++)
+ if (areas[j].offset && areas[j].offset <= offset) {
+ area_i = j;
+ offset = areas[j].offset;
+ }
+
+ if (area_i >= 0) {
+ sorted_areas[k].length = areas[area_i].length;
+ sorted_areas[k].offset = areas[area_i].offset;
+ areas[area_i].length = 0;
+ areas[area_i].offset = 0;
+ k++;
+ }
+ }
+
+ sorted_areas[LUKS2_KEYSLOTS_MAX].offset = get_max_offset(hdr);
+ sorted_areas[LUKS2_KEYSLOTS_MAX].length = 1;
+
+ /* search for the gap we can use */
+ length = valid_offset = 0;
+ offset = get_min_offset(hdr);
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX+1; i++) {
+ /* skip empty */
+ if (sorted_areas[i].offset == 0 || sorted_areas[i].length == 0)
+ continue;
+
+ /* found bigger gap than the last one */
+ if ((offset < sorted_areas[i].offset) && (sorted_areas[i].offset - offset) > length) {
+ length = sorted_areas[i].offset - offset;
+ valid_offset = offset;
+ }
+
+ /* move beyond allocated area */
+ offset = sorted_areas[i].offset + sorted_areas[i].length;
+ }
+
+ /* this search 'algorithm' does not work with unaligned areas */
+ assert(length == size_round_up(length, 4096));
+ assert(valid_offset == size_round_up(valid_offset, 4096));
+
+ if (!length) {
+ log_dbg(cd, "Not enough space in header keyslot area.");
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Found largest free area %zu -> %zu", valid_offset, length + valid_offset);
+
+ *area_offset = valid_offset;
+ *area_length = length;
+
+ return 0;
+}
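+
+/*
+ * Illustrative run of the search above (numbers made up): with
+ * get_min_offset() = 32768 and used areas [32768, +258048] and
+ * [548864, +258048], the gap starts at 32768 + 258048 = 290816 and
+ * ends at 548864, so offset 290816 with length 258048 is reported,
+ * unless the tail gap before get_max_offset() happens to be larger.
+ */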
+
+int LUKS2_find_area_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
+ size_t keylength, uint64_t *area_offset, uint64_t *area_length)
+{
+ struct area areas[LUKS2_KEYSLOTS_MAX], sorted_areas[LUKS2_KEYSLOTS_MAX] = {};
+ int i, j, k, area_i;
+ size_t offset, length;
+
+ /* fill area offset + length table */
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ if (!LUKS2_keyslot_area(hdr, i, &areas[i].offset, &areas[i].length))
+ continue;
+ areas[i].length = 0;
+ areas[i].offset = 0;
+ }
+
+ /* sort table */
+ k = 0; /* index in sorted table */
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ offset = get_max_offset(hdr) ?: UINT64_MAX;
+ area_i = -1;
+ /* search for the smallest offset in table */
+ for (j = 0; j < LUKS2_KEYSLOTS_MAX; j++)
+ if (areas[j].offset && areas[j].offset <= offset) {
+ area_i = j;
+ offset = areas[j].offset;
+ }
+
+ if (area_i >= 0) {
+ sorted_areas[k].length = areas[area_i].length;
+ sorted_areas[k].offset = areas[area_i].offset;
+ areas[area_i].length = 0;
+ areas[area_i].offset = 0;
+ k++;
+ }
+ }
+
+ /* search for the gap we can use */
+ offset = get_min_offset(hdr);
+ length = get_area_size(keylength);
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ /* skip empty */
+ if (sorted_areas[i].offset == 0 || sorted_areas[i].length == 0)
+ continue;
+
+ /* enough space before the used area */
+ if ((offset < sorted_areas[i].offset) && ((offset + length) <= sorted_areas[i].offset))
+ break;
+
+ /* both offset and length are already aligned to 4096 bytes */
+ offset = sorted_areas[i].offset + sorted_areas[i].length;
+ }
+
+ if ((offset + length) > get_max_offset(hdr)) {
+ log_dbg(cd, "Not enough space in header keyslot area.");
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Found area %zu -> %zu", offset, length + offset);
+
+ if (area_offset)
+ *area_offset = offset;
+ if (area_length)
+ *area_length = length;
+
+ return 0;
+}
+
+int LUKS2_check_metadata_area_size(uint64_t metadata_size)
+{
+ /* see LUKS2_HDR2_OFFSETS */
+ return (metadata_size != 0x004000 &&
+ metadata_size != 0x008000 && metadata_size != 0x010000 &&
+ metadata_size != 0x020000 && metadata_size != 0x040000 &&
+ metadata_size != 0x080000 && metadata_size != 0x100000 &&
+ metadata_size != 0x200000 && metadata_size != 0x400000);
+}
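+
+/* The constants above are the supported metadata area sizes:
+ * 16 KiB (0x004000) doubling up to 4 MiB (0x400000). */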
+
+int LUKS2_check_keyslots_area_size(uint64_t keyslots_size)
+{
+ return (MISALIGNED_4K(keyslots_size) ||
+ keyslots_size > LUKS2_MAX_KEYSLOTS_SIZE);
+}
+
+int LUKS2_generate_hdr(
+ struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ const char *cipherName,
+ const char *cipherMode,
+ const char *integrity,
+ const char *uuid,
+ unsigned int sector_size, /* in bytes */
+ uint64_t data_offset, /* in bytes */
+ uint64_t align_offset, /* in bytes */
+ uint64_t required_alignment,
+ uint64_t metadata_size,
+ uint64_t keyslots_size)
+{
+ struct json_object *jobj_segment, *jobj_integrity, *jobj_keyslots, *jobj_segments, *jobj_config;
+ char cipher[128];
+ uuid_t partitionUuid;
+ int r, digest;
+ uint64_t mdev_size;
+
+ if (!metadata_size)
+ metadata_size = LUKS2_HDR_16K_LEN;
+ hdr->hdr_size = metadata_size;
+
+ if (data_offset && data_offset < get_min_offset(hdr)) {
+ log_err(cd, _("Requested data offset is too small."));
+ return -EINVAL;
+ }
+
+ /* Increase keyslots area size according to data offset */
+ if (!keyslots_size && data_offset)
+ keyslots_size = data_offset - get_min_offset(hdr);
+
+ /* keyslots size has to be 4 KiB aligned */
+ keyslots_size -= (keyslots_size % 4096);
+
+ if (keyslots_size > LUKS2_MAX_KEYSLOTS_SIZE)
+ keyslots_size = LUKS2_MAX_KEYSLOTS_SIZE;
+
+ if (!keyslots_size) {
+ assert(LUKS2_DEFAULT_HDR_SIZE > 2 * LUKS2_HDR_OFFSET_MAX);
+ keyslots_size = LUKS2_DEFAULT_HDR_SIZE - get_min_offset(hdr);
+ /* Decrease keyslots_size due to metadata device being too small */
+ if (!device_size(crypt_metadata_device(cd), &mdev_size) &&
+ ((keyslots_size + get_min_offset(hdr)) > mdev_size) &&
+ device_fallocate(crypt_metadata_device(cd), keyslots_size + get_min_offset(hdr)) &&
+ (get_min_offset(hdr) <= mdev_size))
+ keyslots_size = mdev_size - get_min_offset(hdr);
+ }
+
+ /* Decrease keyslots_size if we have smaller data_offset */
+ if (data_offset && (keyslots_size + get_min_offset(hdr)) > data_offset) {
+ keyslots_size = data_offset - get_min_offset(hdr);
+ log_dbg(cd, "Decreasing keyslot area size to %" PRIu64
+ " bytes due to the requested data offset %"
+ PRIu64 " bytes.", keyslots_size, data_offset);
+ }
+
+ /* Data offset has priority */
+ if (!data_offset && required_alignment) {
+ data_offset = size_round_up(get_min_offset(hdr) + keyslots_size,
+ (size_t)required_alignment);
+ data_offset += align_offset;
+ }
+
+ log_dbg(cd, "Formatting LUKS2 with JSON metadata area %" PRIu64
+ " bytes and keyslots area %" PRIu64 " bytes.",
+ metadata_size - LUKS2_HDR_BIN_LEN, keyslots_size);
+
+ if (keyslots_size < (LUKS2_HDR_OFFSET_MAX - 2*LUKS2_HDR_16K_LEN))
+ log_std(cd, _("WARNING: keyslots area (%" PRIu64 " bytes) is very small,"
+ " available LUKS2 keyslot count is very limited.\n"),
+ keyslots_size);
+
+ hdr->seqid = 1;
+ hdr->version = 2;
+ memset(hdr->label, 0, LUKS2_LABEL_L);
+ strcpy(hdr->checksum_alg, "sha256");
+ crypt_random_get(cd, (char*)hdr->salt1, LUKS2_SALT_L, CRYPT_RND_SALT);
+ crypt_random_get(cd, (char*)hdr->salt2, LUKS2_SALT_L, CRYPT_RND_SALT);
+
+ if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
+ log_err(cd, _("Wrong LUKS UUID format provided."));
+ return -EINVAL;
+ }
+ if (!uuid)
+ uuid_generate(partitionUuid);
+
+ uuid_unparse(partitionUuid, hdr->uuid);
+
+ if (*cipherMode != '\0')
+ r = snprintf(cipher, sizeof(cipher), "%s-%s", cipherName, cipherMode);
+ else
+ r = snprintf(cipher, sizeof(cipher), "%s", cipherName);
+ if (r < 0 || (size_t)r >= sizeof(cipher))
+ return -EINVAL;
+
+ hdr->jobj = json_object_new_object();
+
+ jobj_keyslots = json_object_new_object();
+ json_object_object_add(hdr->jobj, "keyslots", jobj_keyslots);
+ json_object_object_add(hdr->jobj, "tokens", json_object_new_object());
+ jobj_segments = json_object_new_object();
+ json_object_object_add(hdr->jobj, "segments", jobj_segments);
+ json_object_object_add(hdr->jobj, "digests", json_object_new_object());
+ jobj_config = json_object_new_object();
+ json_object_object_add(hdr->jobj, "config", jobj_config);
+
+ digest = LUKS2_digest_create(cd, "pbkdf2", hdr, vk);
+ if (digest < 0)
+ goto err;
+
+ if (LUKS2_digest_segment_assign(cd, hdr, 0, digest, 1, 0) < 0)
+ goto err;
+
+ jobj_segment = json_segment_create_crypt(data_offset, 0, NULL, cipher, sector_size, 0);
+ if (!jobj_segment)
+ goto err;
+
+ if (integrity) {
+ jobj_integrity = json_object_new_object();
+ json_object_object_add(jobj_integrity, "type", json_object_new_string(integrity));
+ json_object_object_add(jobj_integrity, "journal_encryption", json_object_new_string("none"));
+ json_object_object_add(jobj_integrity, "journal_integrity", json_object_new_string("none"));
+ json_object_object_add(jobj_segment, "integrity", jobj_integrity);
+ }
+
+ json_object_object_add_by_uint(jobj_segments, 0, jobj_segment);
+
+ json_object_object_add(jobj_config, "json_size", crypt_jobj_new_uint64(metadata_size - LUKS2_HDR_BIN_LEN));
+ json_object_object_add(jobj_config, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));
+
+ JSON_DBG(cd, hdr->jobj, "Header JSON:");
+ return 0;
+err:
+ json_object_put(hdr->jobj);
+ hdr->jobj = NULL;
+ return -EINVAL;
+}
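+
+/*
+ * Shape of the JSON skeleton produced above (values abbreviated):
+ *
+ * {
+ *   "keyslots": {},
+ *   "tokens":   {},
+ *   "segments": { "0": { "type": "crypt", ... } },
+ *   "digests":  { "0": { "type": "pbkdf2", ... } },
+ *   "config":   { "json_size": "...", "keyslots_size": "..." }
+ * }
+ */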
+
+int LUKS2_wipe_header_areas(struct crypt_device *cd,
+ struct luks2_hdr *hdr, bool detached_header)
+{
+ int r;
+ uint64_t offset, length;
+ size_t wipe_block;
+
+ /* Wipe complete header, keyslots and padding areas with zeroes. */
+ offset = 0;
+ length = LUKS2_get_data_offset(hdr) * SECTOR_SIZE;
+ wipe_block = 1024 * 1024;
+
+ if (LUKS2_hdr_validate(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN))
+ return -EINVAL;
+
+ /* On a detached header, wipe at least the first 4 KiB */
+ if (detached_header) {
+ length = 4096;
+ wipe_block = 4096;
+ }
+
+ r = device_check_size(cd, crypt_metadata_device(cd), length, 1);
+ if (r)
+ return r;
+
+ log_dbg(cd, "Wiping LUKS areas (0x%06" PRIx64 " - 0x%06" PRIx64") with zeroes.",
+ offset, length + offset);
+
+ r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO,
+ offset, length, wipe_block, NULL, NULL);
+ if (r < 0)
+ return r;
+
+ /* Wipe keyslot area */
+ wipe_block = 1024 * 1024;
+ offset = get_min_offset(hdr);
+ length = LUKS2_keyslots_size(hdr);
+
+ log_dbg(cd, "Wiping keyslots area (0x%06" PRIx64 " - 0x%06" PRIx64") with random data.",
+ offset, length + offset);
+
+ return crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_RANDOM,
+ offset, length, wipe_block, NULL, NULL);
+}
+
+int LUKS2_set_keyslots_size(struct luks2_hdr *hdr, uint64_t data_offset)
+{
+ json_object *jobj_config;
+ uint64_t keyslots_size;
+
+ if (data_offset < get_min_offset(hdr))
+ return 1;
+
+ keyslots_size = data_offset - get_min_offset(hdr);
+
+ /* keep keyslots_size reasonable for custom data alignments */
+ if (keyslots_size > LUKS2_MAX_KEYSLOTS_SIZE)
+ keyslots_size = LUKS2_MAX_KEYSLOTS_SIZE;
+
+ /* keyslots size has to be 4 KiB aligned */
+ keyslots_size -= (keyslots_size % 4096);
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return 1;
+
+ json_object_object_add(jobj_config, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));
+ return 0;
+}
diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
new file mode 100644
index 0000000..4771f04
--- /dev/null
+++ b/lib/luks2/luks2_json_metadata.c
@@ -0,0 +1,2874 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include "../integrity/integrity.h"
+#include <ctype.h>
+#include <uuid/uuid.h>
+
+#define LUKS_STRIPES 4000
+
+struct interval {
+ uint64_t offset;
+ uint64_t length;
+};
+
+void hexprint_base64(struct crypt_device *cd, json_object *jobj,
+ const char *sep, const char *line_sep)
+{
+ char *buf = NULL;
+ size_t buf_len;
+ unsigned int i;
+
+ if (crypt_base64_decode(&buf, &buf_len, json_object_get_string(jobj),
+ json_object_get_string_len(jobj)))
+ return;
+
+ for (i = 0; i < buf_len; i++) {
+ if (i && !(i % 16))
+ log_std(cd, "\n\t%s", line_sep);
+ log_std(cd, "%02hhx%s", buf[i], sep);
+ }
+ log_std(cd, "\n");
+ free(buf);
+}
+
+void JSON_DBG(struct crypt_device *cd, json_object *jobj, const char *desc)
+{
+ if (desc)
+ crypt_log(cd, CRYPT_LOG_DEBUG_JSON, desc);
+ crypt_log(cd, CRYPT_LOG_DEBUG_JSON, json_object_to_json_string_ext(jobj,
+ JSON_C_TO_STRING_PRETTY | JSON_C_TO_STRING_NOSLASHESCAPE));
+}
+
+/*
+ * JSON array helpers
+ */
+struct json_object *LUKS2_array_jobj(struct json_object *array, const char *num)
+{
+ struct json_object *jobj1;
+ int i;
+
+ for (i = 0; i < (int) json_object_array_length(array); i++) {
+ jobj1 = json_object_array_get_idx(array, i);
+ if (!strcmp(num, json_object_get_string(jobj1)))
+ return jobj1;
+ }
+
+ return NULL;
+}
+
+struct json_object *LUKS2_array_remove(struct json_object *array, const char *num)
+{
+ struct json_object *jobj1, *jobj_removing = NULL, *array_new;
+ int i;
+
+ jobj_removing = LUKS2_array_jobj(array, num);
+ if (!jobj_removing)
+ return NULL;
+
+ /* Create new array without jobj_removing. */
+ array_new = json_object_new_array();
+ for (i = 0; i < (int) json_object_array_length(array); i++) {
+ jobj1 = json_object_array_get_idx(array, i);
+ if (jobj1 != jobj_removing)
+ json_object_array_add(array_new, json_object_get(jobj1));
+ }
+
+ return array_new;
+}
+
+/*
+ * JSON struct access helpers
+ */
+json_object *LUKS2_get_keyslot_jobj(struct luks2_hdr *hdr, int keyslot)
+{
+ json_object *jobj1, *jobj2;
+ char keyslot_name[16];
+
+ if (!hdr || keyslot < 0)
+ return NULL;
+
+ if (snprintf(keyslot_name, sizeof(keyslot_name), "%u", keyslot) < 1)
+ return NULL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj1))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, keyslot_name, &jobj2))
+ return NULL;
+
+ return jobj2;
+}
+
+json_object *LUKS2_get_tokens_jobj(struct luks2_hdr *hdr)
+{
+ json_object *jobj_tokens;
+
+ if (!hdr || !json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens))
+ return NULL;
+
+ return jobj_tokens;
+}
+
+json_object *LUKS2_get_token_jobj(struct luks2_hdr *hdr, int token)
+{
+ json_object *jobj1, *jobj2;
+ char token_name[16];
+
+ if (!hdr || token < 0)
+ return NULL;
+
+ jobj1 = LUKS2_get_tokens_jobj(hdr);
+ if (!jobj1)
+ return NULL;
+
+ if (snprintf(token_name, sizeof(token_name), "%u", token) < 1)
+ return NULL;
+
+ json_object_object_get_ex(jobj1, token_name, &jobj2);
+ return jobj2;
+}
+
+json_object *LUKS2_get_digest_jobj(struct luks2_hdr *hdr, int digest)
+{
+ json_object *jobj1, *jobj2;
+ char digest_name[16];
+
+ if (!hdr || digest < 0)
+ return NULL;
+
+ if (snprintf(digest_name, sizeof(digest_name), "%u", digest) < 1)
+ return NULL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "digests", &jobj1))
+ return NULL;
+
+ json_object_object_get_ex(jobj1, digest_name, &jobj2);
+ return jobj2;
+}
+
+static json_object *json_get_segments_jobj(json_object *hdr_jobj)
+{
+ json_object *jobj_segments;
+
+ if (!hdr_jobj || !json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments))
+ return NULL;
+
+ return jobj_segments;
+}
+
+json_object *LUKS2_get_segment_jobj(struct luks2_hdr *hdr, int segment)
+{
+ if (!hdr)
+ return NULL;
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ return json_segments_get_segment(json_get_segments_jobj(hdr->jobj), segment);
+}
+
+json_object *LUKS2_get_segments_jobj(struct luks2_hdr *hdr)
+{
+ return hdr ? json_get_segments_jobj(hdr->jobj) : NULL;
+}
+
+int LUKS2_segments_count(struct luks2_hdr *hdr)
+{
+ if (!hdr)
+ return -EINVAL;
+
+ return json_segments_count(LUKS2_get_segments_jobj(hdr));
+}
+
+int LUKS2_get_default_segment(struct luks2_hdr *hdr)
+{
+ int s = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
+ if (s >= 0)
+ return s;
+
+ if (LUKS2_segments_count(hdr) >= 1)
+ return 0;
+
+ return -EINVAL;
+}
+
+/*
+ * json_type_int needs to be validated first.
+ * See validate_json_uint32()
+ */
+uint32_t crypt_jobj_get_uint32(json_object *jobj)
+{
+ return json_object_get_int64(jobj);
+}
+
+/* jobj has to be json_type_string and numbered */
+static bool json_str_to_uint64(json_object *jobj, uint64_t *value)
+{
+ char *endptr;
+ unsigned long long tmp;
+
+ errno = 0;
+ tmp = strtoull(json_object_get_string(jobj), &endptr, 10);
+ if (*endptr || errno) {
+ *value = 0;
+ return false;
+ }
+
+ *value = tmp;
+ return true;
+}
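+
+/* Note: strtoull() alone would accept leading whitespace or a sign
+ * (silently wrapping negative input), which is why callers also pass
+ * such strings through numbered() wherever that matters. */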
+
+uint64_t crypt_jobj_get_uint64(json_object *jobj)
+{
+ uint64_t r;
+ json_str_to_uint64(jobj, &r);
+ return r;
+}
+
+json_object *crypt_jobj_new_uint64(uint64_t value)
+{
+ /* 18446744073709551615 */
+ char num[21];
+ int r;
+ json_object *jobj;
+
+ r = snprintf(num, sizeof(num), "%" PRIu64, value);
+ if (r < 0 || (size_t)r >= sizeof(num))
+ return NULL;
+
+ jobj = json_object_new_string(num);
+ return jobj;
+}
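+
+/* 64-bit values are stored as JSON strings rather than integers:
+ * json-c's json_object_new_int64()/json_object_get_int64() API is
+ * signed, so values above INT64_MAX could not round-trip through it. */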
+
+/*
+ * Validate helpers
+ */
+static bool numbered(struct crypt_device *cd, const char *name, const char *key)
+{
+ int i;
+
+ for (i = 0; key[i]; i++)
+ if (!isdigit(key[i])) {
+ log_dbg(cd, "%s \"%s\" is not in numbered form.", name, key);
+ return false;
+ }
+ return true;
+}
+
+json_object *json_contains(struct crypt_device *cd, json_object *jobj, const char *name,
+ const char *section, const char *key, json_type type)
+{
+ json_object *sobj;
+
+ if (!json_object_object_get_ex(jobj, key, &sobj) ||
+ !json_object_is_type(sobj, type)) {
+ log_dbg(cd, "%s \"%s\" is missing \"%s\" (%s) specification.",
+ section, name, key, json_type_to_name(type));
+ return NULL;
+ }
+
+ return sobj;
+}
+
+json_object *json_contains_string(struct crypt_device *cd, json_object *jobj,
+ const char *name, const char *section, const char *key)
+{
+ json_object *sobj = json_contains(cd, jobj, name, section, key, json_type_string);
+
+ if (!sobj)
+ return NULL;
+
+ if (strlen(json_object_get_string(sobj)) < 1)
+ return NULL;
+
+ return sobj;
+}
+
+bool validate_json_uint32(json_object *jobj)
+{
+ int64_t tmp;
+
+ errno = 0;
+ tmp = json_object_get_int64(jobj);
+
+ return !(errno || tmp < 0 || tmp > UINT32_MAX);
+}
+
+static bool validate_keyslots_array(struct crypt_device *cd, json_object *jarr, json_object *jobj_keys)
+{
+ json_object *jobj;
+ int i = 0, length = (int) json_object_array_length(jarr);
+
+ while (i < length) {
+ jobj = json_object_array_get_idx(jarr, i);
+ if (!json_object_is_type(jobj, json_type_string)) {
+ log_dbg(cd, "Illegal value type in keyslots array at index %d.", i);
+ return false;
+ }
+
+ if (!json_contains(cd, jobj_keys, "", "Keyslots section",
+ json_object_get_string(jobj), json_type_object))
+ return false;
+
+ i++;
+ }
+
+ return true;
+}
+
+static bool validate_segments_array(struct crypt_device *cd, json_object *jarr, json_object *jobj_segments)
+{
+ json_object *jobj;
+ int i = 0, length = (int) json_object_array_length(jarr);
+
+ while (i < length) {
+ jobj = json_object_array_get_idx(jarr, i);
+ if (!json_object_is_type(jobj, json_type_string)) {
+ log_dbg(cd, "Illegal value type in segments array at index %d.", i);
+ return false;
+ }
+
+ if (!json_contains(cd, jobj_segments, "", "Segments section",
+ json_object_get_string(jobj), json_type_object))
+ return false;
+
+ i++;
+ }
+
+ return true;
+}
+
+static bool segment_has_digest(const char *segment_name, json_object *jobj_digests)
+{
+ json_object *jobj_segments;
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ UNUSED(key);
+ json_object_object_get_ex(val, "segments", &jobj_segments);
+ if (LUKS2_array_jobj(jobj_segments, segment_name))
+ return true;
+ }
+
+ return false;
+}
+
+
+static bool validate_intervals(struct crypt_device *cd,
+ int length, const struct interval *ix,
+ uint64_t metadata_size, uint64_t keyslots_area_end)
+{
+ int j, i = 0;
+
+ while (i < length) {
+ /* Offset cannot be inside primary or secondary JSON area */
+ if (ix[i].offset < 2 * metadata_size) {
+ log_dbg(cd, "Illegal area offset: %" PRIu64 ".", ix[i].offset);
+ return false;
+ }
+
+ if (!ix[i].length) {
+ log_dbg(cd, "Area length must be greater than zero.");
+ return false;
+ }
+
+ if (ix[i].offset > (UINT64_MAX - ix[i].length)) {
+ log_dbg(cd, "Interval offset+length overflow.");
+ return false;
+ }
+
+ if ((ix[i].offset + ix[i].length) > keyslots_area_end) {
+ log_dbg(cd, "Area [%" PRIu64 ", %" PRIu64 "] overflows binary keyslots area (ends at offset: %" PRIu64 ").",
+ ix[i].offset, ix[i].offset + ix[i].length, keyslots_area_end);
+ return false;
+ }
+
+ for (j = 0; j < length; j++) {
+ if (i == j)
+ continue;
+
+ if (ix[j].offset > (UINT64_MAX - ix[j].length)) {
+ log_dbg(cd, "Interval offset+length overflow.");
+ return false;
+ }
+
+ if ((ix[i].offset >= ix[j].offset) && (ix[i].offset < (ix[j].offset + ix[j].length))) {
+ log_dbg(cd, "Overlapping areas [%" PRIu64 ",%" PRIu64 "] and [%" PRIu64 ",%" PRIu64 "].",
+ ix[i].offset, ix[i].offset + ix[i].length,
+ ix[j].offset, ix[j].offset + ix[j].length);
+ return false;
+ }
+ }
+
+ i++;
+ }
+
+ return true;
+}
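+
+/* Example of the checks above (numbers illustrative): with
+ * metadata_size = 16384, any offset below 32768 is rejected; areas
+ * [32768, +4096] and [36864, +4096] pass, while [32768, +8192] and
+ * [36864, +4096] are flagged as overlapping, since 36864 lies inside
+ * [32768, 40960). */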
+
+static int LUKS2_keyslot_validate(struct crypt_device *cd, json_object *hdr_keyslot, const char *key)
+{
+ json_object *jobj_key_size;
+
+ if (!json_contains_string(cd, hdr_keyslot, key, "Keyslot", "type"))
+ return 1;
+ if (!(jobj_key_size = json_contains(cd, hdr_keyslot, key, "Keyslot", "key_size", json_type_int)))
+ return 1;
+
+ /* enforce uint32_t type */
+ if (!validate_json_uint32(jobj_key_size)) {
+ log_dbg(cd, "Illegal field \"key_size\":%s.",
+ json_object_get_string(jobj_key_size));
+ return 1;
+ }
+
+ return 0;
+}
+
+int LUKS2_token_validate(struct crypt_device *cd,
+ json_object *hdr_jobj, json_object *jobj_token, const char *key)
+{
+ json_object *jarr, *jobj_keyslots;
+
+ /* keyslots are not yet validated, but we need to know the token doesn't reference a missing keyslot */
+ if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
+ return 1;
+
+ if (!json_contains_string(cd, jobj_token, key, "Token", "type"))
+ return 1;
+
+ jarr = json_contains(cd, jobj_token, key, "Token", "keyslots", json_type_array);
+ if (!jarr)
+ return 1;
+
+ if (!validate_keyslots_array(cd, jarr, jobj_keyslots))
+ return 1;
+
+ return 0;
+}
+
+static int hdr_validate_json_size(struct crypt_device *cd, json_object *hdr_jobj, uint64_t hdr_json_size)
+{
+ json_object *jobj, *jobj1;
+ const char *json;
+ uint64_t json_area_size, json_size;
+
+ json_object_object_get_ex(hdr_jobj, "config", &jobj);
+ json_object_object_get_ex(jobj, "json_size", &jobj1);
+
+ json = json_object_to_json_string_ext(hdr_jobj,
+ JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
+ json_area_size = crypt_jobj_get_uint64(jobj1);
+ json_size = (uint64_t)strlen(json);
+
+ if (hdr_json_size != json_area_size) {
+ log_dbg(cd, "JSON area size does not match value in binary header.");
+ return 1;
+ }
+
+ if (json_size > json_area_size) {
+ log_dbg(cd, "JSON does not fit in the designated area.");
+ return 1;
+ }
+
+ return 0;
+}
+
+int LUKS2_check_json_size(struct crypt_device *cd, const struct luks2_hdr *hdr)
+{
+ return hdr_validate_json_size(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN);
+}
+
+static int hdr_validate_keyslots(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj;
+
+ if (!(jobj = json_contains(cd, hdr_jobj, "", "JSON area", "keyslots", json_type_object)))
+ return 1;
+
+ json_object_object_foreach(jobj, key, val) {
+ if (!numbered(cd, "Keyslot", key))
+ return 1;
+ if (LUKS2_keyslot_validate(cd, val, key))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int hdr_validate_tokens(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj;
+
+ if (!(jobj = json_contains(cd, hdr_jobj, "", "JSON area", "tokens", json_type_object)))
+ return 1;
+
+ json_object_object_foreach(jobj, key, val) {
+ if (!numbered(cd, "Token", key))
+ return 1;
+ if (LUKS2_token_validate(cd, hdr_jobj, val, key))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int hdr_validate_crypt_segment(struct crypt_device *cd, json_object *jobj,
+ const char *key, json_object *jobj_digests,
+ uint64_t size)
+{
+ int r;
+ json_object *jobj_ivoffset, *jobj_sector_size, *jobj_integrity;
+ uint32_t sector_size;
+ uint64_t ivoffset;
+
+ if (!(jobj_ivoffset = json_contains_string(cd, jobj, key, "Segment", "iv_tweak")) ||
+ !json_contains_string(cd, jobj, key, "Segment", "encryption") ||
+ !(jobj_sector_size = json_contains(cd, jobj, key, "Segment", "sector_size", json_type_int)))
+ return 1;
+
+ /* integrity */
+ if (json_object_object_get_ex(jobj, "integrity", &jobj_integrity)) {
+ if (!json_contains(cd, jobj, key, "Segment", "integrity", json_type_object) ||
+ !json_contains_string(cd, jobj_integrity, key, "Segment integrity", "type") ||
+ !json_contains_string(cd, jobj_integrity, key, "Segment integrity", "journal_encryption") ||
+ !json_contains_string(cd, jobj_integrity, key, "Segment integrity", "journal_integrity"))
+ return 1;
+ }
+
+ /* enforce uint32_t type */
+ if (!validate_json_uint32(jobj_sector_size)) {
+ log_dbg(cd, "Illegal field \"sector_size\":%s.",
+ json_object_get_string(jobj_sector_size));
+ return 1;
+ }
+
+ sector_size = crypt_jobj_get_uint32(jobj_sector_size);
+ if (!sector_size || MISALIGNED_512(sector_size)) {
+ log_dbg(cd, "Illegal sector size: %" PRIu32, sector_size);
+ return 1;
+ }
+
+ if (!numbered(cd, "iv_tweak", json_object_get_string(jobj_ivoffset)) ||
+ !json_str_to_uint64(jobj_ivoffset, &ivoffset)) {
+ log_dbg(cd, "Illegal iv_tweak value.");
+ return 1;
+ }
+
+ if (size % sector_size) {
+ log_dbg(cd, "Size field has to be aligned to sector size: %" PRIu32, sector_size);
+ return 1;
+ }
+
+ r = segment_has_digest(key, jobj_digests);
+
+ if (!r)
+ log_dbg(cd, "Crypt segment %s not assigned to key digest.", key);
+
+ return !r;
+}
+
+static bool validate_segment_intervals(struct crypt_device *cd,
+ int length, const struct interval *ix)
+{
+ int j, i = 0;
+
+ while (i < length) {
+ if (ix[i].length == UINT64_MAX && (i != (length - 1))) {
+ log_dbg(cd, "Only last regular segment is allowed to have 'dynamic' size.");
+ return false;
+ }
+
+ for (j = 0; j < length; j++) {
+ if (i == j)
+ continue;
+
+ if (ix[j].length != UINT64_MAX && ix[j].offset > (UINT64_MAX - ix[j].length)) {
+ log_dbg(cd, "Interval offset+length overflow.");
+ return false;
+ }
+
+ if ((ix[i].offset >= ix[j].offset) && (ix[j].length == UINT64_MAX || (ix[i].offset < (ix[j].offset + ix[j].length)))) {
+ log_dbg(cd, "Overlapping segments [%" PRIu64 ",%" PRIu64 "]%s and [%" PRIu64 ",%" PRIu64 "]%s.",
+ ix[i].offset, ix[i].offset + ix[i].length, ix[i].length == UINT64_MAX ? "(dynamic)" : "",
+ ix[j].offset, ix[j].offset + ix[j].length, ix[j].length == UINT64_MAX ? "(dynamic)" : "");
+ return false;
+ }
+ }
+
+ i++;
+ }
+
+ return true;
+}
+
+static int reqs_unknown(uint32_t reqs)
+{
+ return reqs & CRYPT_REQUIREMENT_UNKNOWN;
+}
+
+static int reqs_reencrypt(uint32_t reqs)
+{
+ return reqs & CRYPT_REQUIREMENT_OFFLINE_REENCRYPT;
+}
+
+static int reqs_reencrypt_online(uint32_t reqs)
+{
+ return reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
+}
+
+/*
+ * The config section requirements object must be valid.
+ * The general segments section must also be validated first.
+ */
+static int validate_reencrypt_segments(struct crypt_device *cd, json_object *hdr_jobj, json_object *jobj_segments, int first_backup, int segments_count)
+{
+ json_object *jobj, *jobj_backup_previous = NULL, *jobj_backup_final = NULL;
+ uint32_t reqs;
+ int i, r;
+ struct luks2_hdr dummy = {
+ .jobj = hdr_jobj
+ };
+
+ r = LUKS2_config_get_requirements(cd, &dummy, &reqs);
+ if (r)
+ return 1;
+
+ if (reqs_reencrypt_online(reqs)) {
+ for (i = first_backup; i < segments_count; i++) {
+ jobj = json_segments_get_segment(jobj_segments, i);
+ if (!jobj)
+ return 1;
+ if (json_segment_contains_flag(jobj, "backup-final", 0))
+ jobj_backup_final = jobj;
+ else if (json_segment_contains_flag(jobj, "backup-previous", 0))
+ jobj_backup_previous = jobj;
+ }
+
+ if (!jobj_backup_final || !jobj_backup_previous) {
+ log_dbg(cd, "Backup segment is missing.");
+ return 1;
+ }
+
+ for (i = 0; i < first_backup; i++) {
+ jobj = json_segments_get_segment(jobj_segments, i);
+ if (!jobj)
+ return 1;
+
+ if (json_segment_contains_flag(jobj, "in-reencryption", 0)) {
+ if (!json_segment_cmp(jobj, jobj_backup_final)) {
+ log_dbg(cd, "Segment in reencryption does not match backup final segment.");
+ return 1;
+ }
+ continue;
+ }
+
+ if (!json_segment_cmp(jobj, jobj_backup_final) &&
+ !json_segment_cmp(jobj, jobj_backup_previous)) {
+ log_dbg(cd, "Segment does not match neither backup final or backup previous segment.");
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj_segments, *jobj_digests, *jobj_offset, *jobj_size, *jobj_type, *jobj_flags, *jobj;
+ uint64_t offset, size;
+ int i, r, count, first_backup = -1;
+ struct interval *intervals = NULL;
+
+ if (!(jobj_segments = json_contains(cd, hdr_jobj, "", "JSON area", "segments", json_type_object)))
+ return 1;
+
+ count = json_object_object_length(jobj_segments);
+ if (count < 1) {
+ log_dbg(cd, "Empty segments section.");
+ return 1;
+ }
+
+ /* digests should already be validated */
+ if (!json_object_object_get_ex(hdr_jobj, "digests", &jobj_digests))
+ return 1;
+
+ json_object_object_foreach(jobj_segments, key, val) {
+ if (!numbered(cd, "Segment", key))
+ return 1;
+
+ /* those fields are mandatory for all segment types */
+ if (!(jobj_type = json_contains_string(cd, val, key, "Segment", "type")) ||
+ !(jobj_offset = json_contains_string(cd, val, key, "Segment", "offset")) ||
+ !(jobj_size = json_contains_string(cd, val, key, "Segment", "size")))
+ return 1;
+
+ if (!numbered(cd, "offset", json_object_get_string(jobj_offset)))
+ return 1;
+
+ if (!json_str_to_uint64(jobj_offset, &offset)) {
+ log_dbg(cd, "Illegal segment offset value.");
+ return 1;
+ }
+
+ /* size "dynamic" means whole device starting at 'offset' */
+ if (strcmp(json_object_get_string(jobj_size), "dynamic")) {
+ if (!numbered(cd, "size", json_object_get_string(jobj_size)))
+ return 1;
+ if (!json_str_to_uint64(jobj_size, &size) || !size) {
+ log_dbg(cd, "Illegal segment size value.");
+ return 1;
+ }
+ } else
+ size = 0;
+
+ /* all device-mapper devices are aligned to the 512-byte sector size */
+ if (MISALIGNED_512(offset)) {
+ log_dbg(cd, "Offset field has to be aligned to sector size: %" PRIu32, SECTOR_SIZE);
+ return 1;
+ }
+ if (MISALIGNED_512(size)) {
+ log_dbg(cd, "Size field has to be aligned to sector size: %" PRIu32, SECTOR_SIZE);
+ return 1;
+ }
+
+ /* flags array is optional and must contain strings */
+ if (json_object_object_get_ex(val, "flags", NULL)) {
+ if (!(jobj_flags = json_contains(cd, val, key, "Segment", "flags", json_type_array)))
+ return 1;
+ for (i = 0; i < (int) json_object_array_length(jobj_flags); i++)
+ if (!json_object_is_type(json_object_array_get_idx(jobj_flags, i), json_type_string))
+ return 1;
+ }
+
+ i = atoi(key);
+ if (json_segment_is_backup(val)) {
+ if (first_backup < 0 || i < first_backup)
+ first_backup = i;
+ } else {
+ if ((first_backup >= 0) && i >= first_backup) {
+ log_dbg(cd, "Regular segment at %d is behind backup segment at %d", i, first_backup);
+ return 1;
+ }
+ }
+
+ /* crypt */
+ if (!strcmp(json_object_get_string(jobj_type), "crypt") &&
+ hdr_validate_crypt_segment(cd, val, key, jobj_digests, size))
+ return 1;
+ }
+
+ if (first_backup == 0) {
+ log_dbg(cd, "No regular segment.");
+ return 1;
+ }
+
+ /* avoid needlessly large allocation when first backup segment is invalid */
+ if (first_backup >= count) {
+ log_dbg(cd, "Gap between last regular segment and backup segment at key %d.", first_backup);
+ return 1;
+ }
+
+ if (first_backup < 0)
+ first_backup = count;
+
+ if ((size_t)first_backup < SIZE_MAX / sizeof(*intervals))
+ intervals = malloc(first_backup * sizeof(*intervals));
+
+ if (!intervals) {
+ log_dbg(cd, "Not enough memory.");
+ return 1;
+ }
+
+ for (i = 0; i < first_backup; i++) {
+ jobj = json_segments_get_segment(jobj_segments, i);
+ if (!jobj) {
+ log_dbg(cd, "Gap at key %d in segments object.", i);
+ free(intervals);
+ return 1;
+ }
+ intervals[i].offset = json_segment_get_offset(jobj, 0);
+ intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
+ }
+
+ r = !validate_segment_intervals(cd, first_backup, intervals);
+ free(intervals);
+
+ if (r)
+ return 1;
+
+ for (; i < count; i++) {
+ if (!json_segments_get_segment(jobj_segments, i)) {
+ log_dbg(cd, "Gap at key %d in segments object.", i);
+ return 1;
+ }
+ }
+
+ return validate_reencrypt_segments(cd, hdr_jobj, jobj_segments, first_backup, count);
+}
+
+static uint64_t LUKS2_metadata_size_jobj(json_object *jobj)
+{
+ json_object *jobj1, *jobj2;
+ uint64_t json_size;
+
+ json_object_object_get_ex(jobj, "config", &jobj1);
+ json_object_object_get_ex(jobj1, "json_size", &jobj2);
+ json_str_to_uint64(jobj2, &json_size);
+
+ return json_size + LUKS2_HDR_BIN_LEN;
+}
+
+uint64_t LUKS2_metadata_size(struct luks2_hdr *hdr)
+{
+ return LUKS2_metadata_size_jobj(hdr->jobj);
+}
+
+static int hdr_validate_areas(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ struct interval *intervals;
+ json_object *jobj_keyslots, *jobj_offset, *jobj_length, *jobj_segments, *jobj_area;
+ int length, ret, i = 0;
+ uint64_t metadata_size;
+
+ if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
+ return 1;
+
+ /* segments are already validated */
+ if (!json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments))
+ return 1;
+
+ /* config is already validated */
+ metadata_size = LUKS2_metadata_size_jobj(hdr_jobj);
+
+ length = json_object_object_length(jobj_keyslots);
+
+ /* Empty section */
+ if (length == 0)
+ return 0;
+
+ if (length < 0) {
+ log_dbg(cd, "Invalid keyslot areas specification.");
+ return 1;
+ }
+
+ intervals = malloc(length * sizeof(*intervals));
+ if (!intervals) {
+ log_dbg(cd, "Not enough memory.");
+ return -ENOMEM;
+ }
+
+ json_object_object_foreach(jobj_keyslots, key, val) {
+
+ if (!(jobj_area = json_contains(cd, val, key, "Keyslot", "area", json_type_object)) ||
+ !json_contains_string(cd, jobj_area, key, "Keyslot area", "type") ||
+ !(jobj_offset = json_contains_string(cd, jobj_area, key, "Keyslot", "offset")) ||
+ !(jobj_length = json_contains_string(cd, jobj_area, key, "Keyslot", "size")) ||
+ !numbered(cd, "offset", json_object_get_string(jobj_offset)) ||
+ !numbered(cd, "size", json_object_get_string(jobj_length))) {
+ free(intervals);
+ return 1;
+ }
+
+ /* rule out values > UINT64_MAX */
+ if (!json_str_to_uint64(jobj_offset, &intervals[i].offset) ||
+ !json_str_to_uint64(jobj_length, &intervals[i].length)) {
+ log_dbg(cd, "Illegal keyslot area values.");
+ free(intervals);
+ return 1;
+ }
+
+ i++;
+ }
+
+ if (length != i) {
+ free(intervals);
+ return 1;
+ }
+
+ ret = validate_intervals(cd, length, intervals, metadata_size, LUKS2_hdr_and_areas_size_jobj(hdr_jobj)) ? 0 : 1;
+
+ free(intervals);
+
+ return ret;
+}
+
+static int hdr_validate_digests(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jarr_keys, *jarr_segs, *jobj, *jobj_keyslots, *jobj_segments;
+
+ if (!(jobj = json_contains(cd, hdr_jobj, "", "JSON area", "digests", json_type_object)))
+ return 1;
+
+ /* keyslots are not yet validated, but we need to know the digest doesn't reference a missing keyslot */
+ if (!(jobj_keyslots = json_contains(cd, hdr_jobj, "", "JSON area", "keyslots", json_type_object)))
+ return 1;
+
+ /* segments are not yet validated, but we need to know the digest doesn't reference a missing segment */
+ if (!(jobj_segments = json_contains(cd, hdr_jobj, "", "JSON area", "segments", json_type_object)))
+ return 1;
+
+ json_object_object_foreach(jobj, key, val) {
+ if (!numbered(cd, "Digest", key))
+ return 1;
+
+ if (!json_contains_string(cd, val, key, "Digest", "type") ||
+ !(jarr_keys = json_contains(cd, val, key, "Digest", "keyslots", json_type_array)) ||
+ !(jarr_segs = json_contains(cd, val, key, "Digest", "segments", json_type_array)))
+ return 1;
+
+ if (!validate_keyslots_array(cd, jarr_keys, jobj_keyslots))
+ return 1;
+ if (!validate_segments_array(cd, jarr_segs, jobj_segments))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* requirements being validated in stand-alone routine */
+static int hdr_validate_config(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj_config, *jobj;
+ int i;
+ uint64_t keyslots_size, metadata_size, segment_offset;
+
+ if (!(jobj_config = json_contains(cd, hdr_jobj, "", "JSON area", "config", json_type_object)))
+ return 1;
+
+ if (!(jobj = json_contains_string(cd, jobj_config, "section", "Config", "json_size")))
+ return 1;
+ if (!json_str_to_uint64(jobj, &metadata_size)) {
+ log_dbg(cd, "Illegal config json_size value.");
+ return 1;
+ }
+
+ /* single metadata instance is assembled from json area size plus
+ * binary header size */
+ metadata_size += LUKS2_HDR_BIN_LEN;
+
+ if (!(jobj = json_contains_string(cd, jobj_config, "section", "Config", "keyslots_size")))
+ return 1;
+ if (!json_str_to_uint64(jobj, &keyslots_size)) {
+ log_dbg(cd, "Illegal config keyslots_size value.");
+ return 1;
+ }
+
+ if (LUKS2_check_metadata_area_size(metadata_size)) {
+ log_dbg(cd, "Unsupported LUKS2 header size (%" PRIu64 ").", metadata_size);
+ return 1;
+ }
+
+ if (LUKS2_check_keyslots_area_size(keyslots_size)) {
+ log_dbg(cd, "Unsupported LUKS2 keyslots size (%" PRIu64 ").", keyslots_size);
+ return 1;
+ }
+
+ /*
+ * validate that keyslots_size fits between (2 * metadata_size) and the
+ * first segment_offset (except for a detached header)
+ */
+ segment_offset = json_segments_get_minimal_offset(json_get_segments_jobj(hdr_jobj), 0);
+ if (segment_offset &&
+ (segment_offset < keyslots_size ||
+ (segment_offset - keyslots_size) < (2 * metadata_size))) {
+ log_dbg(cd, "keyslots_size is too large %" PRIu64 " (bytes). Data offset: %" PRIu64
+ ", keyslots offset: %" PRIu64, keyslots_size, segment_offset, 2 * metadata_size);
+ return 1;
+ }
+
+ /* Flags array is optional */
+ if (json_object_object_get_ex(jobj_config, "flags", &jobj)) {
+ if (!json_contains(cd, jobj_config, "section", "Config", "flags", json_type_array))
+ return 1;
+
+ /* All array members must be strings */
+ for (i = 0; i < (int) json_object_array_length(jobj); i++)
+ if (!json_object_is_type(json_object_array_get_idx(jobj, i), json_type_string))
+ return 1;
+ }
+
+ return 0;
+}
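+
+/*
+ * On-disk layout enforced by the check above (sketch):
+ *
+ * | hdr copy 1 | hdr copy 2 | keyslots area | data segment(s) ... |
+ * 0            metadata_size 2*metadata_size first segment offset
+ *
+ * i.e. (segment_offset - keyslots_size) must leave room for both
+ * metadata copies, 2 * metadata_size bytes.
+ */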
+
+static bool reencrypt_candidate_flag(const char *flag)
+{
+ const char *ptr;
+
+ assert(flag);
+
+ if (!strcmp(flag, "online-reencrypt"))
+ return true;
+
+ if (strncmp(flag, "online-reencrypt-v", 18))
+ return false;
+
+ ptr = flag + 18;
+ if (!*ptr)
+ return false;
+
+ while (*ptr) {
+ if (!isdigit(*ptr))
+ return false;
+ ptr++;
+ }
+
+ return true;
+}
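+
+/* The matcher above accepts "online-reencrypt" and versioned variants
+ * such as "online-reencrypt-v2"; "online-reencrypt-v" (no digits) and
+ * "online-reencrypt-v2x" (trailing non-digit) are rejected. */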
+
+static int hdr_validate_requirements(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ int i;
+ json_object *jobj_config, *jobj, *jobj1;
+ unsigned online_reencrypt_flag = 0;
+
+ if (!(jobj_config = json_contains(cd, hdr_jobj, "", "JSON area", "config", json_type_object)))
+ return 1;
+
+ /* Requirements object is optional */
+ if (json_object_object_get_ex(jobj_config, "requirements", &jobj)) {
+ if (!json_contains(cd, jobj_config, "section", "Config", "requirements", json_type_object))
+ return 1;
+
+ /* Mandatory array is optional */
+ if (json_object_object_get_ex(jobj, "mandatory", &jobj1)) {
+ if (!json_contains(cd, jobj, "section", "Requirements", "mandatory", json_type_array))
+ return 1;
+
+ /* All array members must be strings */
+ for (i = 0; i < (int) json_object_array_length(jobj1); i++) {
+ if (!json_object_is_type(json_object_array_get_idx(jobj1, i), json_type_string))
+ return 1;
+
+ if (reencrypt_candidate_flag(json_object_get_string(json_object_array_get_idx(jobj1, i))))
+ online_reencrypt_flag++;
+
+ }
+ }
+ }
+
+ if (online_reencrypt_flag > 1) {
+ log_dbg(cd, "Multiple online reencryption requirement flags detected.");
+ return 1;
+ }
+
+ return 0;
+}
+
+int LUKS2_hdr_validate(struct crypt_device *cd, json_object *hdr_jobj, uint64_t json_size)
+{
+ struct {
+ int (*validate)(struct crypt_device *, json_object *);
+ } checks[] = {
+ { hdr_validate_requirements },
+ { hdr_validate_tokens },
+ { hdr_validate_digests },
+ { hdr_validate_segments },
+ { hdr_validate_keyslots },
+ { hdr_validate_config },
+ { hdr_validate_areas },
+ { NULL }
+ };
+ int i;
+
+ if (!hdr_jobj)
+ return 1;
+
+ for (i = 0; checks[i].validate; i++)
+ if (checks[i].validate && checks[i].validate(cd, hdr_jobj))
+ return 1;
+
+ if (hdr_validate_json_size(cd, hdr_jobj, json_size))
+ return 1;
+
+ /* validate keyslot implementations */
+ if (LUKS2_keyslots_validate(cd, hdr_jobj))
+ return 1;
+
+ return 0;
+}
+
+static bool hdr_json_free(json_object **jobj)
+{
+ assert(jobj);
+
+ if (json_object_put(*jobj))
+ *jobj = NULL;
+
+ return (*jobj == NULL);
+}
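+
+/* json_object_put() drops one reference and returns 1 only when the
+ * last reference was released and the object freed; hdr_json_free()
+ * therefore NULLs the pointer and reports success only in that case. */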
+
+static int hdr_update_copy_for_rollback(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ json_object **jobj_copy;
+
+ assert(hdr);
+ assert(hdr->jobj);
+
+ jobj_copy = (json_object **)&hdr->jobj_rollback;
+
+ if (!hdr_json_free(jobj_copy)) {
+ log_dbg(cd, "LUKS2 rollback metadata copy still in use");
+ return -EINVAL;
+ }
+
+ return json_object_copy(hdr->jobj, jobj_copy) ? -ENOMEM : 0;
+}
+
+/* FIXME: should we expose do_recovery parameter explicitly? */
+int LUKS2_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr, int repair)
+{
+ int r;
+
+ r = device_read_lock(cd, crypt_metadata_device(cd));
+ if (r) {
+ log_err(cd, _("Failed to acquire read lock on device %s."),
+ device_path(crypt_metadata_device(cd)));
+ return r;
+ }
+
+ r = LUKS2_disk_hdr_read(cd, hdr, crypt_metadata_device(cd), 1, !repair);
+ if (r == -EAGAIN) {
+ /* unlikely: auto-recovery is required and failed due to read lock being held */
+ device_read_unlock(cd, crypt_metadata_device(cd));
+
+ /* Do not use LUKS2_device_write lock. Recovery. */
+ r = device_write_lock(cd, crypt_metadata_device(cd));
+ if (r < 0) {
+ log_err(cd, _("Failed to acquire write lock on device %s."),
+ device_path(crypt_metadata_device(cd)));
+ return r;
+ }
+
+ r = LUKS2_disk_hdr_read(cd, hdr, crypt_metadata_device(cd), 1, !repair);
+
+ device_write_unlock(cd, crypt_metadata_device(cd));
+ } else
+ device_read_unlock(cd, crypt_metadata_device(cd));
+
+ if (!r && (r = hdr_update_copy_for_rollback(cd, hdr)))
+ log_dbg(cd, "Failed to update rollback LUKS2 metadata.");
+
+ return r;
+}
+
+static int hdr_cleanup_and_validate(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ LUKS2_digests_erase_unused(cd, hdr);
+
+ return LUKS2_hdr_validate(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN);
+}
+
+int LUKS2_hdr_write_force(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ int r;
+
+ if (hdr_cleanup_and_validate(cd, hdr))
+ return -EINVAL;
+
+ r = LUKS2_disk_hdr_write(cd, hdr, crypt_metadata_device(cd), false);
+
+ if (!r && (r = hdr_update_copy_for_rollback(cd, hdr)))
+ log_dbg(cd, "Failed to update rollback LUKS2 metadata.");
+
+ return r;
+}
+
+int LUKS2_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ int r;
+
+ if (hdr_cleanup_and_validate(cd, hdr))
+ return -EINVAL;
+
+ r = LUKS2_disk_hdr_write(cd, hdr, crypt_metadata_device(cd), true);
+
+ if (!r && (r = hdr_update_copy_for_rollback(cd, hdr)))
+ log_dbg(cd, "Failed to update rollback LUKS2 metadata.");
+
+ return r;
+}
+
+int LUKS2_hdr_rollback(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ json_object **jobj_copy;
+
+ assert(hdr->jobj_rollback);
+
+ log_dbg(cd, "Rolling back in-memory LUKS2 json metadata.");
+
+ jobj_copy = (json_object **)&hdr->jobj;
+
+ if (!hdr_json_free(jobj_copy)) {
+ log_dbg(cd, "LUKS2 header still in use");
+ return -EINVAL;
+ }
+
+ return json_object_copy(hdr->jobj_rollback, jobj_copy) ? -ENOMEM : 0;
+}
+
+int LUKS2_hdr_uuid(struct crypt_device *cd, struct luks2_hdr *hdr, const char *uuid)
+{
+ uuid_t partitionUuid;
+
+ if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
+ log_err(cd, _("Wrong LUKS UUID format provided."));
+ return -EINVAL;
+ }
+ if (!uuid)
+ uuid_generate(partitionUuid);
+
+ uuid_unparse(partitionUuid, hdr->uuid);
+
+ return LUKS2_hdr_write(cd, hdr);
+}
+
+int LUKS2_hdr_labels(struct crypt_device *cd, struct luks2_hdr *hdr,
+ const char *label, const char *subsystem, int commit)
+{
+ //FIXME: check if the labels are the same and skip this.
+
+ memset(hdr->label, 0, LUKS2_LABEL_L);
+ if (label)
+ strncpy(hdr->label, label, LUKS2_LABEL_L-1);
+
+ memset(hdr->subsystem, 0, LUKS2_LABEL_L);
+ if (subsystem)
+ strncpy(hdr->subsystem, subsystem, LUKS2_LABEL_L-1);
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+void LUKS2_hdr_free(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ json_object **jobj;
+
+ assert(hdr);
+
+ jobj = (json_object **)&hdr->jobj;
+
+ if (!hdr_json_free(jobj))
+ log_dbg(cd, "LUKS2 header still in use");
+
+ jobj = (json_object **)&hdr->jobj_rollback;
+
+ if (!hdr_json_free(jobj))
+ log_dbg(cd, "LUKS2 rollback metadata copy still in use");
+}
+
+static uint64_t LUKS2_keyslots_size_jobj(json_object *jobj)
+{
+ json_object *jobj1, *jobj2;
+ uint64_t keyslots_size;
+
+ json_object_object_get_ex(jobj, "config", &jobj1);
+ json_object_object_get_ex(jobj1, "keyslots_size", &jobj2);
+ json_str_to_uint64(jobj2, &keyslots_size);
+
+ return keyslots_size;
+}
+
+uint64_t LUKS2_keyslots_size(struct luks2_hdr *hdr)
+{
+ return LUKS2_keyslots_size_jobj(hdr->jobj);
+}
+
+uint64_t LUKS2_hdr_and_areas_size_jobj(json_object *jobj)
+{
+ return 2 * LUKS2_metadata_size_jobj(jobj) + LUKS2_keyslots_size_jobj(jobj);
+}
+
+uint64_t LUKS2_hdr_and_areas_size(struct luks2_hdr *hdr)
+{
+ return LUKS2_hdr_and_areas_size_jobj(hdr->jobj);
+}
+
+int LUKS2_hdr_backup(struct crypt_device *cd, struct luks2_hdr *hdr,
+ const char *backup_file)
+{
+ struct device *device = crypt_metadata_device(cd);
+ int fd, devfd, r = 0;
+ ssize_t hdr_size;
+ ssize_t ret, buffer_size;
+ char *buffer = NULL;
+
+ hdr_size = LUKS2_hdr_and_areas_size(hdr);
+ buffer_size = size_round_up(hdr_size, crypt_getpagesize());
+
+ buffer = malloc(buffer_size);
+ if (!buffer)
+ return -ENOMEM;
+
+ log_dbg(cd, "Storing backup of header (%zu bytes).", hdr_size);
+ log_dbg(cd, "Output backup file size: %zu bytes.", buffer_size);
+
+ r = device_read_lock(cd, device);
+ if (r) {
+ log_err(cd, _("Failed to acquire read lock on device %s."),
+ device_path(crypt_metadata_device(cd)));
+ goto out;
+ }
+
+ devfd = device_open_locked(cd, device, O_RDONLY);
+ if (devfd < 0) {
+ device_read_unlock(cd, device);
+ log_err(cd, _("Device %s is not a valid LUKS device."), device_path(device));
+ r = (devfd == -1) ? -EINVAL : devfd;
+ goto out;
+ }
+
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), buffer, hdr_size, 0) < hdr_size) {
+ device_read_unlock(cd, device);
+ r = -EIO;
+ goto out;
+ }
+
+ device_read_unlock(cd, device);
+
+ fd = open(backup_file, O_CREAT|O_EXCL|O_WRONLY, S_IRUSR);
+ if (fd == -1) {
+ if (errno == EEXIST)
+ log_err(cd, _("Requested header backup file %s already exists."), backup_file);
+ else
+ log_err(cd, _("Cannot create header backup file %s."), backup_file);
+ r = -EINVAL;
+ goto out;
+ }
+ ret = write_buffer(fd, buffer, buffer_size);
+ close(fd);
+ if (ret < buffer_size) {
+ log_err(cd, _("Cannot write header backup file %s."), backup_file);
+ r = -EIO;
+ } else
+ r = 0;
+out:
+ crypt_safe_memzero(buffer, buffer_size);
+ free(buffer);
+ return r;
+}
+
+int LUKS2_hdr_restore(struct crypt_device *cd, struct luks2_hdr *hdr,
+ const char *backup_file)
+{
+ struct device *backup_device, *device = crypt_metadata_device(cd);
+ int r, fd, devfd = -1, diff_uuid = 0;
+ ssize_t ret, buffer_size = 0;
+ char *buffer = NULL, msg[1024];
+ struct luks2_hdr hdr_file = {}, tmp_hdr = {};
+ uint32_t reqs = 0;
+
+ r = device_alloc(cd, &backup_device, backup_file);
+ if (r < 0)
+ return r;
+
+ r = device_read_lock(cd, backup_device);
+ if (r) {
+ log_err(cd, _("Failed to acquire read lock on device %s."),
+ device_path(backup_device));
+ device_free(cd, backup_device);
+ return r;
+ }
+
+ r = LUKS2_disk_hdr_read(cd, &hdr_file, backup_device, 0, 0);
+ device_read_unlock(cd, backup_device);
+ device_free(cd, backup_device);
+
+ if (r < 0) {
+ log_err(cd, _("Backup file does not contain valid LUKS header."));
+ goto out;
+ }
+
+ /* do not allow header restore from backup with unmet requirements */
+ if (LUKS2_unmet_requirements(cd, &hdr_file, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 1)) {
+ log_err(cd, _("Forbidden LUKS2 requirements detected in backup %s."),
+ backup_file);
+ r = -ETXTBSY;
+ goto out;
+ }
+
+ buffer_size = LUKS2_hdr_and_areas_size(&hdr_file);
+ buffer = malloc(buffer_size);
+ if (!buffer) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ fd = open(backup_file, O_RDONLY);
+ if (fd == -1) {
+ log_err(cd, _("Cannot open header backup file %s."), backup_file);
+ r = -EINVAL;
+ goto out;
+ }
+
+ ret = read_buffer(fd, buffer, buffer_size);
+ close(fd);
+ if (ret < buffer_size) {
+ log_err(cd, _("Cannot read header backup file %s."), backup_file);
+ r = -EIO;
+ goto out;
+ }
+
+ r = LUKS2_hdr_read(cd, &tmp_hdr, 0);
+ if (r == 0) {
+ log_dbg(cd, "Device %s already contains LUKS2 header, checking UUID and requirements.", device_path(device));
+ r = LUKS2_config_get_requirements(cd, &tmp_hdr, &reqs);
+ if (r)
+ goto out;
+
+ if (memcmp(tmp_hdr.uuid, hdr_file.uuid, LUKS2_UUID_L))
+ diff_uuid = 1;
+
+ if (!reqs_reencrypt(reqs)) {
+ log_dbg(cd, "Checking LUKS2 header size and offsets.");
+ if (LUKS2_get_data_offset(&tmp_hdr) != LUKS2_get_data_offset(&hdr_file)) {
+ log_err(cd, _("Data offset differ on device and backup, restore failed."));
+ r = -EINVAL;
+ goto out;
+ }
+ /* FIXME: what could go wrong? Erase if we're fine with consequences */
+ if (buffer_size != (ssize_t) LUKS2_hdr_and_areas_size(&tmp_hdr)) {
+ log_err(cd, _("Binary header with keyslot areas size differ on device and backup, restore failed."));
+ r = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ r = snprintf(msg, sizeof(msg), _("Device %s %s%s%s%s"), device_path(device),
+ r ? _("does not contain LUKS2 header. Replacing header can destroy data on that device.") :
+ _("already contains LUKS2 header. Replacing header will destroy existing keyslots."),
+ diff_uuid ? _("\nWARNING: real device header has different UUID than backup!") : "",
+ reqs_unknown(reqs) ? _("\nWARNING: unknown LUKS2 requirements detected in real device header!"
+ "\nReplacing header with backup may corrupt the data on that device!") : "",
+ reqs_reencrypt(reqs) ? _("\nWARNING: Unfinished offline reencryption detected on the device!"
+ "\nReplacing header with backup may corrupt data.") : "");
+ if (r < 0 || (size_t) r >= sizeof(msg)) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (!crypt_confirm(cd, msg)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ log_dbg(cd, "Storing backup of header (%zu bytes) to device %s.", buffer_size, device_path(device));
+
+ /* Do not use LUKS2_device_write lock for checking sequence id on restore */
+ r = device_write_lock(cd, device);
+ if (r < 0) {
+ log_err(cd, _("Failed to acquire write lock on device %s."),
+ device_path(device));
+ goto out;
+ }
+
+ devfd = device_open_locked(cd, device, O_RDWR);
+ if (devfd < 0) {
+ if (errno == EACCES)
+ log_err(cd, _("Cannot write to device %s, permission denied."),
+ device_path(device));
+ else
+ log_err(cd, _("Cannot open device %s."), device_path(device));
+ device_write_unlock(cd, device);
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), buffer, buffer_size, 0) < buffer_size)
+ r = -EIO;
+ else
+ r = 0;
+
+ device_write_unlock(cd, device);
+out:
+ LUKS2_hdr_free(cd, hdr);
+ LUKS2_hdr_free(cd, &hdr_file);
+ LUKS2_hdr_free(cd, &tmp_hdr);
+ crypt_safe_memzero(&hdr_file, sizeof(hdr_file));
+ crypt_safe_memzero(&tmp_hdr, sizeof(tmp_hdr));
+ crypt_safe_memzero(buffer, buffer_size);
+ free(buffer);
+ device_sync(cd, device);
+ return r;
+}
+
+/*
+ * Persistent config flags: activation flag bits mapped to the
+ * names stored in the config.flags JSON array.
+ */
+static const struct {
+ uint32_t flag;
+ const char *description;
+} persistent_flags[] = {
+ { CRYPT_ACTIVATE_ALLOW_DISCARDS, "allow-discards" },
+ { CRYPT_ACTIVATE_SAME_CPU_CRYPT, "same-cpu-crypt" },
+ { CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS, "submit-from-crypt-cpus" },
+ { CRYPT_ACTIVATE_NO_JOURNAL, "no-journal" },
+ { CRYPT_ACTIVATE_NO_READ_WORKQUEUE, "no-read-workqueue" },
+ { CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE, "no-write-workqueue" },
+ { 0, NULL }
+};
+
+int LUKS2_config_get_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *flags)
+{
+ json_object *jobj1, *jobj_config, *jobj_flags;
+ int i, j, found;
+
+ if (!hdr || !flags)
+ return -EINVAL;
+
+ *flags = 0;
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj_config, "flags", &jobj_flags))
+ return 0;
+
+ for (i = 0; i < (int) json_object_array_length(jobj_flags); i++) {
+ jobj1 = json_object_array_get_idx(jobj_flags, i);
+ found = 0;
+ for (j = 0; persistent_flags[j].description && !found; j++)
+ if (!strcmp(persistent_flags[j].description,
+ json_object_get_string(jobj1))) {
+ *flags |= persistent_flags[j].flag;
+ log_dbg(cd, "Using persistent flag %s.",
+ json_object_get_string(jobj1));
+ found = 1;
+ }
+ if (!found)
+ log_verbose(cd, _("Ignored unknown flag %s."),
+ json_object_get_string(jobj1));
+ }
+
+ return 0;
+}
+
+int LUKS2_config_set_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t flags)
+{
+ json_object *jobj_config, *jobj_flags;
+ int i;
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return 0;
+
+ jobj_flags = json_object_new_array();
+ if (!jobj_flags)
+ return -ENOMEM;
+
+ for (i = 0; persistent_flags[i].description; i++) {
+ if (flags & persistent_flags[i].flag) {
+ log_dbg(cd, "Setting persistent flag: %s.", persistent_flags[i].description);
+ json_object_array_add(jobj_flags,
+ json_object_new_string(persistent_flags[i].description));
+ }
+ }
+
+ /* Replace or add new flags array */
+ json_object_object_add(jobj_config, "flags", jobj_flags);
+
+ return LUKS2_hdr_write(cd, hdr);
+}
+
+/*
+ * json format example (the mandatory array must never be ignored,
+ * all other fields may be added in future versions)
+ *
+ * "requirements": {
+ * mandatory : [],
+ * optional0 : [],
+ * optional1 : "lala"
+ * }
+ */
+
+/* LUKS2 library requirements */
+struct requirement_flag {
+ uint32_t flag;
+ uint8_t version;
+ const char *description;
+};
+
+static const struct requirement_flag unknown_requirement_flag = { CRYPT_REQUIREMENT_UNKNOWN, 0, NULL };
+
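+/* For a flag with multiple on-disk names, the first entry is the default written when the requirement is newly set. */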
+static const struct requirement_flag requirements_flags[] = {
+ { CRYPT_REQUIREMENT_OFFLINE_REENCRYPT, 1, "offline-reencrypt" },
+ { CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 2, "online-reencrypt-v2" },
+ { CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 3, "online-reencrypt-v3" },
+ { CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 1, "online-reencrypt" },
+ { 0, 0, NULL }
+};
+
+static const struct requirement_flag *get_requirement_by_name(const char *requirement)
+{
+ int i;
+
+ for (i = 0; requirements_flags[i].description; i++)
+ if (!strcmp(requirement, requirements_flags[i].description))
+ return requirements_flags + i;
+
+ return &unknown_requirement_flag;
+}
+
+static json_object *mandatory_requirements_jobj(struct luks2_hdr *hdr)
+{
+ json_object *jobj_config, *jobj_requirements, *jobj_mandatory;
+
+ assert(hdr);
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_requirements, "mandatory", &jobj_mandatory))
+ return NULL;
+
+ return jobj_mandatory;
+}
+
+bool LUKS2_reencrypt_requirement_candidate(struct luks2_hdr *hdr)
+{
+ json_object *jobj_mandatory;
+ int i, len;
+
+ assert(hdr);
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return false;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+ if (len <= 0)
+ return false;
+
+ for (i = 0; i < len; i++) {
+ if (reencrypt_candidate_flag(json_object_get_string(json_object_array_get_idx(jobj_mandatory, i))))
+ return true;
+ }
+
+ return false;
+}
+
+int LUKS2_config_get_reencrypt_version(struct luks2_hdr *hdr, uint8_t *version)
+{
+ json_object *jobj_mandatory, *jobj;
+ int i, len;
+ const struct requirement_flag *req;
+
+ assert(hdr);
+ assert(version);
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return -ENOENT;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+ if (len <= 0)
+ return -ENOENT;
+
+ for (i = 0; i < len; i++) {
+ jobj = json_object_array_get_idx(jobj_mandatory, i);
+
+ /* search for requirements prefixed with "online-reencrypt" */
+ if (strncmp(json_object_get_string(jobj), "online-reencrypt", 16))
+ continue;
+
+ /* check current library is aware of the requirement */
+ req = get_requirement_by_name(json_object_get_string(jobj));
+ if (req->flag == CRYPT_REQUIREMENT_UNKNOWN)
+ continue;
+
+ *version = req->version;
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static const struct requirement_flag *stored_requirement_name_by_id(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t req_id)
+{
+ json_object *jobj_mandatory, *jobj;
+ int i, len;
+ const struct requirement_flag *req;
+
+ assert(hdr);
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return NULL;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+ if (len <= 0)
+ return NULL;
+
+ for (i = 0; i < len; i++) {
+ jobj = json_object_array_get_idx(jobj_mandatory, i);
+ req = get_requirement_by_name(json_object_get_string(jobj));
+ if (req->flag == req_id)
+ return req;
+ }
+
+ return NULL;
+}
+
+/*
+ * Fills *reqs with a bitmask of mandatory requirement flags
+ * (requirements exist in metadata past the cryptsetup 2.0 release).
+ */
+int LUKS2_config_get_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *reqs)
+{
+ json_object *jobj_mandatory, *jobj;
+ int i, len;
+ const struct requirement_flag *req;
+
+ assert(hdr);
+ assert(reqs);
+
+ *reqs = 0;
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return 0;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+ if (len <= 0)
+ return 0;
+
+ log_dbg(cd, "LUKS2 requirements detected:");
+
+ for (i = 0; i < len; i++) {
+ jobj = json_object_array_get_idx(jobj_mandatory, i);
+ req = get_requirement_by_name(json_object_get_string(jobj));
+ log_dbg(cd, "%s - %sknown", json_object_get_string(jobj),
+ reqs_unknown(req->flag) ? "un" : "");
+ *reqs |= req->flag;
+ }
+
+ return 0;
+}
+
+int LUKS2_config_set_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs, bool commit)
+{
+ json_object *jobj_config, *jobj_requirements, *jobj_mandatory, *jobj;
+ int i, r = -EINVAL;
+ const struct requirement_flag *req;
+ uint32_t req_id;
+
+ if (!hdr)
+ return -EINVAL;
+
+ jobj_mandatory = json_object_new_array();
+ if (!jobj_mandatory)
+ return -ENOMEM;
+
+ for (i = 0; requirements_flags[i].description; i++) {
+ req_id = reqs & requirements_flags[i].flag;
+ if (req_id) {
+ /* retain already stored version of requirement flag */
+ req = stored_requirement_name_by_id(cd, hdr, req_id);
+ if (req)
+ jobj = json_object_new_string(req->description);
+ else
+ jobj = json_object_new_string(requirements_flags[i].description);
+ if (!jobj) {
+ r = -ENOMEM;
+ goto err;
+ }
+ json_object_array_add(jobj_mandatory, jobj);
+ /* erase processed flag from input set */
+ reqs &= ~(requirements_flags[i].flag);
+ }
+ }
+
+ /* any remaining bit in requirements is unknown, therefore illegal */
+ if (reqs) {
+ log_dbg(cd, "Illegal requirement flag(s) requested");
+ goto err;
+ }
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ goto err;
+
+ if (!json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements)) {
+ jobj_requirements = json_object_new_object();
+ if (!jobj_requirements) {
+ r = -ENOMEM;
+ goto err;
+ }
+ json_object_object_add(jobj_config, "requirements", jobj_requirements);
+ }
+
+ if (json_object_array_length(jobj_mandatory) > 0) {
+ /* replace mandatory field with new values */
+ json_object_object_add(jobj_requirements, "mandatory", jobj_mandatory);
+ } else {
+ /* new mandatory field was empty, delete old one */
+ json_object_object_del(jobj_requirements, "mandatory");
+ json_object_put(jobj_mandatory);
+ }
+
+ /* remove empty requirements object */
+ if (!json_object_object_length(jobj_requirements))
+ json_object_object_del(jobj_config, "requirements");
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+err:
+ json_object_put(jobj_mandatory);
+ return r;
+}
+
+static json_object *LUKS2_get_mandatory_requirements_filtered_jobj(struct luks2_hdr *hdr,
+ uint32_t filter_req_ids)
+{
+ int i, len;
+ const struct requirement_flag *req;
+ json_object *jobj_mandatory, *jobj_mandatory_filtered, *jobj;
+
+ jobj_mandatory_filtered = json_object_new_array();
+ if (!jobj_mandatory_filtered)
+ return NULL;
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return jobj_mandatory_filtered;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+
+ for (i = 0; i < len; i++) {
+ jobj = json_object_array_get_idx(jobj_mandatory, i);
+ req = get_requirement_by_name(json_object_get_string(jobj));
+ if (req->flag == CRYPT_REQUIREMENT_UNKNOWN || req->flag & filter_req_ids)
+ continue;
+ json_object_array_add(jobj_mandatory_filtered,
+ json_object_new_string(req->description));
+ }
+
+ return jobj_mandatory_filtered;
+}
+
+/*
+ * The function looks for a specific version of the requirement id.
+ * If that version cannot be set, the function fails.
+ */
+int LUKS2_config_set_requirement_version(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint32_t req_id,
+ uint8_t req_version,
+ bool commit)
+{
+ json_object *jobj_config, *jobj_requirements, *jobj_mandatory;
+ const struct requirement_flag *req;
+ int r = -EINVAL;
+
+ if (!hdr || req_id == CRYPT_REQUIREMENT_UNKNOWN)
+ return -EINVAL;
+
+ req = requirements_flags;
+
+ while (req->description) {
+ /* we have a match */
+ if (req->flag == req_id && req->version == req_version)
+ break;
+ req++;
+ }
+
+ if (!req->description)
+ return -EINVAL;
+
+ /*
+ * Create a copy of the mandatory requirements set without the
+ * requirement (any version of it) we are about to set.
+ */
+ jobj_mandatory = LUKS2_get_mandatory_requirements_filtered_jobj(hdr, req_id);
+ if (!jobj_mandatory)
+ return -ENOMEM;
+
+ json_object_array_add(jobj_mandatory, json_object_new_string(req->description));
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ goto err;
+
+ if (!json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements)) {
+ jobj_requirements = json_object_new_object();
+ if (!jobj_requirements) {
+ r = -ENOMEM;
+ goto err;
+ }
+ json_object_object_add(jobj_config, "requirements", jobj_requirements);
+ }
+
+ json_object_object_add(jobj_requirements, "mandatory", jobj_mandatory);
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+err:
+ json_object_put(jobj_mandatory);
+ return r;
+}
+
+/*
+ * Header dump
+ */
+static void hdr_dump_config(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj1, *jobj_config, *jobj_flags, *jobj_requirements, *jobj_mandatory;
+ int i = 0, flags = 0, reqs = 0;
+
+ log_std(cd, "Flags: \t");
+
+ if (json_object_object_get_ex(hdr_jobj, "config", &jobj_config)) {
+ if (json_object_object_get_ex(jobj_config, "flags", &jobj_flags))
+ flags = (int) json_object_array_length(jobj_flags);
+ if (json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements) &&
+ json_object_object_get_ex(jobj_requirements, "mandatory", &jobj_mandatory))
+ reqs = (int) json_object_array_length(jobj_mandatory);
+ }
+
+ for (i = 0; i < flags; i++) {
+ jobj1 = json_object_array_get_idx(jobj_flags, i);
+ log_std(cd, "%s ", json_object_get_string(jobj1));
+ }
+
+ log_std(cd, "%s\n%s", flags > 0 ? "" : "(no flags)", reqs > 0 ? "" : "\n");
+
+ if (reqs > 0) {
+ log_std(cd, "Requirements:\t");
+ for (i = 0; i < reqs; i++) {
+ jobj1 = json_object_array_get_idx(jobj_mandatory, i);
+ log_std(cd, "%s ", json_object_get_string(jobj1));
+ }
+ log_std(cd, "\n\n");
+ }
+}
+
+static const char *get_priority_desc(json_object *jobj)
+{
+ crypt_keyslot_priority priority;
+ json_object *jobj_priority;
+ const char *text;
+
+ if (json_object_object_get_ex(jobj, "priority", &jobj_priority))
+ priority = (crypt_keyslot_priority)(int)json_object_get_int(jobj_priority);
+ else
+ priority = CRYPT_SLOT_PRIORITY_NORMAL;
+
+ switch (priority) {
+ case CRYPT_SLOT_PRIORITY_IGNORE: text = "ignored"; break;
+ case CRYPT_SLOT_PRIORITY_PREFER: text = "preferred"; break;
+ case CRYPT_SLOT_PRIORITY_NORMAL: text = "normal"; break;
+ default: text = "invalid";
+ }
+
+ return text;
+}
+
+static void hdr_dump_keyslots(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ char slot[16];
+ json_object *keyslots_jobj, *digests_jobj, *jobj2, *jobj3, *val;
+ const char *tmps;
+ int i, j, r;
+
+ log_std(cd, "Keyslots:\n");
+ json_object_object_get_ex(hdr_jobj, "keyslots", &keyslots_jobj);
+
+ for (j = 0; j < LUKS2_KEYSLOTS_MAX; j++) {
+ if (snprintf(slot, sizeof(slot), "%i", j) < 0)
+ slot[0] = '\0';
+ json_object_object_get_ex(keyslots_jobj, slot, &val);
+ if (!val)
+ continue;
+
+ json_object_object_get_ex(val, "type", &jobj2);
+ tmps = json_object_get_string(jobj2);
+
+ r = LUKS2_keyslot_for_segment(crypt_get_hdr(cd, CRYPT_LUKS2), j, CRYPT_ONE_SEGMENT);
+ log_std(cd, " %s: %s%s\n", slot, tmps, r == -ENOENT ? " (unbound)" : "");
+
+ if (json_object_object_get_ex(val, "key_size", &jobj2))
+ log_std(cd, "\tKey: %u bits\n", crypt_jobj_get_uint32(jobj2) * 8);
+
+ log_std(cd, "\tPriority: %s\n", get_priority_desc(val));
+
+ LUKS2_keyslot_dump(cd, j);
+
+ json_object_object_get_ex(hdr_jobj, "digests", &digests_jobj);
+ json_object_object_foreach(digests_jobj, key2, val2) {
+ json_object_object_get_ex(val2, "keyslots", &jobj2);
+ for (i = 0; i < (int) json_object_array_length(jobj2); i++) {
+ jobj3 = json_object_array_get_idx(jobj2, i);
+ if (!strcmp(slot, json_object_get_string(jobj3))) {
+ log_std(cd, "\tDigest ID: %s\n", key2);
+ }
+ }
+ }
+ }
+}
+
+static void hdr_dump_tokens(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ char token[16];
+ json_object *tokens_jobj, *jobj2, *jobj3, *val;
+ const char *tmps;
+ int i, j;
+
+ log_std(cd, "Tokens:\n");
+ json_object_object_get_ex(hdr_jobj, "tokens", &tokens_jobj);
+
+ for (j = 0; j < LUKS2_TOKENS_MAX; j++) {
+ if (snprintf(token, sizeof(token), "%i", j) < 0)
+ token[0] = '\0';
+ json_object_object_get_ex(tokens_jobj, token, &val);
+ if (!val)
+ continue;
+
+ json_object_object_get_ex(val, "type", &jobj2);
+ tmps = json_object_get_string(jobj2);
+ log_std(cd, " %s: %s\n", token, tmps);
+
+ LUKS2_token_dump(cd, j);
+
+ json_object_object_get_ex(val, "keyslots", &jobj2);
+ for (i = 0; i < (int) json_object_array_length(jobj2); i++) {
+ jobj3 = json_object_array_get_idx(jobj2, i);
+ log_std(cd, "\tKeyslot: %s\n", json_object_get_string(jobj3));
+ }
+ }
+}
+
+static void hdr_dump_segments(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ char segment[16];
+ json_object *jobj_segments, *jobj_segment, *jobj1, *jobj2;
+ int i, j, flags;
+ uint64_t value;
+
+ log_std(cd, "Data segments:\n");
+ json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments);
+
+ for (i = 0; i < LUKS2_SEGMENT_MAX; i++) {
+ if (snprintf(segment, sizeof(segment), "%i", i) < 0)
+ segment[0] = '\0';
+ if (!json_object_object_get_ex(jobj_segments, segment, &jobj_segment))
+ continue;
+
+ json_object_object_get_ex(jobj_segment, "type", &jobj1);
+ log_std(cd, " %s: %s\n", segment, json_object_get_string(jobj1));
+
+ json_object_object_get_ex(jobj_segment, "offset", &jobj1);
+ json_str_to_uint64(jobj1, &value);
+ log_std(cd, "\toffset: %" PRIu64 " [bytes]\n", value);
+
+ json_object_object_get_ex(jobj_segment, "size", &jobj1);
+ if (!(strcmp(json_object_get_string(jobj1), "dynamic")))
+ log_std(cd, "\tlength: (whole device)\n");
+ else {
+ json_str_to_uint64(jobj1, &value);
+ log_std(cd, "\tlength: %" PRIu64 " [bytes]\n", value);
+ }
+
+ if (json_object_object_get_ex(jobj_segment, "encryption", &jobj1))
+ log_std(cd, "\tcipher: %s\n", json_object_get_string(jobj1));
+
+ if (json_object_object_get_ex(jobj_segment, "sector_size", &jobj1))
+ log_std(cd, "\tsector: %" PRIu32 " [bytes]\n", crypt_jobj_get_uint32(jobj1));
+
+ if (json_object_object_get_ex(jobj_segment, "integrity", &jobj1) &&
+ json_object_object_get_ex(jobj1, "type", &jobj2))
+ log_std(cd, "\tintegrity: %s\n", json_object_get_string(jobj2));
+
+ if (json_object_object_get_ex(jobj_segment, "flags", &jobj1) &&
+ (flags = (int)json_object_array_length(jobj1)) > 0) {
+ jobj2 = json_object_array_get_idx(jobj1, 0);
+ log_std(cd, "\tflags : %s", json_object_get_string(jobj2));
+ for (j = 1; j < flags; j++) {
+ jobj2 = json_object_array_get_idx(jobj1, j);
+ log_std(cd, ", %s", json_object_get_string(jobj2));
+ }
+ log_std(cd, "\n");
+ }
+
+ log_std(cd, "\n");
+ }
+}
+
+static void hdr_dump_digests(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ char key[16];
+ json_object *jobj1, *jobj2, *val;
+ const char *tmps;
+ int i;
+
+ log_std(cd, "Digests:\n");
+ json_object_object_get_ex(hdr_jobj, "digests", &jobj1);
+
+ for (i = 0; i < LUKS2_DIGEST_MAX; i++) {
+ if (snprintf(key, sizeof(key), "%i", i) < 0)
+ key[0] = '\0';
+ json_object_object_get_ex(jobj1, key, &val);
+ if (!val)
+ continue;
+
+ json_object_object_get_ex(val, "type", &jobj2);
+ tmps = json_object_get_string(jobj2);
+ log_std(cd, " %s: %s\n", key, tmps);
+
+ LUKS2_digest_dump(cd, i);
+ }
+}
+
+int LUKS2_hdr_dump(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ if (!hdr->jobj)
+ return -EINVAL;
+
+ JSON_DBG(cd, hdr->jobj, NULL);
+
+ log_std(cd, "LUKS header information\n");
+ log_std(cd, "Version: \t%u\n", hdr->version);
+ log_std(cd, "Epoch: \t%" PRIu64 "\n", hdr->seqid);
+ log_std(cd, "Metadata area: \t%" PRIu64 " [bytes]\n", LUKS2_metadata_size(hdr));
+ log_std(cd, "Keyslots area: \t%" PRIu64 " [bytes]\n", LUKS2_keyslots_size(hdr));
+ log_std(cd, "UUID: \t%s\n", *hdr->uuid ? hdr->uuid : "(no UUID)");
+ log_std(cd, "Label: \t%s\n", *hdr->label ? hdr->label : "(no label)");
+ log_std(cd, "Subsystem: \t%s\n", *hdr->subsystem ? hdr->subsystem : "(no subsystem)");
+
+ hdr_dump_config(cd, hdr->jobj);
+ hdr_dump_segments(cd, hdr->jobj);
+ hdr_dump_keyslots(cd, hdr->jobj);
+ hdr_dump_tokens(cd, hdr->jobj);
+ hdr_dump_digests(cd, hdr->jobj);
+
+ return 0;
+}
+
+int LUKS2_hdr_dump_json(struct crypt_device *cd, struct luks2_hdr *hdr, const char **json)
+{
+ const char *json_buf;
+
+ json_buf = json_object_to_json_string_ext(hdr->jobj,
+ JSON_C_TO_STRING_PRETTY | JSON_C_TO_STRING_NOSLASHESCAPE);
+
+ if (!json_buf)
+ return -EINVAL;
+
+ if (json)
+ *json = json_buf;
+ else
+ crypt_log(cd, CRYPT_LOG_NORMAL, json_buf);
+
+ return 0;
+}
+
+int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size, bool *dynamic)
+{
+ int i, len, sector_size;
+ json_object *jobj_segments, *jobj_segment, *jobj_size;
+ uint64_t tmp = 0;
+
+ if (!size || !json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
+ return -EINVAL;
+
+ len = json_object_object_length(jobj_segments);
+
+ for (i = 0; i < len; i++) {
+ if (!(jobj_segment = json_segments_get_segment(jobj_segments, i)))
+ return -EINVAL;
+
+ if (json_segment_is_backup(jobj_segment))
+ break;
+
+ json_object_object_get_ex(jobj_segment, "size", &jobj_size);
+ if (!strcmp(json_object_get_string(jobj_size), "dynamic")) {
+ sector_size = json_segment_get_sector_size(jobj_segment);
+ /* last dynamic segment must have at least one sector in size */
+ if (tmp)
+ *size = tmp + (sector_size > 0 ? sector_size : SECTOR_SIZE);
+ else
+ *size = 0;
+ if (dynamic)
+ *dynamic = true;
+ return 0;
+ }
+
+ tmp += crypt_jobj_get_uint64(jobj_size);
+ }
+
+ /* impossible, real device size must not be zero */
+ if (!tmp)
+ return -EINVAL;
+
+ *size = tmp;
+ if (dynamic)
+ *dynamic = false;
+ return 0;
+}
+
+uint64_t LUKS2_get_data_offset(struct luks2_hdr *hdr)
+{
+ crypt_reencrypt_info ri;
+ json_object *jobj;
+
+ ri = LUKS2_reencrypt_status(hdr);
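+ /* While reencryption is in progress, the final data offset is stored in the backup-final segment. */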
+ if (ri == CRYPT_REENCRYPT_CLEAN || ri == CRYPT_REENCRYPT_CRASH) {
+ jobj = LUKS2_get_segment_by_flag(hdr, "backup-final");
+ if (jobj)
+ return json_segment_get_offset(jobj, 1);
+ }
+
+ return json_segments_get_minimal_offset(LUKS2_get_segments_jobj(hdr), 1);
+}
+
+const char *LUKS2_get_cipher(struct luks2_hdr *hdr, int segment)
+{
+ json_object *jobj_segment;
+
+ if (!hdr)
+ return NULL;
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ jobj_segment = json_segments_get_segment(json_get_segments_jobj(hdr->jobj), segment);
+ if (!jobj_segment)
+ return NULL;
+
+ /* FIXME: default encryption (for other segment types) must be string here. */
+ return json_segment_get_cipher(jobj_segment) ?: "null";
+}
+
+crypt_reencrypt_info LUKS2_reencrypt_status(struct luks2_hdr *hdr)
+{
+ uint32_t reqs;
+
+ /*
+ * Any unknown requirement or offline reencryption should abort
+ * anything related to online-reencryption handling
+ */
+ if (LUKS2_config_get_requirements(NULL, hdr, &reqs))
+ return CRYPT_REENCRYPT_INVALID;
+
+ if (!reqs_reencrypt_online(reqs))
+ return CRYPT_REENCRYPT_NONE;
+
+ if (json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr)) < 0)
+ return CRYPT_REENCRYPT_CLEAN;
+
+ return CRYPT_REENCRYPT_CRASH;
+}
+
+const char *LUKS2_get_keyslot_cipher(struct luks2_hdr *hdr, int keyslot, size_t *key_size)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj1;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return NULL;
+
+ /* currently we only support raw length preserving area encryption */
+ json_object_object_get_ex(jobj_area, "type", &jobj1);
+ if (strcmp(json_object_get_string(jobj1), "raw"))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_area, "key_size", &jobj1))
+ return NULL;
+ *key_size = json_object_get_int(jobj1);
+
+ if (!json_object_object_get_ex(jobj_area, "encryption", &jobj1))
+ return NULL;
+
+ return json_object_get_string(jobj1);
+}
+
+const char *LUKS2_get_integrity(struct luks2_hdr *hdr, int segment)
+{
+ json_object *jobj1, *jobj2, *jobj3;
+
+ jobj1 = LUKS2_get_segment_jobj(hdr, segment);
+ if (!jobj1)
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, "integrity", &jobj2))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj2, "type", &jobj3))
+ return NULL;
+
+ return json_object_get_string(jobj3);
+}
+
+/* FIXME: this only ensures that once we have journal encryption, it is not ignored. */
+/* implement segment count and type restrictions (crypt and only single crypt) */
+static int LUKS2_integrity_compatible(struct luks2_hdr *hdr)
+{
+ json_object *jobj1, *jobj2, *jobj3, *jobj4;
+ const char *str;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj1))
+ return 0;
+
+ if (!(jobj2 = LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT)))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj2, "integrity", &jobj3))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj3, "journal_encryption", &jobj4) ||
+ !(str = json_object_get_string(jobj4)) ||
+ strcmp(str, "none"))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj3, "journal_integrity", &jobj4) ||
+ !(str = json_object_get_string(jobj4)) ||
+ strcmp(str, "none"))
+ return 0;
+
+ return 1;
+}
+
+static int LUKS2_keyslot_get_volume_key_size(struct luks2_hdr *hdr, const char *keyslot)
+{
+ json_object *jobj1, *jobj2, *jobj3;
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj1))
+ return -1;
+
+ if (!json_object_object_get_ex(jobj1, keyslot, &jobj2))
+ return -1;
+
+ if (!json_object_object_get_ex(jobj2, "key_size", &jobj3))
+ return -1;
+
+ return json_object_get_int(jobj3);
+}
+
+/* Key size used for encryption of keyslot */
+int LUKS2_get_keyslot_stored_key_size(struct luks2_hdr *hdr, int keyslot)
+{
+ char keyslot_name[16];
+
+ if (snprintf(keyslot_name, sizeof(keyslot_name), "%u", keyslot) < 1)
+ return -1;
+
+ return LUKS2_keyslot_get_volume_key_size(hdr, keyslot_name);
+}
+
+int LUKS2_get_volume_key_size(struct luks2_hdr *hdr, int segment)
+{
+ json_object *jobj_digests, *jobj_digest_segments, *jobj_digest_keyslots, *jobj1;
+ char buf[16];
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ if (snprintf(buf, sizeof(buf), "%u", segment) < 1)
+ return -1;
+
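+ /* Find a digest assigned to the segment and take the key size from the first keyslot using it. */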
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ UNUSED(key);
+ json_object_object_get_ex(val, "segments", &jobj_digest_segments);
+ json_object_object_get_ex(val, "keyslots", &jobj_digest_keyslots);
+
+ if (!LUKS2_array_jobj(jobj_digest_segments, buf))
+ continue;
+ if (json_object_array_length(jobj_digest_keyslots) <= 0)
+ continue;
+
+ jobj1 = json_object_array_get_idx(jobj_digest_keyslots, 0);
+
+ return LUKS2_keyslot_get_volume_key_size(hdr, json_object_get_string(jobj1));
+ }
+
+ return -1;
+}
+
+uint32_t LUKS2_get_sector_size(struct luks2_hdr *hdr)
+{
+ return json_segment_get_sector_size(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT));
+}
+
+int LUKS2_assembly_multisegment_dmd(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vks,
+ json_object *jobj_segments,
+ struct crypt_dm_active_device *dmd)
+{
+ struct volume_key *vk;
+ json_object *jobj;
+ enum devcheck device_check;
+ int r;
+ unsigned s = 0;
+ uint64_t data_offset, segment_size, segment_offset, segment_start = 0;
+ struct dm_target *t = &dmd->segment;
+
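+ /* Shared activation must not require exclusive access to the data device. */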
+ if (dmd->flags & CRYPT_ACTIVATE_SHARED)
+ device_check = DEV_OK;
+ else
+ device_check = DEV_EXCL;
+
+ data_offset = LUKS2_reencrypt_data_offset(hdr, true);
+
+ r = device_block_adjust(cd, crypt_data_device(cd), device_check,
+ data_offset, &dmd->size, &dmd->flags);
+ if (r)
+ return r;
+
+ r = dm_targets_allocate(&dmd->segment, json_segments_count(jobj_segments));
+ if (r)
+ goto err;
+
+ r = -EINVAL;
+
+ while (t) {
+ jobj = json_segments_get_segment(jobj_segments, s);
+ if (!jobj) {
+ log_dbg(cd, "Internal error. Segment %u is null.", s);
+ r = -EINVAL;
+ goto err;
+ }
+
+ segment_offset = json_segment_get_offset(jobj, 1);
+ segment_size = json_segment_get_size(jobj, 1);
+ /* 'dynamic' length allowed in last segment only */
+ if (!segment_size && !t->next)
+ segment_size = dmd->size - segment_start;
+ if (!segment_size) {
+ log_dbg(cd, "Internal error. Wrong segment size %u", s);
+ r = -EINVAL;
+ goto err;
+ }
+
+ if (!strcmp(json_segment_type(jobj), "crypt")) {
+ vk = crypt_volume_key_by_id(vks, LUKS2_digest_by_segment(hdr, s));
+ if (!vk) {
+ log_err(cd, _("Missing key for dm-crypt segment %u"), s);
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = dm_crypt_target_set(t, segment_start, segment_size,
+ crypt_data_device(cd), vk,
+ json_segment_get_cipher(jobj),
+ json_segment_get_iv_offset(jobj),
+ segment_offset, "none", 0,
+ json_segment_get_sector_size(jobj));
+ if (r) {
+ log_err(cd, _("Failed to set dm-crypt segment."));
+ goto err;
+ }
+ } else if (!strcmp(json_segment_type(jobj), "linear")) {
+ r = dm_linear_target_set(t, segment_start, segment_size, crypt_data_device(cd), segment_offset);
+ if (r) {
+ log_err(cd, _("Failed to set dm-linear segment."));
+ goto err;
+ }
+ } else {
+ r = -EINVAL;
+ goto err;
+ }
+
+ segment_start += segment_size;
+ t = t->next;
+ s++;
+ }
+
+ return r;
+err:
+ dm_targets_free(cd, dmd);
+ return r;
+}
+
+/* FIXME: This shares almost all code with activate_multi_custom */
+static int _reload_custom_multi(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ json_object *jobj_segments,
+ uint64_t device_size,
+ uint32_t flags)
+{
+ int r;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ struct crypt_dm_active_device dmd = {
+ .uuid = crypt_get_uuid(cd),
+ .size = device_size >> SECTOR_SHIFT
+ };
+
+ /* do not allow activation when particular requirements detected */
+ if ((r = LUKS2_unmet_requirements(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 0)))
+ return r;
+
+ /* Add persistent activation flags */
+ if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
+ LUKS2_config_get_flags(cd, hdr, &dmd.flags);
+
+ dmd.flags |= (flags | CRYPT_ACTIVATE_SHARED);
+
+ r = LUKS2_assembly_multisegment_dmd(cd, hdr, vks, jobj_segments, &dmd);
+ if (!r)
+ r = dm_reload_device(cd, name, &dmd, 0, 0);
+
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+int LUKS2_reload(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags)
+{
+ if (crypt_get_integrity_tag_size(cd))
+ return -ENOTSUP;
+
+ return _reload_custom_multi(cd, name, vks,
+ LUKS2_get_segments_jobj(crypt_get_hdr(cd, CRYPT_LUKS2)), device_size, flags);
+}
+
+int LUKS2_activate_multi(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags)
+{
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ json_object *jobj_segments = LUKS2_get_segments_jobj(hdr);
+ int r;
+ struct crypt_dm_active_device dmd = {
+ .size = device_size,
+ .uuid = crypt_get_uuid(cd)
+ };
+
+ /* do not allow activation when particular requirements detected */
+ if ((r = LUKS2_unmet_requirements(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 0)))
+ return r;
+
+ /* Add persistent activation flags */
+ if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
+ LUKS2_config_get_flags(cd, hdr, &dmd.flags);
+
+ dmd.flags |= flags;
+
+ r = LUKS2_assembly_multisegment_dmd(cd, hdr, vks, jobj_segments, &dmd);
+ if (!r)
+ r = dm_create_device(cd, name, CRYPT_LUKS2, &dmd);
+
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+int LUKS2_activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vk,
+ uint32_t flags)
+{
+ int r;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ struct crypt_dm_active_device dmdi = {}, dmd = {
+ .uuid = crypt_get_uuid(cd)
+ };
+
+ /* do not allow activation when particular requirements detected */
+ if ((r = LUKS2_unmet_requirements(cd, hdr, 0, 0)))
+ return r;
+
+ r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
+ vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
+ crypt_get_data_offset(cd), crypt_get_integrity(cd) ?: "none",
+ crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
+ if (r < 0)
+ return r;
+
+ /* Add persistent activation flags */
+ if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
+ LUKS2_config_get_flags(cd, hdr, &dmd.flags);
+
+ dmd.flags |= flags;
+
+ if (crypt_get_integrity_tag_size(cd)) {
+ if (!LUKS2_integrity_compatible(hdr)) {
+ log_err(cd, _("Unsupported device integrity configuration."));
+ return -EINVAL;
+ }
+
+ if (dmd.flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) {
+ log_err(cd, _("Discard/TRIM is not supported."));
+ return -EINVAL;
+ }
+
+ r = INTEGRITY_create_dmd_device(cd, NULL, NULL, NULL, NULL, &dmdi, dmd.flags, 0);
+ if (r)
+ return r;
+
+ dmdi.flags |= CRYPT_ACTIVATE_PRIVATE;
+ dmdi.uuid = dmd.uuid;
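+ /* The crypt segment is stacked over the dm-integrity device, so it starts at offset 0 and takes its size. */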
+ dmd.segment.u.crypt.offset = 0;
+ dmd.segment.size = dmdi.segment.size;
+
+ r = create_or_reload_device_with_integrity(cd, name, CRYPT_LUKS2, &dmd, &dmdi);
+ } else
+ r = create_or_reload_device(cd, name, CRYPT_LUKS2, &dmd);
+
+ dm_targets_free(cd, &dmd);
+ dm_targets_free(cd, &dmdi);
+
+ return r;
+}
+
+static bool is_reencryption_helper(const char *name)
+{
+ size_t len;
+
+ if (!name)
+ return false;
+
+ len = strlen(name);
+ return (len >= 9 && (!strncmp(name + len - 9, "-hotzone-", 9) ||
+ !strcmp(name + len - 8, "-overlay")));
+}
+
+static bool contains_reencryption_helper(char **names)
+{
+ while (*names) {
+ if (is_reencryption_helper(*names++))
+ return true;
+ }
+
+ return false;
+}
+
+int LUKS2_deactivate(struct crypt_device *cd, const char *name, struct luks2_hdr *hdr, struct crypt_dm_active_device *dmd, uint32_t flags)
+{
+ int r, ret;
+ struct dm_target *tgt;
+ crypt_status_info ci;
+ struct crypt_dm_active_device dmdc;
+ char **dep, deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = { 0 };
+ const char *namei = NULL;
+ struct crypt_lock_handle *reencrypt_lock = NULL;
+
+ if (!dmd || !dmd->uuid || strncmp(CRYPT_LUKS2, dmd->uuid, sizeof(CRYPT_LUKS2)-1))
+ return -EINVAL;
+
+ /* uuid mismatch with metadata (if available) */
+ if (hdr && crypt_uuid_cmp(dmd->uuid, hdr->uuid))
+ return -EINVAL;
+
+ r = snprintf(deps_uuid_prefix, sizeof(deps_uuid_prefix), CRYPT_SUBDEV "-%.32s", dmd->uuid + 6);
+ if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1))
+ return -EINVAL;
+
+ tgt = &dmd->segment;
+
+ /* TODO: We have LUKS2 dependencies now */
+ if (single_segment(dmd) && tgt->type == DM_CRYPT && tgt->u.crypt.tag_size)
+ namei = device_dm_name(tgt->data_device);
+
+ r = dm_device_deps(cd, name, deps_uuid_prefix, deps, ARRAY_SIZE(deps));
+ if (r < 0)
+ goto out;
+
+ if (contains_reencryption_helper(deps)) {
+ r = LUKS2_reencrypt_lock_by_dm_uuid(cd, dmd->uuid, &reencrypt_lock);
+ if (r) {
+ if (r == -EBUSY)
+ log_err(cd, _("Reencryption in-progress. Cannot deactivate device."));
+ else
+ log_err(cd, _("Failed to get reencryption lock."));
+ goto out;
+ }
+ }
+
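+ /* Suspended reencryption helpers would block removal, replace them with dm-error targets first. */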
+ dep = deps;
+ while (*dep) {
+ if (is_reencryption_helper(*dep) && (dm_status_suspended(cd, *dep) > 0)) {
+ if (dm_error_device(cd, *dep))
+ log_err(cd, _("Failed to replace suspended device %s with dm-error target."), *dep);
+ }
+ dep++;
+ }
+
+ r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmdc);
+ if (r < 0) {
+ memset(&dmdc, 0, sizeof(dmdc));
+ dmdc.segment.type = DM_UNKNOWN;
+ }
+
+ /* Remove top level device first */
+ r = dm_remove_device(cd, name, flags);
+ if (!r) {
+ tgt = &dmdc.segment;
+ while (tgt) {
+ if (tgt->type == DM_CRYPT)
+ crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description, LOGON_KEY);
+ tgt = tgt->next;
+ }
+ }
+ dm_targets_free(cd, &dmdc);
+
+ /* TODO: We have LUKS2 dependencies now */
+ if (r >= 0 && namei) {
+ log_dbg(cd, "Deactivating integrity device %s.", namei);
+ r = dm_remove_device(cd, namei, 0);
+ }
+
+ if (!r) {
+ ret = 0;
+ dep = deps;
+ while (*dep) {
+ log_dbg(cd, "Deactivating LUKS2 dependent device %s.", *dep);
+ r = dm_query_device(cd, *dep, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmdc);
+ if (r < 0) {
+ memset(&dmdc, 0, sizeof(dmdc));
+ dmdc.segment.type = DM_UNKNOWN;
+ }
+
+ r = dm_remove_device(cd, *dep, flags);
+ if (r < 0) {
+ ci = crypt_status(cd, *dep);
+ if (ci == CRYPT_BUSY)
+ log_err(cd, _("Device %s is still in use."), *dep);
+ if (ci == CRYPT_INACTIVE)
+ r = 0;
+ }
+ if (!r) {
+ tgt = &dmdc.segment;
+ while (tgt) {
+ if (tgt->type == DM_CRYPT)
+ crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description, LOGON_KEY);
+ tgt = tgt->next;
+ }
+ }
+ dm_targets_free(cd, &dmdc);
+ if (r && !ret)
+ ret = r;
+ dep++;
+ }
+ r = ret;
+ }
+
+out:
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ dep = deps;
+ while (*dep)
+ free(*dep++);
+
+ return r;
+}
+
+int LUKS2_unmet_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs_mask, int quiet)
+{
+ uint32_t reqs;
+ int r = LUKS2_config_get_requirements(cd, hdr, &reqs);
+
+ if (r) {
+ if (!quiet)
+ log_err(cd, _("Failed to read LUKS2 requirements."));
+ return r;
+ }
+
+ /* do not mask unknown requirements check */
+ if (reqs_unknown(reqs)) {
+ if (!quiet)
+ log_err(cd, _("Unmet LUKS2 requirements detected."));
+ return -ETXTBSY;
+ }
+
+ /* mask out permitted requirements */
+ reqs &= ~reqs_mask;
+
+ if (reqs_reencrypt(reqs) && !quiet)
+ log_err(cd, _("Operation incompatible with device marked for legacy reencryption. Aborting."));
+ if (reqs_reencrypt_online(reqs) && !quiet)
+ log_err(cd, _("Operation incompatible with device marked for LUKS2 reencryption. Aborting."));
+
+ /* any remaining unmasked requirement fails the check */
+ return reqs ? -EINVAL : 0;
+}
+
+/*
+ * NOTE: this routine is called on a json object that failed validation.
+ * Proceed with caution :)
+ *
+ * known glitches so far:
+ *
+ * any version < 2.0.3:
+ * - luks2 keyslot pbkdf params change via crypt_keyslot_change_by_passphrase()
+ * could leave previous type parameters behind. Correct this by purging
+ * all params not needed by current type.
+ */
+void LUKS2_hdr_repair(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj_keyslots;
+
+ if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
+ return;
+ if (!json_object_is_type(jobj_keyslots, json_type_object))
+ return;
+
+ LUKS2_keyslots_repair(cd, jobj_keyslots);
+}
+
+void json_object_object_del_by_uint(json_object *jobj, unsigned key)
+{
+ char key_name[16];
+
+ if (snprintf(key_name, sizeof(key_name), "%u", key) < 1)
+ return;
+ json_object_object_del(jobj, key_name);
+}
+
+int json_object_object_add_by_uint(json_object *jobj, unsigned key, json_object *jobj_val)
+{
+ char key_name[16];
+
+ if (snprintf(key_name, sizeof(key_name), "%u", key) < 1)
+ return -EINVAL;
+
+#if HAVE_DECL_JSON_OBJECT_OBJECT_ADD_EX
+ return json_object_object_add_ex(jobj, key_name, jobj_val, 0) ? -ENOMEM : 0;
+#else
+ json_object_object_add(jobj, key_name, jobj_val);
+ return 0;
+#endif
+}
+
+/* jobj_dst must contain a pointer initialized to NULL (see json-c json_object_deep_copy API) */
+int json_object_copy(json_object *jobj_src, json_object **jobj_dst)
+{
+ if (!jobj_src || !jobj_dst || *jobj_dst)
+ return -1;
+
+#if HAVE_DECL_JSON_OBJECT_DEEP_COPY
+ return json_object_deep_copy(jobj_src, jobj_dst, NULL);
+#else
+ *jobj_dst = json_tokener_parse(json_object_get_string(jobj_src));
+ return *jobj_dst ? 0 : -1;
+#endif
+}
diff --git a/lib/luks2/luks2_keyslot.c b/lib/luks2/luks2_keyslot.c
new file mode 100644
index 0000000..5cf4b83
--- /dev/null
+++ b/lib/luks2/luks2_keyslot.c
@@ -0,0 +1,977 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, keyslot handling
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+/* Internal implementations */
+extern const keyslot_handler luks2_keyslot;
+extern const keyslot_handler reenc_keyslot;
+
+static const keyslot_handler *keyslot_handlers[LUKS2_KEYSLOTS_MAX] = {
+ &luks2_keyslot,
+#if USE_LUKS2_REENCRYPTION
+ &reenc_keyslot,
+#endif
+ NULL
+};
+
+static const keyslot_handler
+*LUKS2_keyslot_handler_type(const char *type)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX && keyslot_handlers[i]; i++) {
+ if (!strcmp(keyslot_handlers[i]->name, type))
+ return keyslot_handlers[i];
+ }
+
+ return NULL;
+}
+
+static const keyslot_handler
+*LUKS2_keyslot_handler(struct crypt_device *cd, int keyslot)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj1, *jobj2;
+
+ if (keyslot < 0)
+ return NULL;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return NULL;
+
+ if (!(jobj1 = LUKS2_get_keyslot_jobj(hdr, keyslot)))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, "type", &jobj2))
+ return NULL;
+
+ return LUKS2_keyslot_handler_type(json_object_get_string(jobj2));
+}
+
+int LUKS2_keyslot_find_empty(struct crypt_device *cd, struct luks2_hdr *hdr, size_t keylength)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
+ if (!LUKS2_get_keyslot_jobj(hdr, i))
+ break;
+
+ if (i == LUKS2_KEYSLOTS_MAX)
+ return -EINVAL;
+
+ /* Check also there is a space for the key in keyslots area */
+ if (keylength && LUKS2_find_area_gap(cd, hdr, keylength, NULL, NULL) < 0)
+ return -ENOSPC;
+
+ return i;
+}
+
+/*
+ * Check if a keyslot is assigned to a specific segment.
+ * For segment >= 0 returns 1 (assigned) or 0, for a negative
+ * segment returns the number of segments sharing the keyslot digest.
+ */
+static int _keyslot_for_segment(struct luks2_hdr *hdr, int keyslot, int segment)
+{
+ int keyslot_digest, count = 0;
+ unsigned s;
+
+ keyslot_digest = LUKS2_digest_by_keyslot(hdr, keyslot);
+ if (keyslot_digest < 0)
+ return keyslot_digest;
+
+ if (segment >= 0)
+ return keyslot_digest == LUKS2_digest_by_segment(hdr, segment);
+
+ for (s = 0; s < json_segments_count(LUKS2_get_segments_jobj(hdr)); s++) {
+ if (keyslot_digest == LUKS2_digest_by_segment(hdr, s))
+ count++;
+ }
+
+ return count;
+}
+
+static int _keyslot_for_digest(struct luks2_hdr *hdr, int keyslot, int digest)
+{
+ int r = -EINVAL;
+
+ r = LUKS2_digest_by_keyslot(hdr, keyslot);
+ if (r < 0)
+ return r;
+ return r == digest ? 0 : -ENOENT;
+}
+
+int LUKS2_keyslot_for_segment(struct luks2_hdr *hdr, int keyslot, int segment)
+{
+ int r = -EINVAL;
+
+ /* no need to check anything */
+ if (segment == CRYPT_ANY_SEGMENT)
+ return 0; /* ok */
+ if (segment == CRYPT_DEFAULT_SEGMENT) {
+ segment = LUKS2_get_default_segment(hdr);
+ if (segment < 0)
+ return segment;
+ }
+
+ r = _keyslot_for_segment(hdr, keyslot, segment);
+ if (r < 0)
+ return r;
+
+ return r >= 1 ? 0 : -ENOENT;
+}
+
+/* Number of keyslots assigned to a segment or all keyslots for CRYPT_ANY_SEGMENT */
+int LUKS2_keyslot_active_count(struct luks2_hdr *hdr, int segment)
+{
+ int num = 0;
+ json_object *jobj_keyslots;
+
+ json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);
+
+ json_object_object_foreach(jobj_keyslots, slot, val) {
+ UNUSED(val);
+ if (!LUKS2_keyslot_for_segment(hdr, atoi(slot), segment))
+ num++;
+ }
+
+ return num;
+}
+
+int LUKS2_keyslot_cipher_incompatible(struct crypt_device *cd, const char *cipher_spec)
+{
+ char cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
+
+ if (!cipher_spec || crypt_is_cipher_null(cipher_spec))
+ return 1;
+
+ if (crypt_parse_name_and_mode(cipher_spec, cipher, NULL, cipher_mode) < 0)
+ return 1;
+
+ /* Keyslot is already authenticated; we cannot use integrity tags here */
+ if (crypt_get_integrity_tag_size(cd))
+ return 1;
+
+ /* Wrapped key schemes cannot be used for keyslot encryption */
+ if (crypt_cipher_wrapped_key(cipher, cipher_mode))
+ return 1;
+
+ /* Check if crypto backend can use the cipher */
+ if (crypt_cipher_ivsize(cipher, cipher_mode) < 0)
+ return 1;
+
+ return 0;
+}
+
+int LUKS2_keyslot_params_default(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct luks2_keyslot_params *params)
+{
+ const struct crypt_pbkdf_type *pbkdf = crypt_get_pbkdf_type(cd);
+ const char *cipher_spec;
+ size_t key_size;
+ int r;
+
+ if (!hdr || !pbkdf || !params)
+ return -EINVAL;
+
+ /*
+ * set keyslot area encryption parameters
+ */
+ params->area_type = LUKS2_KEYSLOT_AREA_RAW;
+ cipher_spec = crypt_keyslot_get_encryption(cd, CRYPT_ANY_SLOT, &key_size);
+ if (!cipher_spec || !key_size)
+ return -EINVAL;
+
+ params->area.raw.key_size = key_size;
+ r = snprintf(params->area.raw.encryption, sizeof(params->area.raw.encryption), "%s", cipher_spec);
+ if (r < 0 || (size_t)r >= sizeof(params->area.raw.encryption))
+ return -EINVAL;
+
+ /*
+ * set keyslot AF parameters
+ */
+ params->af_type = LUKS2_KEYSLOT_AF_LUKS1;
+ /* currently we use hash for AF from pbkdf settings */
+ r = snprintf(params->af.luks1.hash, sizeof(params->af.luks1.hash), "%s", pbkdf->hash ?: DEFAULT_LUKS1_HASH);
+ if (r < 0 || (size_t)r >= sizeof(params->af.luks1.hash))
+ return -EINVAL;
+ params->af.luks1.stripes = 4000;
+
+ return 0;
+}
+
+int LUKS2_keyslot_pbkdf(struct luks2_hdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf)
+{
+ json_object *jobj_keyslot, *jobj_kdf, *jobj;
+
+ if (!hdr || !pbkdf)
+ return -EINVAL;
+
+ if (LUKS2_keyslot_info(hdr, keyslot) == CRYPT_SLOT_INVALID)
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -ENOENT;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_kdf, "type", &jobj))
+ return -EINVAL;
+
+ memset(pbkdf, 0, sizeof(*pbkdf));
+
+ pbkdf->type = json_object_get_string(jobj);
+ if (json_object_object_get_ex(jobj_kdf, "hash", &jobj))
+ pbkdf->hash = json_object_get_string(jobj);
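+ /* PBKDF2 stores its cost in "iterations", Argon2 variants in "time"; both map to pbkdf->iterations. */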
+ if (json_object_object_get_ex(jobj_kdf, "iterations", &jobj))
+ pbkdf->iterations = json_object_get_int(jobj);
+ if (json_object_object_get_ex(jobj_kdf, "time", &jobj))
+ pbkdf->iterations = json_object_get_int(jobj);
+ if (json_object_object_get_ex(jobj_kdf, "memory", &jobj))
+ pbkdf->max_memory_kb = json_object_get_int(jobj);
+ if (json_object_object_get_ex(jobj_kdf, "cpus", &jobj))
+ pbkdf->parallel_threads = json_object_get_int(jobj);
+
+ return 0;
+}
+
+static int LUKS2_keyslot_unbound(struct luks2_hdr *hdr, int keyslot)
+{
+ json_object *jobj_digest, *jobj_segments;
+ int digest = LUKS2_digest_by_keyslot(hdr, keyslot);
+
+ if (digest < 0)
+ return 0;
+
+ if (!(jobj_digest = LUKS2_get_digest_jobj(hdr, digest)))
+ return 0;
+
+ json_object_object_get_ex(jobj_digest, "segments", &jobj_segments);
+ if (!jobj_segments || !json_object_is_type(jobj_segments, json_type_array) ||
+ json_object_array_length(jobj_segments) == 0)
+ return 1;
+
+ return 0;
+}
+
+crypt_keyslot_info LUKS2_keyslot_info(struct luks2_hdr *hdr, int keyslot)
+{
+ if (keyslot >= LUKS2_KEYSLOTS_MAX || keyslot < 0)
+ return CRYPT_SLOT_INVALID;
+
+ if (!LUKS2_get_keyslot_jobj(hdr, keyslot))
+ return CRYPT_SLOT_INACTIVE;
+
+ if (LUKS2_digest_by_keyslot(hdr, keyslot) < 0 ||
+ LUKS2_keyslot_unbound(hdr, keyslot))
+ return CRYPT_SLOT_UNBOUND;
+
+ if (LUKS2_keyslot_active_count(hdr, CRYPT_DEFAULT_SEGMENT) == 1 &&
+ !LUKS2_keyslot_for_segment(hdr, keyslot, CRYPT_DEFAULT_SEGMENT))
+ return CRYPT_SLOT_ACTIVE_LAST;
+
+ return CRYPT_SLOT_ACTIVE;
+}
+
+int LUKS2_keyslot_jobj_area(json_object *jobj_keyslot, uint64_t *offset, uint64_t *length)
+{
+ json_object *jobj_area, *jobj;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_area, "offset", &jobj))
+ return -EINVAL;
+ *offset = crypt_jobj_get_uint64(jobj);
+
+ if (!json_object_object_get_ex(jobj_area, "size", &jobj))
+ return -EINVAL;
+ *length = crypt_jobj_get_uint64(jobj);
+
+ return 0;
+}
+
+int LUKS2_keyslot_area(struct luks2_hdr *hdr,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length)
+{
+ json_object *jobj_keyslot;
+
+ if (LUKS2_keyslot_info(hdr, keyslot) == CRYPT_SLOT_INVALID)
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -ENOENT;
+
+ return LUKS2_keyslot_jobj_area(jobj_keyslot, offset, length);
+}
+
+static int _open_and_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const keyslot_handler *h,
+ int keyslot,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ int r, key_size = LUKS2_get_keyslot_stored_key_size(hdr, keyslot);
+
+ if (key_size < 0)
+ return -EINVAL;
+
+ *vk = crypt_alloc_volume_key(key_size, NULL);
+ if (!*vk)
+ return -ENOMEM;
+
+ r = h->open(cd, keyslot, password, password_len, (*vk)->key, (*vk)->keylength);
+ if (r < 0)
+ log_dbg(cd, "Keyslot %d (%s) open failed with %d.", keyslot, h->name, r);
+ else
+ r = LUKS2_digest_verify(cd, hdr, *vk, keyslot);
+
+ if (r < 0) {
+ crypt_free_volume_key(*vk);
+ *vk = NULL;
+ } else
+ crypt_volume_key_set_id(*vk, r);
+
+ return r < 0 ? r : keyslot;
+}
+
+static int LUKS2_open_and_verify_by_digest(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int digest,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ const keyslot_handler *h;
+ int r;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
+ return -ENOENT;
+
+ r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot %d validation failed.", keyslot);
+ return r;
+ }
+
+ r = _keyslot_for_digest(hdr, keyslot, digest);
+ if (r) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Keyslot %d unusable for digest %d.", keyslot, digest);
+ return r;
+ }
+
+ return _open_and_verify(cd, hdr, h, keyslot, password, password_len, vk);
+}
+
+static int LUKS2_open_and_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int segment,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ const keyslot_handler *h;
+ int r;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
+ return -ENOENT;
+
+ r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot %d validation failed.", keyslot);
+ return r;
+ }
+
+ r = LUKS2_keyslot_for_segment(hdr, keyslot, segment);
+ if (r) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Keyslot %d unusable for segment %d.", keyslot, segment);
+ return r;
+ }
+
+ return _open_and_verify(cd, hdr, h, keyslot, password, password_len, vk);
+}
+
+static int LUKS2_keyslot_open_priority_digest(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ crypt_keyslot_priority priority,
+ const char *password,
+ size_t password_len,
+ int digest,
+ struct volume_key **vk)
+{
+ json_object *jobj_keyslots, *jobj;
+ crypt_keyslot_priority slot_priority;
+ int keyslot, r = -ENOENT;
+
+ json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);
+
+ json_object_object_foreach(jobj_keyslots, slot, val) {
+ if (!json_object_object_get_ex(val, "priority", &jobj))
+ slot_priority = CRYPT_SLOT_PRIORITY_NORMAL;
+ else
+ slot_priority = json_object_get_int(jobj);
+
+ keyslot = atoi(slot);
+ if (slot_priority != priority) {
+ log_dbg(cd, "Keyslot %d priority %d != %d (required), skipped.",
+ keyslot, slot_priority, priority);
+ continue;
+ }
+
+ r = LUKS2_open_and_verify_by_digest(cd, hdr, keyslot, digest, password, password_len, vk);
+
+ /* Do not retry for errors that are not -EPERM or -ENOENT,
+ the former meaning wrong password, the latter a keyslot unusable for the digest */
+ if ((r != -EPERM) && (r != -ENOENT))
+ break;
+ }
+
+ return r;
+}
+
+static int LUKS2_keyslot_open_priority(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ crypt_keyslot_priority priority,
+ const char *password,
+ size_t password_len,
+ int segment,
+ struct volume_key **vk)
+{
+ json_object *jobj_keyslots, *jobj;
+ crypt_keyslot_priority slot_priority;
+ int keyslot, r = -ENOENT;
+
+ json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);
+
+ json_object_object_foreach(jobj_keyslots, slot, val) {
+ if (!json_object_object_get_ex(val, "priority", &jobj))
+ slot_priority = CRYPT_SLOT_PRIORITY_NORMAL;
+ else
+ slot_priority = json_object_get_int(jobj);
+
+ keyslot = atoi(slot);
+ if (slot_priority != priority) {
+ log_dbg(cd, "Keyslot %d priority %d != %d (required), skipped.",
+ keyslot, slot_priority, priority);
+ continue;
+ }
+
+ r = LUKS2_open_and_verify(cd, hdr, keyslot, segment, password, password_len, vk);
+
+ /* Do not retry for errors that are not -EPERM or -ENOENT,
+ the former meaning wrong password, the latter a keyslot unusable for the segment */
+ if ((r != -EPERM) && (r != -ENOENT))
+ break;
+ }
+
+ return r;
+}
+
+static int LUKS2_keyslot_open_by_digest(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int digest,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ int r_prio, r = -EINVAL;
+
+ if (digest < 0)
+ return r;
+
+ if (keyslot == CRYPT_ANY_SLOT) {
+ r_prio = LUKS2_keyslot_open_priority_digest(cd, hdr, CRYPT_SLOT_PRIORITY_PREFER,
+ password, password_len, digest, vk);
+ if (r_prio >= 0)
+ r = r_prio;
+ else if (r_prio != -EPERM && r_prio != -ENOENT)
+ r = r_prio;
+ else
+ r = LUKS2_keyslot_open_priority_digest(cd, hdr, CRYPT_SLOT_PRIORITY_NORMAL,
+ password, password_len, digest, vk);
+ /* Prefer password wrong to no entry from priority slot */
+ if (r_prio == -EPERM && r == -ENOENT)
+ r = r_prio;
+ } else
+ r = LUKS2_open_and_verify_by_digest(cd, hdr, keyslot, digest, password, password_len, vk);
+
+ return r;
+}
+
+int LUKS2_keyslot_open_all_segments(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vks)
+{
+ struct volume_key *vk = NULL;
+ int digest_old, digest_new, r = -EINVAL;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
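+ /* During reencryption both the old and the new volume key may need to be unlocked. */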
+ digest_old = LUKS2_reencrypt_digest_old(hdr);
+ if (digest_old >= 0) {
+ log_dbg(cd, "Trying to unlock volume key (digest: %d) using keyslot %d.", digest_old, keyslot_old);
+ r = LUKS2_keyslot_open_by_digest(cd, hdr, keyslot_old, digest_old, password, password_len, &vk);
+ if (r < 0)
+ goto out;
+ crypt_volume_key_add_next(vks, vk);
+ }
+
+ digest_new = LUKS2_reencrypt_digest_new(hdr);
+ if (digest_new >= 0 && digest_old != digest_new) {
+ log_dbg(cd, "Trying to unlock volume key (digest: %d) using keyslot %d.", digest_new, keyslot_new);
+ r = LUKS2_keyslot_open_by_digest(cd, hdr, keyslot_new, digest_new, password, password_len, &vk);
+ if (r < 0)
+ goto out;
+ crypt_volume_key_add_next(vks, vk);
+ }
+out:
+ if (r < 0) {
+ crypt_free_volume_key(*vks);
+ *vks = NULL;
+
+ if (r == -ENOMEM)
+ log_err(cd, _("Not enough available memory to open a keyslot."));
+ else if (r != -EPERM && r != -ENOENT)
+ log_err(cd, _("Keyslot open failed."));
+ }
+ return r;
+}
+
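+/*
+ * Open a keyslot usable for the given segment; CRYPT_ANY_SLOT uses the
+ * same two-pass priority scan as the digest variant above.
+ */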
+int LUKS2_keyslot_open(struct crypt_device *cd,
+ int keyslot,
+ int segment,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ struct luks2_hdr *hdr;
+ int r_prio, r = -EINVAL;
+
+ hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+ if (keyslot == CRYPT_ANY_SLOT) {
+ r_prio = LUKS2_keyslot_open_priority(cd, hdr, CRYPT_SLOT_PRIORITY_PREFER,
+ password, password_len, segment, vk);
+ if (r_prio >= 0)
+ r = r_prio;
+ else if (r_prio != -EPERM && r_prio != -ENOENT)
+ r = r_prio;
+ else
+ r = LUKS2_keyslot_open_priority(cd, hdr, CRYPT_SLOT_PRIORITY_NORMAL,
+ password, password_len, segment, vk);
+ /* Prefer a wrong password (-EPERM) over no entry (-ENOENT) from the priority slots */
+ if (r_prio == -EPERM && r == -ENOENT)
+ r = r_prio;
+ } else
+ r = LUKS2_open_and_verify(cd, hdr, keyslot, segment, password, password_len, vk);
+
+ if (r < 0) {
+ if (r == -ENOMEM)
+ log_err(cd, _("Not enough available memory to open a keyslot."));
+ else if (r != -EPERM && r != -ENOENT)
+ log_err(cd, _("Keyslot open failed."));
+ }
+
+ return r;
+}
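+
+/*
+ * Caller sketch (illustrative only, not part of the API contract;
+ * CRYPT_DEFAULT_SEGMENT is assumed here as the default-segment constant,
+ * passphrase/passphrase_len are hypothetical caller variables):
+ *
+ * struct volume_key *vk = NULL;
+ * int r = LUKS2_keyslot_open(cd, CRYPT_ANY_SLOT, CRYPT_DEFAULT_SEGMENT,
+ * passphrase, passphrase_len, &vk);
+ * if (r >= 0)
+ * crypt_free_volume_key(vk);
+ */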
+
+int LUKS2_keyslot_reencrypt_store(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const void *buffer,
+ size_t buffer_length)
+{
+ const keyslot_handler *h;
+ int r;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)) || strcmp(h->name, "reencrypt"))
+ return -EINVAL;
+
+ r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot validation failed.");
+ return r;
+ }
+
+ return h->store(cd, keyslot, NULL, 0,
+ buffer, buffer_length);
+}
+
+int LUKS2_keyslot_store(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const char *password,
+ size_t password_len,
+ const struct volume_key *vk,
+ const struct luks2_keyslot_params *params)
+{
+ const keyslot_handler *h;
+ int r;
+
+ if (keyslot == CRYPT_ANY_SLOT)
+ return -EINVAL;
+
+ if (!LUKS2_get_keyslot_jobj(hdr, keyslot)) {
+ /* Try to allocate default and empty keyslot type */
+ h = LUKS2_keyslot_handler_type("luks2");
+ if (!h)
+ return -EINVAL;
+
+ r = h->alloc(cd, keyslot, vk->keylength, params);
+ if (r)
+ return r;
+ } else {
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
+ return -EINVAL;
+
+ r = h->update(cd, keyslot, params);
+ if (r) {
+ log_dbg(cd, "Failed to update keyslot %d json.", keyslot);
+ return r;
+ }
+ }
+
+ r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot validation failed.");
+ return r;
+ }
+
+ if (LUKS2_hdr_validate(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN))
+ return -EINVAL;
+
+ return h->store(cd, keyslot, password, password_len,
+ vk->key, vk->keylength);
+}
+
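+/*
+ * Securely wipe the keyslot binary area on disk, run the slot-specific
+ * wipe, and (unless wipe_area_only is set) remove the keyslot object from
+ * the JSON metadata.
+ */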
+int LUKS2_keyslot_wipe(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int wipe_area_only)
+{
+ struct device *device = crypt_metadata_device(cd);
+ uint64_t area_offset, area_length;
+ int r;
+ json_object *jobj_keyslot, *jobj_keyslots;
+ const keyslot_handler *h;
+
+ h = LUKS2_keyslot_handler(cd, keyslot);
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -ENOENT;
+
+ if (wipe_area_only)
+ log_dbg(cd, "Wiping keyslot %d area only.", keyslot);
+
+ r = LUKS2_device_write_lock(cd, hdr, device);
+ if (r)
+ return r;
+
+ /* secure deletion of possible key material in keyslot area */
+ r = crypt_keyslot_area(cd, keyslot, &area_offset, &area_length);
+ if (r && r != -ENOENT)
+ goto out;
+
+ if (!r) {
+ r = crypt_wipe_device(cd, device, CRYPT_WIPE_SPECIAL, area_offset,
+ area_length, area_length, NULL, NULL);
+ if (r) {
+ if (r == -EACCES) {
+ log_err(cd, _("Cannot write to device %s, permission denied."),
+ device_path(device));
+ r = -EINVAL;
+ } else
+ log_err(cd, _("Cannot wipe device %s."), device_path(device));
+ goto out;
+ }
+ }
+
+ if (wipe_area_only)
+ goto out;
+
+ /* Slot specific wipe */
+ if (h) {
+ r = h->wipe(cd, keyslot);
+ if (r < 0)
+ goto out;
+ } else
+ log_dbg(cd, "Wiping keyslot %d without specific-slot handler loaded.", keyslot);
+
+ json_object_object_del_by_uint(jobj_keyslots, keyslot);
+
+ r = LUKS2_hdr_write(cd, hdr);
+out:
+ device_write_unlock(cd, crypt_metadata_device(cd));
+ return r;
+}
+
+int LUKS2_keyslot_dump(struct crypt_device *cd, int keyslot)
+{
+ const keyslot_handler *h;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
+ return -EINVAL;
+
+ return h->dump(cd, keyslot);
+}
+
+crypt_keyslot_priority LUKS2_keyslot_priority_get(struct luks2_hdr *hdr, int keyslot)
+{
+ json_object *jobj_keyslot, *jobj_priority;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return CRYPT_SLOT_PRIORITY_INVALID;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "priority", &jobj_priority))
+ return CRYPT_SLOT_PRIORITY_NORMAL;
+
+ return json_object_get_int(jobj_priority);
+}
+
+int LUKS2_keyslot_priority_set(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, crypt_keyslot_priority priority, int commit)
+{
+ json_object *jobj_keyslot;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ if (priority == CRYPT_SLOT_PRIORITY_NORMAL)
+ json_object_object_del(jobj_keyslot, "priority");
+ else
+ json_object_object_add(jobj_keyslot, "priority", json_object_new_int(priority));
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+int placeholder_keyslot_alloc(struct crypt_device *cd,
+ int keyslot,
+ uint64_t area_offset,
+ uint64_t area_length)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj_keyslots, *jobj_keyslot, *jobj_area;
+
+ log_dbg(cd, "Allocating placeholder keyslot %d for LUKS1 down conversion.", keyslot);
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX)
+ return -EINVAL;
+
+ if (LUKS2_get_keyslot_jobj(hdr, keyslot))
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
+ return -EINVAL;
+
+ jobj_keyslot = json_object_new_object();
+ json_object_object_add(jobj_keyslot, "type", json_object_new_string("placeholder"));
+ /*
+ * key_size = -1 makes the placeholder keyslot impossible to pass validation.
+ * It is a safeguard against accidentally storing the temporary LUKS2
+ * conversion header.
+ */
+ json_object_object_add(jobj_keyslot, "key_size", json_object_new_int(-1));
+
+ /* Area object */
+ jobj_area = json_object_new_object();
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
+ json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
+ json_object_object_add(jobj_keyslot, "area", jobj_area);
+
+ json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+
+ return 0;
+}
+
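+/* Count how many digest objects reference the given keyslot. */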
+static unsigned LUKS2_get_keyslot_digests_count(json_object *hdr_jobj, int keyslot)
+{
+ char num[16];
+ json_object *jobj_digests, *jobj_keyslots;
+ unsigned count = 0;
+
+ if (!json_object_object_get_ex(hdr_jobj, "digests", &jobj_digests))
+ return 0;
+
+ if (snprintf(num, sizeof(num), "%u", keyslot) < 0)
+ return 0;
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ UNUSED(key);
+ json_object_object_get_ex(val, "keyslots", &jobj_keyslots);
+ if (LUKS2_array_jobj(jobj_keyslots, num))
+ count++;
+ }
+
+ return count;
+}
+
+/* run only on header that passed basic format validation */
+int LUKS2_keyslots_validate(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ const keyslot_handler *h;
+ int keyslot;
+ json_object *jobj_keyslots, *jobj_type;
+ uint32_t reqs, reencrypt_count = 0;
+ struct luks2_hdr dummy = {
+ .jobj = hdr_jobj
+ };
+
+ if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
+ return -EINVAL;
+
+ if (LUKS2_config_get_requirements(cd, &dummy, &reqs))
+ return -EINVAL;
+
+ json_object_object_foreach(jobj_keyslots, slot, val) {
+ keyslot = atoi(slot);
+ json_object_object_get_ex(val, "type", &jobj_type);
+ h = LUKS2_keyslot_handler_type(json_object_get_string(jobj_type));
+ if (!h)
+ continue;
+ if (h->validate && h->validate(cd, val)) {
+ log_dbg(cd, "Keyslot type %s validation failed on keyslot %d.", h->name, keyslot);
+ return -EINVAL;
+ }
+
+ if (!strcmp(h->name, "luks2") && LUKS2_get_keyslot_digests_count(hdr_jobj, keyslot) != 1) {
+ log_dbg(cd, "Keyslot %d is not assigned to exactly 1 digest.", keyslot);
+ return -EINVAL;
+ }
+
+ if (!strcmp(h->name, "reencrypt"))
+ reencrypt_count++;
+ }
+
+ if ((reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT) && reencrypt_count == 0) {
+ log_dbg(cd, "Missing reencryption keyslot.");
+ return -EINVAL;
+ }
+
+ if (reencrypt_count && !LUKS2_reencrypt_requirement_candidate(&dummy)) {
+ log_dbg(cd, "Missing reencryption requirement flag.");
+ return -EINVAL;
+ }
+
+ if (reencrypt_count > 1) {
+ log_dbg(cd, "Too many reencryption keyslots.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void LUKS2_keyslots_repair(struct crypt_device *cd, json_object *jobj_keyslots)
+{
+ const keyslot_handler *h;
+ json_object *jobj_type;
+
+ json_object_object_foreach(jobj_keyslots, slot, val) {
+ UNUSED(slot);
+ if (!json_object_is_type(val, json_type_object) ||
+ !json_object_object_get_ex(val, "type", &jobj_type) ||
+ !json_object_is_type(jobj_type, json_type_string))
+ continue;
+
+ h = LUKS2_keyslot_handler_type(json_object_get_string(jobj_type));
+ if (h && h->repair)
+ h->repair(val);
+ }
+}
+
+/* assumes valid header */
+int LUKS2_find_keyslot(struct luks2_hdr *hdr, const char *type)
+{
+ int i;
+ json_object *jobj_keyslot, *jobj_type;
+
+ if (!type)
+ return -EINVAL;
+
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, i);
+ if (!jobj_keyslot)
+ continue;
+
+ json_object_object_get_ex(jobj_keyslot, "type", &jobj_type);
+ if (!strcmp(json_object_get_string(jobj_type), type))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+/* assumes valid header; it does not move references in tokens/digests etc.! */
+int LUKS2_keyslot_swap(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, int keyslot2)
+{
+ json_object *jobj_keyslots, *jobj_keyslot, *jobj_keyslot2;
+ int r;
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ jobj_keyslot2 = LUKS2_get_keyslot_jobj(hdr, keyslot2);
+ if (!jobj_keyslot2)
+ return -EINVAL;
+
+ /* This transfers object ownership, so json_object_put is not needed */
+ json_object_get(jobj_keyslot);
+ json_object_get(jobj_keyslot2);
+
+ json_object_object_del_by_uint(jobj_keyslots, keyslot);
+ r = json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot2);
+ if (r < 0) {
+ log_dbg(cd, "Failed to swap keyslot %d.", keyslot);
+ return r;
+ }
+
+ json_object_object_del_by_uint(jobj_keyslots, keyslot2);
+ r = json_object_object_add_by_uint(jobj_keyslots, keyslot2, jobj_keyslot);
+ if (r < 0)
+ log_dbg(cd, "Failed to swap keyslot2 %d.", keyslot2);
+
+ return r;
+}
diff --git a/lib/luks2/luks2_keyslot_luks2.c b/lib/luks2/luks2_keyslot_luks2.c
new file mode 100644
index 0000000..491dcad
--- /dev/null
+++ b/lib/luks2/luks2_keyslot_luks2.c
@@ -0,0 +1,821 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, LUKS2 type keyslot handler
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <limits.h>
+#include "luks2_internal.h"
+
+/* FIXME: move keyslot encryption to crypto backend */
+#include "../luks1/af.h"
+
+#define LUKS_SALTSIZE 32
+#define LUKS_SLOT_ITERATIONS_MIN 1000
+#define LUKS_STRIPES 4000
+
+/* Serialize memory-hard keyslot access: optional workaround for parallel processing */
+#define MIN_MEMORY_FOR_SERIALIZE_LOCK_KB (32 * 1024) /* 32MB */
+
+/* coverity[ -taint_source : arg-0 ] */
+static int luks2_encrypt_to_storage(char *src, size_t srcLength,
+ const char *cipher, const char *cipher_mode,
+ struct volume_key *vk, unsigned int sector,
+ struct crypt_device *cd)
+{
+#ifndef ENABLE_AF_ALG /* Support for old kernels without Crypto API */
+ return LUKS_encrypt_to_storage(src, srcLength, cipher, cipher_mode, vk, sector, cd);
+#else
+ struct crypt_storage *s;
+ int devfd, r;
+ struct device *device = crypt_metadata_device(cd);
+
+ /* Only whole sector writes supported */
+ if (MISALIGNED_512(srcLength))
+ return -EINVAL;
+
+ /* Encrypt buffer */
+ r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, vk->key, vk->keylength, false);
+ if (r) {
+ log_err(cd, _("Cannot use %s-%s cipher for keyslot encryption."), cipher, cipher_mode);
+ return r;
+ }
+
+ r = crypt_storage_encrypt(s, 0, srcLength, src);
+ crypt_storage_destroy(s);
+ if (r) {
+ log_err(cd, _("IO error while encrypting keyslot."));
+ return r;
+ }
+
+ devfd = device_open_locked(cd, device, O_RDWR);
+ if (devfd >= 0) {
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), src,
+ srcLength, sector * SECTOR_SIZE) < 0)
+ r = -EIO;
+ else
+ r = 0;
+
+ device_sync(cd, device);
+ } else
+ r = -EIO;
+
+ if (r)
+ log_err(cd, _("IO error while encrypting keyslot."));
+
+ return r;
+#endif
+}
+
+static int luks2_decrypt_from_storage(char *dst, size_t dstLength,
+ const char *cipher, const char *cipher_mode, struct volume_key *vk,
+ unsigned int sector, struct crypt_device *cd)
+{
+ struct device *device = crypt_metadata_device(cd);
+#ifndef ENABLE_AF_ALG /* Support for old kernels without Crypto API */
+ int r = device_read_lock(cd, device);
+ if (r) {
+ log_err(cd, _("Failed to acquire read lock on device %s."), device_path(device));
+ return r;
+ }
+ r = LUKS_decrypt_from_storage(dst, dstLength, cipher, cipher_mode, vk, sector, cd);
+ device_read_unlock(cd, crypt_metadata_device(cd));
+ return r;
+#else
+ struct crypt_storage *s;
+ int devfd, r;
+
+ /* Only whole sector reads supported */
+ if (MISALIGNED_512(dstLength))
+ return -EINVAL;
+
+ r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, vk->key, vk->keylength, false);
+ if (r) {
+ log_err(cd, _("Cannot use %s-%s cipher for keyslot encryption."), cipher, cipher_mode);
+ return r;
+ }
+
+ r = device_read_lock(cd, device);
+ if (r) {
+ log_err(cd, _("Failed to acquire read lock on device %s."),
+ device_path(device));
+ crypt_storage_destroy(s);
+ return r;
+ }
+
+ devfd = device_open_locked(cd, device, O_RDONLY);
+ if (devfd >= 0) {
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), dst,
+ dstLength, sector * SECTOR_SIZE) < 0)
+ r = -EIO;
+ else
+ r = 0;
+ } else
+ r = -EIO;
+
+ device_read_unlock(cd, device);
+
+ /* Decrypt buffer */
+ if (!r)
+ r = crypt_storage_decrypt(s, 0, dstLength, dst);
+ else
+ log_err(cd, _("IO error while decrypting keyslot."));
+
+ crypt_storage_destroy(s);
+ return r;
+#endif
+}
+
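+/*
+ * Parse the keyslot "kdf" object into crypt_pbkdf_type and decode the
+ * base64 salt (allocated; the caller frees it on success).
+ */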
+static int luks2_keyslot_get_pbkdf_params(json_object *jobj_keyslot,
+ struct crypt_pbkdf_type *pbkdf, char **salt)
+{
+ json_object *jobj_kdf, *jobj1, *jobj2;
+ size_t salt_len;
+ int r;
+
+ if (!jobj_keyslot || !pbkdf)
+ return -EINVAL;
+
+ memset(pbkdf, 0, sizeof(*pbkdf));
+
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_kdf, "type", &jobj1))
+ return -EINVAL;
+ pbkdf->type = json_object_get_string(jobj1);
+ if (!strcmp(pbkdf->type, CRYPT_KDF_PBKDF2)) {
+ if (!json_object_object_get_ex(jobj_kdf, "hash", &jobj2))
+ return -EINVAL;
+ pbkdf->hash = json_object_get_string(jobj2);
+ if (!json_object_object_get_ex(jobj_kdf, "iterations", &jobj2))
+ return -EINVAL;
+ pbkdf->iterations = json_object_get_int(jobj2);
+ pbkdf->max_memory_kb = 0;
+ pbkdf->parallel_threads = 0;
+ } else {
+ if (!json_object_object_get_ex(jobj_kdf, "time", &jobj2))
+ return -EINVAL;
+ pbkdf->iterations = json_object_get_int(jobj2);
+ if (!json_object_object_get_ex(jobj_kdf, "memory", &jobj2))
+ return -EINVAL;
+ pbkdf->max_memory_kb = json_object_get_int(jobj2);
+ if (!json_object_object_get_ex(jobj_kdf, "cpus", &jobj2))
+ return -EINVAL;
+ pbkdf->parallel_threads = json_object_get_int(jobj2);
+ }
+
+ if (!json_object_object_get_ex(jobj_kdf, "salt", &jobj2))
+ return -EINVAL;
+
+ r = crypt_base64_decode(salt, &salt_len, json_object_get_string(jobj2),
+ json_object_get_string_len(jobj2));
+ if (r < 0)
+ return r;
+
+ if (salt_len != LUKS_SALTSIZE) {
+ free(*salt);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
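+/*
+ * Store path: derive a keyslot key from the passphrase via the configured
+ * PBKDF, AF-split the volume key into LUKS_STRIPES stripes and encrypt the
+ * split material into the keyslot binary area.
+ */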
+static int luks2_keyslot_set_key(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const char *password, size_t passwordLen,
+ const char *volume_key, size_t volume_key_len)
+{
+ struct volume_key *derived_key;
+ char *salt = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
+ char *AfKey = NULL;
+ const char *af_hash = NULL;
+ size_t AFEKSize, keyslot_key_len;
+ json_object *jobj2, *jobj_kdf, *jobj_af, *jobj_area;
+ uint64_t area_offset;
+ struct crypt_pbkdf_type pbkdf;
+ int r;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
+ !json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+ !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+
+ /* prevent accidental volume key size change after allocation */
+ if (!json_object_object_get_ex(jobj_keyslot, "key_size", &jobj2))
+ return -EINVAL;
+ if (json_object_get_int(jobj2) != (int)volume_key_len)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_area, "offset", &jobj2))
+ return -EINVAL;
+ area_offset = crypt_jobj_get_uint64(jobj2);
+
+ if (!json_object_object_get_ex(jobj_area, "encryption", &jobj2))
+ return -EINVAL;
+ r = crypt_parse_name_and_mode(json_object_get_string(jobj2), cipher, NULL, cipher_mode);
+ if (r < 0)
+ return r;
+
+ if (!json_object_object_get_ex(jobj_area, "key_size", &jobj2))
+ return -EINVAL;
+ keyslot_key_len = json_object_get_int(jobj2);
+
+ if (!json_object_object_get_ex(jobj_af, "hash", &jobj2))
+ return -EINVAL;
+ af_hash = json_object_get_string(jobj2);
+
+ r = luks2_keyslot_get_pbkdf_params(jobj_keyslot, &pbkdf, &salt);
+ if (r < 0)
+ return r;
+
+ /*
+ * Allocate derived key storage.
+ */
+ derived_key = crypt_alloc_volume_key(keyslot_key_len, NULL);
+ if (!derived_key) {
+ free(salt);
+ return -ENOMEM;
+ }
+ /*
+ * Calculate keyslot content, split and store it to keyslot area.
+ */
+ log_dbg(cd, "Running keyslot key derivation.");
+ r = crypt_pbkdf(pbkdf.type, pbkdf.hash, password, passwordLen,
+ salt, LUKS_SALTSIZE,
+ derived_key->key, derived_key->keylength,
+ pbkdf.iterations, pbkdf.max_memory_kb,
+ pbkdf.parallel_threads);
+ free(salt);
+ if (r < 0) {
+ if ((crypt_backend_flags() & CRYPT_BACKEND_PBKDF2_INT) &&
+ pbkdf.iterations > INT_MAX)
+ log_err(cd, _("PBKDF2 iteration value overflow."));
+ crypt_free_volume_key(derived_key);
+ return r;
+ }
+
+ // FIXME: verify key_size against AFEKSize
+ AFEKSize = AF_split_sectors(volume_key_len, LUKS_STRIPES) * SECTOR_SIZE;
+ AfKey = crypt_safe_alloc(AFEKSize);
+ if (!AfKey) {
+ crypt_free_volume_key(derived_key);
+ return -ENOMEM;
+ }
+
+ r = crypt_hash_size(af_hash);
+ if (r < 0)
+ log_err(cd, _("Hash algorithm %s is not available."), af_hash);
+ else
+ r = AF_split(cd, volume_key, AfKey, volume_key_len, LUKS_STRIPES, af_hash);
+
+ if (r == 0) {
+ log_dbg(cd, "Updating keyslot area [0x%04" PRIx64 "].", area_offset);
+ /* FIXME: sector_offset should be size_t, fix LUKS_encrypt... accordingly */
+ r = luks2_encrypt_to_storage(AfKey, AFEKSize, cipher, cipher_mode,
+ derived_key, (unsigned)(area_offset / SECTOR_SIZE), cd);
+ }
+
+ crypt_safe_free(AfKey);
+ crypt_free_volume_key(derived_key);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
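+/*
+ * Open path: derive the keyslot key from the passphrase, decrypt the
+ * keyslot binary area and AF-merge the stripes back into the volume key.
+ */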
+static int luks2_keyslot_get_key(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const char *password, size_t passwordLen,
+ char *volume_key, size_t volume_key_len)
+{
+ struct volume_key *derived_key = NULL;
+ struct crypt_pbkdf_type pbkdf;
+ char *AfKey = NULL;
+ size_t AFEKSize;
+ const char *af_hash = NULL;
+ char *salt = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
+ json_object *jobj2, *jobj_af, *jobj_area;
+ uint64_t area_offset;
+ size_t keyslot_key_len;
+ bool try_serialize_lock = false;
+ int r;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+ !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_af, "hash", &jobj2))
+ return -EINVAL;
+ af_hash = json_object_get_string(jobj2);
+
+ if (!json_object_object_get_ex(jobj_area, "offset", &jobj2))
+ return -EINVAL;
+ area_offset = crypt_jobj_get_uint64(jobj2);
+
+ if (!json_object_object_get_ex(jobj_area, "encryption", &jobj2))
+ return -EINVAL;
+ r = crypt_parse_name_and_mode(json_object_get_string(jobj2), cipher, NULL, cipher_mode);
+ if (r < 0)
+ return r;
+
+ if (!json_object_object_get_ex(jobj_area, "key_size", &jobj2))
+ return -EINVAL;
+ keyslot_key_len = json_object_get_int(jobj2);
+
+ r = luks2_keyslot_get_pbkdf_params(jobj_keyslot, &pbkdf, &salt);
+ if (r < 0)
+ return r;
+
+ /*
+ * Allocate derived key storage space.
+ */
+ derived_key = crypt_alloc_volume_key(keyslot_key_len, NULL);
+ if (!derived_key) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ AFEKSize = AF_split_sectors(volume_key_len, LUKS_STRIPES) * SECTOR_SIZE;
+ AfKey = crypt_safe_alloc(AFEKSize);
+ if (!AfKey) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * If requested, serialize unlocking for memory-hard KDFs. Usually a no-op.
+ */
+ if (pbkdf.max_memory_kb > MIN_MEMORY_FOR_SERIALIZE_LOCK_KB)
+ try_serialize_lock = true;
+ if (try_serialize_lock && (r = crypt_serialize_lock(cd)))
+ goto out;
+
+ /*
+ * Calculate derived key, decrypt keyslot content and merge it.
+ */
+ log_dbg(cd, "Running keyslot key derivation.");
+ r = crypt_pbkdf(pbkdf.type, pbkdf.hash, password, passwordLen,
+ salt, LUKS_SALTSIZE,
+ derived_key->key, derived_key->keylength,
+ pbkdf.iterations, pbkdf.max_memory_kb,
+ pbkdf.parallel_threads);
+
+ if (try_serialize_lock)
+ crypt_serialize_unlock(cd);
+
+ if (r == 0) {
+ log_dbg(cd, "Reading keyslot area [0x%04" PRIx64 "].", area_offset);
+ /* FIXME: sector_offset should be size_t, fix LUKS_decrypt... accordingly */
+ r = luks2_decrypt_from_storage(AfKey, AFEKSize, cipher, cipher_mode,
+ derived_key, (unsigned)(area_offset / SECTOR_SIZE), cd);
+ }
+
+ if (r == 0) {
+ r = crypt_hash_size(af_hash);
+ if (r < 0)
+ log_err(cd, _("Hash algorithm %s is not available."), af_hash);
+ else
+ r = AF_merge(AfKey, volume_key, volume_key_len, LUKS_STRIPES, af_hash);
+ }
+out:
+ free(salt);
+ crypt_free_volume_key(derived_key);
+ crypt_safe_free(AfKey);
+
+ return r;
+}
+
+/*
+ * Currently we support an update of only:
+ *
+ * - the af hash function
+ * - kdf params
+ * - area encryption params (encryption, key_size)
+ */
+static int luks2_keyslot_update_json(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const struct luks2_keyslot_params *params)
+{
+ const struct crypt_pbkdf_type *pbkdf;
+ json_object *jobj_af, *jobj_area, *jobj_kdf;
+ char salt[LUKS_SALTSIZE], *salt_base64 = NULL;
+ int r;
+
+ /* jobj_keyslot is not yet validated */
+
+ if (!json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+ !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+
+ /* update area encryption parameters */
+ json_object_object_add(jobj_area, "encryption", json_object_new_string(params->area.raw.encryption));
+ json_object_object_add(jobj_area, "key_size", json_object_new_int(params->area.raw.key_size));
+
+ pbkdf = crypt_get_pbkdf_type(cd);
+ if (!pbkdf)
+ return -EINVAL;
+
+ r = crypt_benchmark_pbkdf_internal(cd, CONST_CAST(struct crypt_pbkdf_type *)pbkdf, params->area.raw.key_size);
+ if (r < 0)
+ return r;
+
+ /* refresh whole 'kdf' object */
+ jobj_kdf = json_object_new_object();
+ if (!jobj_kdf)
+ return -ENOMEM;
+ json_object_object_add(jobj_kdf, "type", json_object_new_string(pbkdf->type));
+ if (!strcmp(pbkdf->type, CRYPT_KDF_PBKDF2)) {
+ json_object_object_add(jobj_kdf, "hash", json_object_new_string(pbkdf->hash));
+ json_object_object_add(jobj_kdf, "iterations", json_object_new_int(pbkdf->iterations));
+ } else {
+ json_object_object_add(jobj_kdf, "time", json_object_new_int(pbkdf->iterations));
+ json_object_object_add(jobj_kdf, "memory", json_object_new_int(pbkdf->max_memory_kb));
+ json_object_object_add(jobj_kdf, "cpus", json_object_new_int(pbkdf->parallel_threads));
+ }
+ json_object_object_add(jobj_keyslot, "kdf", jobj_kdf);
+
+ /*
+ * Regenerate salt and add it in 'kdf' object
+ */
+ r = crypt_random_get(cd, salt, LUKS_SALTSIZE, CRYPT_RND_SALT);
+ if (r < 0)
+ return r;
+ r = crypt_base64_encode(&salt_base64, NULL, salt, LUKS_SALTSIZE);
+ if (r < 0)
+ return r;
+ json_object_object_add(jobj_kdf, "salt", json_object_new_string(salt_base64));
+ free(salt_base64);
+
+ /* update 'af' hash */
+ json_object_object_add(jobj_af, "hash", json_object_new_string(params->af.luks1.hash));
+
+ JSON_DBG(cd, jobj_keyslot, "Keyslot JSON:");
+ return 0;
+}
+
+static int luks2_keyslot_alloc(struct crypt_device *cd,
+ int keyslot,
+ size_t volume_key_len,
+ const struct luks2_keyslot_params *params)
+{
+ struct luks2_hdr *hdr;
+ uint64_t area_offset, area_length;
+ json_object *jobj_keyslots, *jobj_keyslot, *jobj_af, *jobj_area;
+ int r;
+
+ log_dbg(cd, "Trying to allocate LUKS2 keyslot %d.", keyslot);
+
+ if (!params || params->area_type != LUKS2_KEYSLOT_AREA_RAW ||
+ params->af_type != LUKS2_KEYSLOT_AF_LUKS1) {
+ log_dbg(cd, "Invalid LUKS2 keyslot parameters.");
+ return -EINVAL;
+ }
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ if (keyslot == CRYPT_ANY_SLOT)
+ keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
+
+ if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX)
+ return -ENOMEM;
+
+ if (LUKS2_get_keyslot_jobj(hdr, keyslot)) {
+ log_dbg(cd, "Cannot modify already active keyslot %d.", keyslot);
+ return -EINVAL;
+ }
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
+ return -EINVAL;
+
+ r = LUKS2_find_area_gap(cd, hdr, volume_key_len, &area_offset, &area_length);
+ if (r < 0) {
+ log_err(cd, _("No space for new keyslot."));
+ return r;
+ }
+
+ jobj_keyslot = json_object_new_object();
+ json_object_object_add(jobj_keyslot, "type", json_object_new_string("luks2"));
+ json_object_object_add(jobj_keyslot, "key_size", json_object_new_int(volume_key_len));
+
+ /* AF object */
+ jobj_af = json_object_new_object();
+ json_object_object_add(jobj_af, "type", json_object_new_string("luks1"));
+ json_object_object_add(jobj_af, "stripes", json_object_new_int(params->af.luks1.stripes));
+ json_object_object_add(jobj_keyslot, "af", jobj_af);
+
+ /* Area object */
+ jobj_area = json_object_new_object();
+ json_object_object_add(jobj_area, "type", json_object_new_string("raw"));
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
+ json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
+ json_object_object_add(jobj_keyslot, "area", jobj_area);
+
+ json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+
+ r = luks2_keyslot_update_json(cd, jobj_keyslot, params);
+
+ if (!r && LUKS2_check_json_size(cd, hdr)) {
+ log_dbg(cd, "Not enough space in header json area for new keyslot.");
+ r = -ENOSPC;
+ }
+
+ if (r)
+ json_object_object_del_by_uint(jobj_keyslots, keyslot);
+
+ return r;
+}
+
+static int luks2_keyslot_open(struct crypt_device *cd,
+ int keyslot,
+ const char *password,
+ size_t password_len,
+ char *volume_key,
+ size_t volume_key_len)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj_keyslot;
+
+ log_dbg(cd, "Trying to open LUKS2 keyslot %d.", keyslot);
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ return luks2_keyslot_get_key(cd, jobj_keyslot,
+ password, password_len,
+ volume_key, volume_key_len);
+}
+
+/*
+ * This function must not modify json.
+ * It's called after luks2 keyslot validation.
+ */
+static int luks2_keyslot_store(struct crypt_device *cd,
+ int keyslot,
+ const char *password,
+ size_t password_len,
+ const char *volume_key,
+ size_t volume_key_len)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj_keyslot;
+ int r;
+
+ log_dbg(cd, "Calculating attributes for LUKS2 keyslot %d.", keyslot);
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
+ if (r)
+ return r;
+
+ r = luks2_keyslot_set_key(cd, jobj_keyslot,
+ password, password_len,
+ volume_key, volume_key_len);
+ if (!r)
+ r = LUKS2_hdr_write(cd, hdr);
+
+ device_write_unlock(cd, crypt_metadata_device(cd));
+
+ return r < 0 ? r : keyslot;
+}
+
+static int luks2_keyslot_wipe(struct crypt_device *cd, int keyslot)
+{
+ struct luks2_hdr *hdr;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ /* Remove any reference to the deleted keyslot from digests and tokens */
+ LUKS2_digest_assign(cd, hdr, keyslot, CRYPT_ANY_DIGEST, 0, 0);
+ LUKS2_token_assign(cd, hdr, keyslot, CRYPT_ANY_TOKEN, 0, 0);
+
+ return 0;
+}
+
+static int luks2_keyslot_dump(struct crypt_device *cd, int keyslot)
+{
+ json_object *jobj_keyslot, *jobj1, *jobj_kdf, *jobj_af, *jobj_area;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
+ !json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+ !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_area, "encryption", &jobj1);
+ log_std(cd, "\tCipher: %s\n", json_object_get_string(jobj1));
+
+ json_object_object_get_ex(jobj_area, "key_size", &jobj1);
+ log_std(cd, "\tCipher key: %u bits\n", crypt_jobj_get_uint32(jobj1) * 8);
+
+ json_object_object_get_ex(jobj_kdf, "type", &jobj1);
+ log_std(cd, "\tPBKDF: %s\n", json_object_get_string(jobj1));
+
+ if (!strcmp(json_object_get_string(jobj1), CRYPT_KDF_PBKDF2)) {
+ json_object_object_get_ex(jobj_kdf, "hash", &jobj1);
+ log_std(cd, "\tHash: %s\n", json_object_get_string(jobj1));
+
+ json_object_object_get_ex(jobj_kdf, "iterations", &jobj1);
+ log_std(cd, "\tIterations: %" PRIu64 "\n", crypt_jobj_get_uint64(jobj1));
+ } else {
+ json_object_object_get_ex(jobj_kdf, "time", &jobj1);
+ log_std(cd, "\tTime cost: %" PRIu64 "\n", json_object_get_int64(jobj1));
+
+ json_object_object_get_ex(jobj_kdf, "memory", &jobj1);
+ log_std(cd, "\tMemory: %" PRIu64 "\n", json_object_get_int64(jobj1));
+
+ json_object_object_get_ex(jobj_kdf, "cpus", &jobj1);
+ log_std(cd, "\tThreads: %" PRIu64 "\n", json_object_get_int64(jobj1));
+ }
+ json_object_object_get_ex(jobj_kdf, "salt", &jobj1);
+ log_std(cd, "\tSalt: ");
+ hexprint_base64(cd, jobj1, " ", " ");
+
+ json_object_object_get_ex(jobj_af, "stripes", &jobj1);
+ log_std(cd, "\tAF stripes: %u\n", json_object_get_int(jobj1));
+
+ json_object_object_get_ex(jobj_af, "hash", &jobj1);
+ log_std(cd, "\tAF hash: %s\n", json_object_get_string(jobj1));
+
+ json_object_object_get_ex(jobj_area, "offset", &jobj1);
+ log_std(cd, "\tArea offset:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+ json_object_object_get_ex(jobj_area, "size", &jobj1);
+ log_std(cd, "\tArea length:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+ return 0;
+}
+
+static int luks2_keyslot_validate(struct crypt_device *cd, json_object *jobj_keyslot)
+{
+ json_object *jobj_kdf, *jobj_af, *jobj_area, *jobj1;
+ const char *type;
+ int count;
+
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ if (!(jobj_kdf = json_contains(cd, jobj_keyslot, "", "keyslot", "kdf", json_type_object)) ||
+ !(jobj_af = json_contains(cd, jobj_keyslot, "", "keyslot", "af", json_type_object)) ||
+ !(jobj_area = json_contains(cd, jobj_keyslot, "", "keyslot", "area", json_type_object)))
+ return -EINVAL;
+
+ count = json_object_object_length(jobj_kdf);
+
+ jobj1 = json_contains_string(cd, jobj_kdf, "", "kdf section", "type");
+ if (!jobj1)
+ return -EINVAL;
+ type = json_object_get_string(jobj1);
+
+ if (!strcmp(type, CRYPT_KDF_PBKDF2)) {
+ if (count != 4 || /* type, salt, hash, iterations only */
+ !json_contains_string(cd, jobj_kdf, "kdf type", type, "hash") ||
+ !json_contains(cd, jobj_kdf, "kdf type", type, "iterations", json_type_int) ||
+ !json_contains_string(cd, jobj_kdf, "kdf type", type, "salt"))
+ return -EINVAL;
+ } else if (!strcmp(type, CRYPT_KDF_ARGON2I) || !strcmp(type, CRYPT_KDF_ARGON2ID)) {
+ if (count != 5 || /* type, salt, time, memory, cpus only */
+ !json_contains(cd, jobj_kdf, "kdf type", type, "time", json_type_int) ||
+ !json_contains(cd, jobj_kdf, "kdf type", type, "memory", json_type_int) ||
+ !json_contains(cd, jobj_kdf, "kdf type", type, "cpus", json_type_int) ||
+ !json_contains_string(cd, jobj_kdf, "kdf type", type, "salt"))
+ return -EINVAL;
+ }
+
+ jobj1 = json_contains_string(cd, jobj_af, "", "af section", "type");
+ if (!jobj1)
+ return -EINVAL;
+ type = json_object_get_string(jobj1);
+
+ if (!strcmp(type, "luks1")) {
+ if (!json_contains_string(cd, jobj_af, "", "luks1 af", "hash") ||
+ !json_contains(cd, jobj_af, "", "luks1 af", "stripes", json_type_int))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ // FIXME check numbered
+ jobj1 = json_contains_string(cd, jobj_area, "", "area section", "type");
+ if (!jobj1)
+ return -EINVAL;
+ type = json_object_get_string(jobj1);
+
+ if (!strcmp(type, "raw")) {
+ if (!json_contains_string(cd, jobj_area, "area", "raw type", "encryption") ||
+ !json_contains(cd, jobj_area, "area", "raw type", "key_size", json_type_int) ||
+ !json_contains_string(cd, jobj_area, "area", "raw type", "offset") ||
+ !json_contains_string(cd, jobj_area, "area", "raw type", "size"))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int luks2_keyslot_update(struct crypt_device *cd,
+ int keyslot,
+ const struct luks2_keyslot_params *params)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj_keyslot;
+ int r;
+
+ log_dbg(cd, "Updating LUKS2 keyslot %d.", keyslot);
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ r = luks2_keyslot_update_json(cd, jobj_keyslot, params);
+
+ if (!r && LUKS2_check_json_size(cd, hdr)) {
+ log_dbg(cd, "Not enough space in header json area for updated keyslot %d.", keyslot);
+ r = -ENOSPC;
+ }
+
+ return r;
+}
+
+static void luks2_keyslot_repair(json_object *jobj_keyslot)
+{
+ const char *type;
+ json_object *jobj_kdf, *jobj_type;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
+ !json_object_is_type(jobj_kdf, json_type_object))
+ return;
+
+ if (!json_object_object_get_ex(jobj_kdf, "type", &jobj_type) ||
+ !json_object_is_type(jobj_type, json_type_string))
+ return;
+
+ type = json_object_get_string(jobj_type);
+
+ if (!strcmp(type, CRYPT_KDF_PBKDF2)) {
+ /* type, salt, hash, iterations only */
+ json_object_object_foreach(jobj_kdf, key, val) {
+ UNUSED(val);
+ if (!strcmp(key, "type") || !strcmp(key, "salt") ||
+ !strcmp(key, "hash") || !strcmp(key, "iterations"))
+ continue;
+ json_object_object_del(jobj_kdf, key);
+ }
+ } else if (!strcmp(type, CRYPT_KDF_ARGON2I) || !strcmp(type, CRYPT_KDF_ARGON2ID)) {
+ /* type, salt, time, memory, cpus only */
+ json_object_object_foreach(jobj_kdf, key, val) {
+ UNUSED(val);
+ if (!strcmp(key, "type") || !strcmp(key, "salt") ||
+ !strcmp(key, "time") || !strcmp(key, "memory") ||
+ !strcmp(key, "cpus"))
+ continue;
+ json_object_object_del(jobj_kdf, key);
+ }
+ }
+}
+
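+/* Handler registered for "luks2" type keyslots. */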
+const keyslot_handler luks2_keyslot = {
+ .name = "luks2",
+ .alloc = luks2_keyslot_alloc,
+ .update = luks2_keyslot_update,
+ .open = luks2_keyslot_open,
+ .store = luks2_keyslot_store,
+ .wipe = luks2_keyslot_wipe,
+ .dump = luks2_keyslot_dump,
+ .validate = luks2_keyslot_validate,
+ .repair = luks2_keyslot_repair
+};
diff --git a/lib/luks2/luks2_keyslot_reenc.c b/lib/luks2/luks2_keyslot_reenc.c
new file mode 100644
index 0000000..4291d0c
--- /dev/null
+++ b/lib/luks2/luks2_keyslot_reenc.c
@@ -0,0 +1,752 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, reencryption keyslot handler
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+static int reenc_keyslot_open(struct crypt_device *cd __attribute__((unused)),
+ int keyslot __attribute__((unused)),
+ const char *password __attribute__((unused)),
+ size_t password_len __attribute__((unused)),
+ char *volume_key __attribute__((unused)),
+ size_t volume_key_len __attribute__((unused)))
+{
+ return -ENOENT;
+}
+
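+/*
+ * Build the keyslot "area" JSON object for the requested resilience mode;
+ * checksum variants store hash and sector_size, datashift variants the
+ * shift_size in bytes.
+ */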
+static json_object *reencrypt_keyslot_area_jobj(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment,
+ uint64_t area_offset,
+ uint64_t area_length)
+{
+ json_object *jobj_area = json_object_new_object();
+
+ if (!jobj_area || !params || !params->resilience)
+ return NULL;
+
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
+ json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
+ json_object_object_add(jobj_area, "type", json_object_new_string(params->resilience));
+
+ if (!strcmp(params->resilience, "checksum")) {
+ log_dbg(cd, "Setting reencrypt keyslot for checksum protection.");
+ json_object_object_add(jobj_area, "hash", json_object_new_string(params->hash));
+ json_object_object_add(jobj_area, "sector_size", json_object_new_int64(alignment));
+ } else if (!strcmp(params->resilience, "journal")) {
+ log_dbg(cd, "Setting reencrypt keyslot for journal protection.");
+ } else if (!strcmp(params->resilience, "none")) {
+ log_dbg(cd, "Setting reencrypt keyslot for none protection.");
+ } else if (!strcmp(params->resilience, "datashift")) {
+ log_dbg(cd, "Setting reencrypt keyslot for datashift protection.");
+ json_object_object_add(jobj_area, "shift_size",
+ crypt_jobj_new_uint64(params->data_shift << SECTOR_SHIFT));
+ } else if (!strcmp(params->resilience, "datashift-checksum")) {
+ log_dbg(cd, "Setting reencrypt keyslot for datashift and checksum protection.");
+ json_object_object_add(jobj_area, "hash", json_object_new_string(params->hash));
+ json_object_object_add(jobj_area, "sector_size", json_object_new_int64(alignment));
+ json_object_object_add(jobj_area, "shift_size",
+ crypt_jobj_new_uint64(params->data_shift << SECTOR_SHIFT));
+ } else if (!strcmp(params->resilience, "datashift-journal")) {
+ log_dbg(cd, "Setting reencrypt keyslot for datashift and journal protection.");
+ json_object_object_add(jobj_area, "shift_size",
+ crypt_jobj_new_uint64(params->data_shift << SECTOR_SHIFT));
+ } else {
+ json_object_put(jobj_area);
+ return NULL;
+ }
+
+ return jobj_area;
+}
+
+static json_object *reencrypt_keyslot_area_jobj_update_block_size(struct crypt_device *cd,
+ json_object *jobj_area, size_t alignment)
+{
+ json_object *jobj_type, *jobj_area_new = NULL;
+
+ if (!jobj_area ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_type) ||
+ (strcmp(json_object_get_string(jobj_type), "checksum") &&
+ strcmp(json_object_get_string(jobj_type), "datashift-checksum")))
+ return NULL;
+
+ if (json_object_copy(jobj_area, &jobj_area_new))
+ return NULL;
+
+ log_dbg(cd, "Updating reencrypt resilience checksum block size.");
+
+ json_object_object_add(jobj_area_new, "sector_size", json_object_new_int64(alignment));
+
+ return jobj_area_new;
+}
+
+static int reenc_keyslot_alloc(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ int r;
+ json_object *jobj_keyslots, *jobj_keyslot, *jobj_area;
+ uint64_t area_offset, area_length;
+
+ log_dbg(cd, "Allocating reencrypt keyslot %d.", keyslot);
+
+ if (!params || !params->resilience || params->direction > CRYPT_REENCRYPT_BACKWARD)
+ return -EINVAL;
+
+ if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX)
+ return -ENOMEM;
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
+ return -EINVAL;
+
+ /* only plain datashift resilience mode does not require additional storage */
+ if (!strcmp(params->resilience, "datashift"))
+ r = LUKS2_find_area_gap(cd, hdr, 1, &area_offset, &area_length);
+ else
+ r = LUKS2_find_area_max_gap(cd, hdr, &area_offset, &area_length);
+ if (r < 0)
+ return r;
+
+ jobj_area = reencrypt_keyslot_area_jobj(cd, params, alignment, area_offset, area_length);
+ if (!jobj_area)
+ return -EINVAL;
+
+ jobj_keyslot = json_object_new_object();
+ if (!jobj_keyslot) {
+ json_object_put(jobj_area);
+ return -ENOMEM;
+ }
+ json_object_object_add(jobj_keyslot, "area", jobj_area);
+
+ json_object_object_add(jobj_keyslot, "type", json_object_new_string("reencrypt"));
+ json_object_object_add(jobj_keyslot, "key_size", json_object_new_int(1)); /* useless but mandatory */
+ json_object_object_add(jobj_keyslot, "mode", json_object_new_string(crypt_reencrypt_mode_to_str(params->mode)));
+ if (params->direction == CRYPT_REENCRYPT_FORWARD)
+ json_object_object_add(jobj_keyslot, "direction", json_object_new_string("forward"));
+ else
+ json_object_object_add(jobj_keyslot, "direction", json_object_new_string("backward"));
+
+ json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+ if (LUKS2_check_json_size(cd, hdr)) {
+ log_dbg(cd, "New keyslot too large to fit in free metadata space.");
+ json_object_object_del_by_uint(jobj_keyslots, keyslot);
+ return -ENOSPC;
+ }
+
+ JSON_DBG(cd, hdr->jobj, "JSON:");
+
+ return 0;
+}
+
+static int reenc_keyslot_store_data(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const void *buffer, size_t buffer_len)
+{
+ int devfd, r;
+ json_object *jobj_area, *jobj_offset, *jobj_length;
+ uint64_t area_offset, area_length;
+ struct device *device = crypt_metadata_device(cd);
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "offset", &jobj_offset) ||
+ !json_object_object_get_ex(jobj_area, "size", &jobj_length))
+ return -EINVAL;
+
+ area_offset = crypt_jobj_get_uint64(jobj_offset);
+ area_length = crypt_jobj_get_uint64(jobj_length);
+
+ if (!area_offset || !area_length || ((uint64_t)buffer_len > area_length))
+ return -EINVAL;
+
+ devfd = device_open_locked(cd, device, O_RDWR);
+ if (devfd >= 0) {
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), CONST_CAST(void *)buffer,
+ buffer_len, area_offset) < 0)
+ r = -EIO;
+ else
+ r = 0;
+ } else
+ r = -EINVAL;
+
+ if (r)
+ log_err(cd, _("IO error while encrypting keyslot."));
+
+ return r;
+}
+
+static int reenc_keyslot_store(struct crypt_device *cd,
+ int keyslot,
+ const char *password __attribute__((unused)),
+ size_t password_len __attribute__((unused)),
+ const char *buffer,
+ size_t buffer_len)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj_keyslot;
+ int r = 0;
+
+ if (!cd || !buffer || !buffer_len)
+ return -EINVAL;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ log_dbg(cd, "Reencrypt keyslot %d store.", keyslot);
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
+ if (r)
+ return r;
+
+ r = reenc_keyslot_store_data(cd, jobj_keyslot, buffer, buffer_len);
+ if (r < 0) {
+ device_write_unlock(cd, crypt_metadata_device(cd));
+ return r;
+ }
+
+ r = LUKS2_hdr_write(cd, hdr);
+
+ device_write_unlock(cd, crypt_metadata_device(cd));
+
+ return r < 0 ? r : keyslot;
+}
+
+static int reenc_keyslot_wipe(struct crypt_device *cd,
+ int keyslot)
+{
+ struct luks2_hdr *hdr;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ /* remove reencryption verification data */
+ LUKS2_digest_assign(cd, hdr, keyslot, CRYPT_ANY_DIGEST, 0, 0);
+
+ return 0;
+}
+
+static int reenc_keyslot_dump(struct crypt_device *cd, int keyslot)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_direction, *jobj_mode, *jobj_resilience,
+ *jobj1;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "direction", &jobj_direction) ||
+ !json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode) ||
+ !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_resilience))
+ return -EINVAL;
+
+ log_std(cd, "\t%-12s%s\n", "Mode:", json_object_get_string(jobj_mode));
+ log_std(cd, "\t%-12s%s\n", "Direction:", json_object_get_string(jobj_direction));
+ log_std(cd, "\t%-12s%s\n", "Resilience:", json_object_get_string(jobj_resilience));
+
+ if (!strcmp(json_object_get_string(jobj_resilience), "checksum")) {
+ json_object_object_get_ex(jobj_area, "hash", &jobj1);
+ log_std(cd, "\t%-12s%s\n", "Hash:", json_object_get_string(jobj1));
+ json_object_object_get_ex(jobj_area, "sector_size", &jobj1);
+ log_std(cd, "\t%-12s%d [bytes]\n", "Hash data:", json_object_get_int(jobj1));
+ } else if (!strcmp(json_object_get_string(jobj_resilience), "datashift")) {
+ json_object_object_get_ex(jobj_area, "shift_size", &jobj1);
+ log_std(cd, "\t%-12s%" PRIu64 "[bytes]\n", "Shift size:", crypt_jobj_get_uint64(jobj1));
+ }
+
+ json_object_object_get_ex(jobj_area, "offset", &jobj1);
+ log_std(cd, "\tArea offset:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+ json_object_object_get_ex(jobj_area, "size", &jobj1);
+ log_std(cd, "\tArea length:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+ return 0;
+}
+
+static int reenc_keyslot_validate(struct crypt_device *cd, json_object *jobj_keyslot)
+{
+ json_object *jobj_mode, *jobj_area, *jobj_type, *jobj_shift_size, *jobj_hash,
+ *jobj_sector_size, *jobj_direction, *jobj_key_size;
+ const char *mode, *type, *direction;
+ uint32_t sector_size;
+ uint64_t shift_size;
+
+ /* mode (string: encrypt,reencrypt,decrypt)
+ * direction (string: forward, backward)
+ * area {
+ * type: (string: datashift, journal, checksum, none, datashift-journal, datashift-checksum)
+ * hash: (string: checksum and datashift-checksum types)
+ * sector_size (uint32: checksum and datashift-checksum types)
+ * shift_size (uint64: all datashift based types)
+ * }
+ */
+
+ /* area and area type are validated in general validation code */
+ if (!jobj_keyslot || !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return -EINVAL;
+
+ jobj_key_size = json_contains(cd, jobj_keyslot, "", "reencrypt keyslot", "key_size", json_type_int);
+ jobj_mode = json_contains_string(cd, jobj_keyslot, "", "reencrypt keyslot", "mode");
+ jobj_direction = json_contains_string(cd, jobj_keyslot, "", "reencrypt keyslot", "direction");
+
+ if (!jobj_mode || !jobj_direction || !jobj_key_size)
+ return -EINVAL;
+
+ if (!validate_json_uint32(jobj_key_size) || crypt_jobj_get_uint32(jobj_key_size) != 1) {
+ log_dbg(cd, "Illegal reencrypt key size.");
+ return -EINVAL;
+ }
+
+ mode = json_object_get_string(jobj_mode);
+ type = json_object_get_string(jobj_type);
+ direction = json_object_get_string(jobj_direction);
+
+ if (strcmp(mode, "reencrypt") && strcmp(mode, "encrypt") &&
+ strcmp(mode, "decrypt")) {
+ log_dbg(cd, "Illegal reencrypt mode %s.", mode);
+ return -EINVAL;
+ }
+
+ if (strcmp(direction, "forward") && strcmp(direction, "backward")) {
+ log_dbg(cd, "Illegal reencrypt direction %s.", direction);
+ return -EINVAL;
+ }
+
+ if (!strcmp(type, "checksum") || !strcmp(type, "datashift-checksum")) {
+ jobj_hash = json_contains_string(cd, jobj_area, "type:checksum",
+ "Keyslot area", "hash");
+ jobj_sector_size = json_contains(cd, jobj_area, "type:checksum",
+ "Keyslot area", "sector_size", json_type_int);
+ if (!jobj_hash || !jobj_sector_size)
+ return -EINVAL;
+ if (!validate_json_uint32(jobj_sector_size))
+ return -EINVAL;
+ sector_size = crypt_jobj_get_uint32(jobj_sector_size);
+ if (sector_size < SECTOR_SIZE || NOTPOW2(sector_size)) {
+ log_dbg(cd, "Invalid sector_size (%" PRIu32 ") for checksum resilience mode.",
+ sector_size);
+ return -EINVAL;
+ }
+ } else if (!strcmp(type, "datashift") ||
+ !strcmp(type, "datashift-checksum") ||
+ !strcmp(type, "datashift-journal")) {
+ if (!(jobj_shift_size = json_contains_string(cd, jobj_area, "type:datashift",
+ "Keyslot area", "shift_size")))
+ return -EINVAL;
+
+ shift_size = crypt_jobj_get_uint64(jobj_shift_size);
+ if (!shift_size)
+ return -EINVAL;
+
+ if (MISALIGNED_512(shift_size)) {
+ log_dbg(cd, "Shift size field has to be aligned to 512 bytes.");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int reenc_keyslot_update_needed(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ const char *type;
+ json_object *jobj_area, *jobj_type, *jobj;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_type) ||
+ !(type = json_object_get_string(jobj_type)))
+ return -EINVAL;
+
+ /*
+ * If no resilience mode change is requested and effective
+ * resilience mode is 'checksum' then check alignment matches
+ * stored checksum block size.
+ */
+ if (!params || !params->resilience) {
+ if (!strcmp(json_object_get_string(jobj_type), "checksum") ||
+ !strcmp(json_object_get_string(jobj_type), "datashift-checksum"))
+ return (!json_object_object_get_ex(jobj_area, "sector_size", &jobj) ||
+ alignment != crypt_jobj_get_uint32(jobj));
+ return 0;
+ }
+
+ if (strcmp(params->resilience, type))
+ return 1;
+
+ if (!strcmp(type, "checksum") ||
+ !strcmp(type, "datashift-checksum")) {
+ if (!params->hash)
+ return -EINVAL;
+ if (!json_object_object_get_ex(jobj_area, "hash", &jobj) ||
+ strcmp(json_object_get_string(jobj), params->hash) ||
+ !json_object_object_get_ex(jobj_area, "sector_size", &jobj) ||
+ crypt_jobj_get_uint32(jobj) != alignment)
+ return 1;
+ }
+
+ if (!strncmp(type, "datashift", 9)) {
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj))
+ return -EINVAL;
+ if ((params->data_shift << SECTOR_SHIFT) != crypt_jobj_get_uint64(jobj))
+ return 1;
+ }
+
+ /* nothing to compare with 'none' and 'journal' */
+ return 0;
+}
+
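+/* Fill reenc_protection checksum state (hash, block size) from the area object. */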
+static int load_checksum_protection(struct crypt_device *cd,
+ json_object *jobj_area,
+ uint64_t area_length,
+ struct reenc_protection *rp)
+{
+ int r;
+ json_object *jobj_hash, *jobj_block_size;
+
+ if (!jobj_area || !rp ||
+ !json_object_object_get_ex(jobj_area, "hash", &jobj_hash) ||
+ !json_object_object_get_ex(jobj_area, "sector_size", &jobj_block_size))
+ return -EINVAL;
+
+ r = snprintf(rp->p.csum.hash, sizeof(rp->p.csum.hash), "%s", json_object_get_string(jobj_hash));
+ if (r < 0 || (size_t)r >= sizeof(rp->p.csum.hash))
+ return -EINVAL;
+
+ if (crypt_hash_init(&rp->p.csum.ch, rp->p.csum.hash)) {
+ log_err(cd, _("Hash algorithm %s is not available."), rp->p.csum.hash);
+ return -EINVAL;
+ }
+
+ r = crypt_hash_size(rp->p.csum.hash);
+ if (r <= 0) {
+ crypt_hash_destroy(rp->p.csum.ch);
+ rp->p.csum.ch = NULL;
+ log_dbg(cd, "Invalid hash size");
+ return -EINVAL;
+ }
+
+ rp->p.csum.hash_size = r;
+ rp->p.csum.block_size = crypt_jobj_get_uint32(jobj_block_size);
+ rp->p.csum.checksums_len = area_length;
+
+ rp->type = REENC_PROTECTION_CHECKSUM;
+ return 0;
+}
+
+static int reenc_keyslot_load_resilience_primary(struct crypt_device *cd,
+ const char *type,
+ json_object *jobj_area,
+ uint64_t area_length,
+ struct reenc_protection *rp)
+{
+ json_object *jobj;
+
+ if (!strcmp(type, "checksum")) {
+ log_dbg(cd, "Initializing checksum resilience mode.");
+ return load_checksum_protection(cd, jobj_area, area_length, rp);
+ } else if (!strcmp(type, "journal")) {
+ log_dbg(cd, "Initializing journal resilience mode.");
+ rp->type = REENC_PROTECTION_JOURNAL;
+ } else if (!strcmp(type, "none")) {
+ log_dbg(cd, "Initializing none resilience mode.");
+ rp->type = REENC_PROTECTION_NONE;
+ } else if (!strcmp(type, "datashift") ||
+ !strcmp(type, "datashift-checksum") ||
+ !strcmp(type, "datashift-journal")) {
+ log_dbg(cd, "Initializing datashift resilience mode.");
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj))
+ return -EINVAL;
+ rp->type = REENC_PROTECTION_DATASHIFT;
+ rp->p.ds.data_shift = crypt_jobj_get_uint64(jobj);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int reenc_keyslot_load_resilience_secondary(struct crypt_device *cd,
+ const char *type,
+ json_object *jobj_area,
+ uint64_t area_length,
+ struct reenc_protection *rp)
+{
+ if (!strcmp(type, "datashift-checksum")) {
+ log_dbg(cd, "Initializing checksum resilience mode.");
+ return load_checksum_protection(cd, jobj_area, area_length, rp);
+ } else if (!strcmp(type, "datashift-journal")) {
+ log_dbg(cd, "Initializing journal resilience mode.");
+ rp->type = REENC_PROTECTION_JOURNAL;
+ } else
+ rp->type = REENC_PROTECTION_NOT_SET;
+
+ return 0;
+}
+
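+/*
+ * Load resilience protection state from the keyslot area: 'primary' selects
+ * the base mode, otherwise the checksum/journal part of a datashift-*
+ * combined mode (or NOT_SET when there is no secondary protection).
+ */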
+static int reenc_keyslot_load_resilience(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ struct reenc_protection *rp,
+ bool primary)
+{
+ const char *type;
+ int r;
+ json_object *jobj_area, *jobj_type;
+ uint64_t dummy, area_length;
+
+ if (!rp || !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return -EINVAL;
+
+ r = LUKS2_keyslot_jobj_area(jobj_keyslot, &dummy, &area_length);
+ if (r < 0)
+ return r;
+
+ type = json_object_get_string(jobj_type);
+ if (!type)
+ return -EINVAL;
+
+ if (primary)
+ return reenc_keyslot_load_resilience_primary(cd, type, jobj_area, area_length, rp);
+ else
+ return reenc_keyslot_load_resilience_secondary(cd, type, jobj_area, area_length, rp);
+}
+
+static bool reenc_keyslot_update_is_valid(struct crypt_device *cd,
+ json_object *jobj_area,
+ const struct crypt_params_reencrypt *params)
+{
+ const char *type;
+ json_object *jobj_type, *jobj;
+
+ if (!json_object_object_get_ex(jobj_area, "type", &jobj_type) ||
+ !(type = json_object_get_string(jobj_type)))
+ return false;
+
+ /* do not allow switch to/away from datashift resilience type */
+ if ((strcmp(params->resilience, "datashift") && !strcmp(type, "datashift")) ||
+ (!strcmp(params->resilience, "datashift") && strcmp(type, "datashift")))
+ return false;
+
+ /* do not allow switch to/away from datashift- resilience subvariants */
+ if ((strncmp(params->resilience, "datashift-", 10) &&
+ !strncmp(type, "datashift-", 10)) ||
+ (!strncmp(params->resilience, "datashift-", 10) &&
+ strncmp(type, "datashift-", 10)))
+ return false;
+
+ /* datashift value is also immutable */
+ if (!strncmp(type, "datashift", 9)) {
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj))
+ return false;
+ return (params->data_shift << SECTOR_SHIFT) == crypt_jobj_get_uint64(jobj);
+ }
+
+ return true;
+}
+
+static int reenc_keyslot_update(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ int r;
+ json_object *jobj_area, *jobj_area_new;
+ uint64_t area_offset, area_length;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+
+ r = LUKS2_keyslot_jobj_area(jobj_keyslot, &area_offset, &area_length);
+ if (r < 0)
+ return r;
+
+ if (!params || !params->resilience)
+ jobj_area_new = reencrypt_keyslot_area_jobj_update_block_size(cd, jobj_area, alignment);
+ else {
+ if (!reenc_keyslot_update_is_valid(cd, jobj_area, params)) {
+ log_err(cd, _("Invalid reencryption resilience mode change requested."));
+ return -EINVAL;
+ }
+
+ jobj_area_new = reencrypt_keyslot_area_jobj(cd, params, alignment,
+ area_offset, area_length);
+ }
+
+ if (!jobj_area_new)
+ return -EINVAL;
+
+ /* increase refcount for validation purposes */
+ json_object_get(jobj_area);
+
+ json_object_object_add(jobj_keyslot, "area", jobj_area_new);
+
+ r = reenc_keyslot_validate(cd, jobj_keyslot);
+ if (r) {
+ /* replace invalid object with previous valid one */
+ json_object_object_add(jobj_keyslot, "area", jobj_area);
+ return -EINVAL;
+ }
+
+ /* previous area object is no longer needed */
+ json_object_put(jobj_area);
+
+ return 0;
+}
+
+int LUKS2_keyslot_reencrypt_allocate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ int r;
+
+ if (keyslot == CRYPT_ANY_SLOT)
+ return -EINVAL;
+
+ r = reenc_keyslot_alloc(cd, hdr, keyslot, params, alignment);
+ if (r < 0)
+ return r;
+
+ r = LUKS2_keyslot_priority_set(cd, hdr, keyslot, CRYPT_SLOT_PRIORITY_IGNORE, 0);
+ if (r < 0)
+ return r;
+
+ r = reenc_keyslot_validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot validation failed.");
+ return r;
+ }
+
+ return 0;
+}
+
+int LUKS2_keyslot_reencrypt_update_needed(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ int r;
+ json_object *jobj_type, *jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+
+ if (!jobj_keyslot ||
+ !json_object_object_get_ex(jobj_keyslot, "type", &jobj_type) ||
+ strcmp(json_object_get_string(jobj_type), "reencrypt"))
+ return -EINVAL;
+
+ r = reenc_keyslot_update_needed(cd, jobj_keyslot, params, alignment);
+ if (!r)
+ log_dbg(cd, "No update of reencrypt keyslot needed.");
+
+ return r;
+}
+
+int LUKS2_keyslot_reencrypt_update(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment,
+ struct volume_key *vks)
+{
+ int r;
+ uint8_t version;
+ uint64_t max_size, moved_segment_size;
+ json_object *jobj_type, *jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ struct reenc_protection check_rp = {};
+
+ if (!jobj_keyslot ||
+ !json_object_object_get_ex(jobj_keyslot, "type", &jobj_type) ||
+ strcmp(json_object_get_string(jobj_type), "reencrypt"))
+ return -EINVAL;
+
+ if (LUKS2_config_get_reencrypt_version(hdr, &version))
+ return -EINVAL;
+
+ /* verify existing reencryption metadata before updating */
+ r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
+ if (r < 0)
+ return r;
+
+ r = reenc_keyslot_update(cd, jobj_keyslot, params, alignment);
+ if (r < 0)
+ return r;
+
+ r = reenc_keyslot_load_resilience(cd, jobj_keyslot, &check_rp, false);
+ if (r < 0)
+ return r;
+
+ if (check_rp.type != REENC_PROTECTION_NOT_SET) {
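+ /* The moved segment must fit into a single hot zone of the new resilience type. */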
+ r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp, keyslot, &max_size);
+ LUKS2_reencrypt_protection_erase(&check_rp);
+ if (r < 0)
+ return r;
+ moved_segment_size = json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), 0);
+ if (!moved_segment_size)
+ return -EINVAL;
+ if (moved_segment_size > max_size) {
+ log_err(cd, _("Can not update resilience type. "
+ "New type only provides %" PRIu64 " bytes, "
+ "required space is: %" PRIu64 " bytes."),
+ max_size, moved_segment_size);
+ return -EINVAL;
+ }
+ }
+
+ r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, version, vks);
+ if (r < 0)
+ log_err(cd, _("Failed to refresh reencryption verification digest."));
+
+ return r ?: LUKS2_hdr_write(cd, hdr);
+}
+
+int LUKS2_keyslot_reencrypt_load(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ struct reenc_protection *rp,
+ bool primary)
+{
+ json_object *jobj_type, *jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+
+ if (!jobj_keyslot ||
+ !json_object_object_get_ex(jobj_keyslot, "type", &jobj_type) ||
+ strcmp(json_object_get_string(jobj_type), "reencrypt"))
+ return -EINVAL;
+
+ return reenc_keyslot_load_resilience(cd, jobj_keyslot, rp, primary);
+}
+
+const keyslot_handler reenc_keyslot = {
+ .name = "reencrypt",
+ .open = reenc_keyslot_open,
+ .store = reenc_keyslot_store, /* used for initialization only, or also for every chunk write */
+ .wipe = reenc_keyslot_wipe,
+ .dump = reenc_keyslot_dump,
+ .validate = reenc_keyslot_validate
+};
diff --git a/lib/luks2/luks2_luks1_convert.c b/lib/luks2/luks2_luks1_convert.c
new file mode 100644
index 0000000..6d3fa1e
--- /dev/null
+++ b/lib/luks2/luks2_luks1_convert.c
@@ -0,0 +1,945 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, LUKS1 conversion code
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Ondrej Kozina
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include "../luks1/luks.h"
+#include "../luks1/af.h"
+
+/* This differs from LUKS_check_cipher() in that it does not check the dm-crypt fallback. */
+int LUKS2_check_cipher(struct crypt_device *cd,
+ size_t keylength,
+ const char *cipher,
+ const char *cipher_mode)
+{
+ int r;
+ struct crypt_storage *s;
+ char buf[SECTOR_SIZE], *empty_key;
+
+ log_dbg(cd, "Checking if cipher %s-%s is usable (storage wrapper).", cipher, cipher_mode);
+
+ empty_key = crypt_safe_alloc(keylength);
+ if (!empty_key)
+ return -ENOMEM;
+
+ /* No need for key-quality randomness here, but known weak keys must be avoided. */
+ r = crypt_random_get(cd, empty_key, keylength, CRYPT_RND_NORMAL);
+ if (r < 0)
+ goto out;
+
+ r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, empty_key, keylength, false);
+ if (r < 0)
+ goto out;
+
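+ /* Decrypting a single zeroed sector with a random key is enough to verify the cipher specification is usable. */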
+ memset(buf, 0, sizeof(buf));
+ r = crypt_storage_decrypt(s, 0, sizeof(buf), buf);
+ crypt_storage_destroy(s);
+out:
+ crypt_safe_free(empty_key);
+ crypt_safe_memzero(buf, sizeof(buf));
+ return r;
+}
+
+static int json_luks1_keyslot(const struct luks_phdr *hdr_v1, int keyslot, struct json_object **keyslot_object)
+{
+ char *base64_str, cipher[LUKS_CIPHERNAME_L+LUKS_CIPHERMODE_L];
+ size_t base64_len;
+ struct json_object *keyslot_obj, *field, *jobj_kdf, *jobj_af, *jobj_area;
+ uint64_t offset, area_size, length;
+ int r;
+
+ keyslot_obj = json_object_new_object();
+ json_object_object_add(keyslot_obj, "type", json_object_new_string("luks2"));
+ json_object_object_add(keyslot_obj, "key_size", json_object_new_int64(hdr_v1->keyBytes));
+
+ /* KDF */
+ jobj_kdf = json_object_new_object();
+ json_object_object_add(jobj_kdf, "type", json_object_new_string(CRYPT_KDF_PBKDF2));
+ json_object_object_add(jobj_kdf, "hash", json_object_new_string(hdr_v1->hashSpec));
+ json_object_object_add(jobj_kdf, "iterations", json_object_new_int64(hdr_v1->keyblock[keyslot].passwordIterations));
+ /* salt field */
+ r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->keyblock[keyslot].passwordSalt, LUKS_SALTSIZE);
+ if (r < 0) {
+ json_object_put(keyslot_obj);
+ json_object_put(jobj_kdf);
+ return r;
+ }
+ field = json_object_new_string_len(base64_str, base64_len);
+ free(base64_str);
+ json_object_object_add(jobj_kdf, "salt", field);
+ json_object_object_add(keyslot_obj, "kdf", jobj_kdf);
+
+ /* AF */
+ jobj_af = json_object_new_object();
+ json_object_object_add(jobj_af, "type", json_object_new_string("luks1"));
+ json_object_object_add(jobj_af, "hash", json_object_new_string(hdr_v1->hashSpec));
+ /* the stripes field is fixed to LUKS_STRIPES (4000) */
+ json_object_object_add(jobj_af, "stripes", json_object_new_int(LUKS_STRIPES));
+ json_object_object_add(keyslot_obj, "af", jobj_af);
+
+ /* Area */
+ jobj_area = json_object_new_object();
+ json_object_object_add(jobj_area, "type", json_object_new_string("raw"));
+
+ /* encryption algorithm field */
+ if (*hdr_v1->cipherMode != '\0') {
+ if (snprintf(cipher, sizeof(cipher), "%s-%s", hdr_v1->cipherName, hdr_v1->cipherMode) < 0) {
+ json_object_put(keyslot_obj);
+ json_object_put(jobj_area);
+ return -EINVAL;
+ }
+ json_object_object_add(jobj_area, "encryption", json_object_new_string(cipher));
+ } else
+ json_object_object_add(jobj_area, "encryption", json_object_new_string(hdr_v1->cipherName));
+
+ /* area */
+ if (LUKS_keyslot_area(hdr_v1, keyslot, &offset, &length)) {
+ json_object_put(keyslot_obj);
+ json_object_put(jobj_area);
+ return -EINVAL;
+ }
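+ /* LUKS2 keyslot areas must be aligned to 4096 bytes. */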
+ area_size = size_round_up(length, 4096);
+ json_object_object_add(jobj_area, "key_size", json_object_new_int(hdr_v1->keyBytes));
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(offset));
+ json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_size));
+ json_object_object_add(keyslot_obj, "area", jobj_area);
+
+ *keyslot_object = keyslot_obj;
+ return 0;
+}
+
+static int json_luks1_keyslots(const struct luks_phdr *hdr_v1, struct json_object **keyslots_object)
+{
+ int keyslot, r;
+ struct json_object *keyslot_obj, *field;
+
+ keyslot_obj = json_object_new_object();
+ if (!keyslot_obj)
+ return -ENOMEM;
+
+ for (keyslot = 0; keyslot < LUKS_NUMKEYS; keyslot++) {
+ if (hdr_v1->keyblock[keyslot].active != LUKS_KEY_ENABLED)
+ continue;
+ r = json_luks1_keyslot(hdr_v1, keyslot, &field);
+ if (r) {
+ json_object_put(keyslot_obj);
+ return r;
+ }
+ json_object_object_add_by_uint(keyslot_obj, keyslot, field);
+ }
+
+ *keyslots_object = keyslot_obj;
+ return 0;
+}
+
+static int json_luks1_segment(const struct luks_phdr *hdr_v1, struct json_object **segment_object)
+{
+ const char *c;
+ char cipher[LUKS_CIPHERNAME_L+LUKS_CIPHERMODE_L];
+ struct json_object *segment_obj, *field;
+ uint64_t number;
+
+ segment_obj = json_object_new_object();
+ if (!segment_obj)
+ return -ENOMEM;
+
+ /* type field */
+ field = json_object_new_string("crypt");
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "type", field);
+
+ /* offset field (LUKS1 payloadOffset is in sectors, LUKS2 stores bytes) */
+ number = (uint64_t)hdr_v1->payloadOffset * SECTOR_SIZE;
+
+ field = crypt_jobj_new_uint64(number);
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "offset", field);
+
+ /* iv_tweak field */
+ field = json_object_new_string("0");
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "iv_tweak", field);
+
+ /* size field */
+ field = json_object_new_string("dynamic");
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "size", field);
+
+ /* encryption (cipher) field */
+ if (*hdr_v1->cipherMode != '\0') {
+ if (snprintf(cipher, sizeof(cipher), "%s-%s", hdr_v1->cipherName, hdr_v1->cipherMode) < 0) {
+ json_object_put(segment_obj);
+ return -EINVAL;
+ }
+ c = cipher;
+ } else
+ c = hdr_v1->cipherName;
+
+ field = json_object_new_string(c);
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "encryption", field);
+
+ /* sector_size field */
+ field = json_object_new_int(SECTOR_SIZE);
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "sector_size", field);
+
+ *segment_object = segment_obj;
+ return 0;
+}
+
+static int json_luks1_segments(const struct luks_phdr *hdr_v1, struct json_object **segments_object)
+{
+ int r;
+ struct json_object *segments_obj, *field;
+
+ segments_obj = json_object_new_object();
+ if (!segments_obj)
+ return -ENOMEM;
+
+ r = json_luks1_segment(hdr_v1, &field);
+ if (r) {
+ json_object_put(segments_obj);
+ return r;
+ }
+ json_object_object_add_by_uint(segments_obj, 0, field);
+
+ *segments_object = segments_obj;
+ return 0;
+}
+
+static int json_luks1_digest(const struct luks_phdr *hdr_v1, struct json_object **digest_object)
+{
+ char keyslot_str[16], *base64_str;
+ int r, ks;
+ size_t base64_len;
+ struct json_object *digest_obj, *array, *field;
+
+ digest_obj = json_object_new_object();
+ if (!digest_obj)
+ return -ENOMEM;
+
+ /* type field */
+ field = json_object_new_string("pbkdf2");
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "type", field);
+
+ /* keyslots array */
+ array = json_object_new_array();
+ if (!array) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "keyslots", json_object_get(array));
+
+ for (ks = 0; ks < LUKS_NUMKEYS; ks++) {
+ if (hdr_v1->keyblock[ks].active != LUKS_KEY_ENABLED)
+ continue;
+ if (snprintf(keyslot_str, sizeof(keyslot_str), "%d", ks) < 0) {
+ json_object_put(field);
+ json_object_put(array);
+ json_object_put(digest_obj);
+ return -EINVAL;
+ }
+
+ field = json_object_new_string(keyslot_str);
+ if (!field || json_object_array_add(array, field) < 0) {
+ json_object_put(field);
+ json_object_put(array);
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ }
+
+ json_object_put(array);
+
+ /* segments array */
+ array = json_object_new_array();
+ if (!array) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "segments", json_object_get(array));
+
+ field = json_object_new_string("0");
+ if (!field || json_object_array_add(array, field) < 0) {
+ json_object_put(field);
+ json_object_put(array);
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+
+ json_object_put(array);
+
+ /* hash field */
+ field = json_object_new_string(hdr_v1->hashSpec);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "hash", field);
+
+ /* salt field */
+ r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->mkDigestSalt, LUKS_SALTSIZE);
+ if (r < 0) {
+ json_object_put(digest_obj);
+ return r;
+ }
+
+ field = json_object_new_string_len(base64_str, base64_len);
+ free(base64_str);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "salt", field);
+
+ /* digest field */
+ r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->mkDigest, LUKS_DIGESTSIZE);
+ if (r < 0) {
+ json_object_put(digest_obj);
+ return r;
+ }
+
+ field = json_object_new_string_len(base64_str, base64_len);
+ free(base64_str);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "digest", field);
+
+ /* iterations field */
+ field = json_object_new_int64(hdr_v1->mkDigestIterations);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "iterations", field);
+
+ *digest_object = digest_obj;
+ return 0;
+}
+
+static int json_luks1_digests(const struct luks_phdr *hdr_v1, struct json_object **digests_object)
+{
+ int r;
+ struct json_object *digests_obj, *field;
+
+ digests_obj = json_object_new_object();
+ if (!digests_obj)
+ return -ENOMEM;
+
+ r = json_luks1_digest(hdr_v1, &field);
+ if (r) {
+ json_object_put(digests_obj);
+ return r;
+ }
+ json_object_object_add(digests_obj, "0", field);
+
+ *digests_object = digests_obj;
+ return 0;
+}
+
+static int json_luks1_object(struct luks_phdr *hdr_v1, struct json_object **luks1_object, uint64_t keyslots_size)
+{
+ int r;
+ struct json_object *luks1_obj, *field;
+ uint64_t json_size;
+
+ luks1_obj = json_object_new_object();
+ if (!luks1_obj)
+ return -ENOMEM;
+
+ /* keyslots field */
+ r = json_luks1_keyslots(hdr_v1, &field);
+ if (r) {
+ json_object_put(luks1_obj);
+ return r;
+ }
+ json_object_object_add(luks1_obj, "keyslots", field);
+
+ /* tokens field */
+ field = json_object_new_object();
+ if (!field) {
+ json_object_put(luks1_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(luks1_obj, "tokens", field);
+
+ /* segments field */
+ r = json_luks1_segments(hdr_v1, &field);
+ if (r) {
+ json_object_put(luks1_obj);
+ return r;
+ }
+ json_object_object_add(luks1_obj, "segments", field);
+
+ /* digests field */
+ r = json_luks1_digests(hdr_v1, &field);
+ if (r) {
+ json_object_put(luks1_obj);
+ return r;
+ }
+ json_object_object_add(luks1_obj, "digests", field);
+
+ /* config field */
+ /* anything else? */
+ field = json_object_new_object();
+ if (!field) {
+ json_object_put(luks1_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(luks1_obj, "config", field);
+
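+ /* The JSON area is the 16 KiB header minus the binary header; the keyslots area size must be 4096-aligned. */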
+ json_size = LUKS2_HDR_16K_LEN - LUKS2_HDR_BIN_LEN;
+ json_object_object_add(field, "json_size", crypt_jobj_new_uint64(json_size));
+ keyslots_size -= (keyslots_size % 4096);
+ json_object_object_add(field, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));
+
+ *luks1_object = luks1_obj;
+ return 0;
+}
+
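+/* Shift every keyslot's binary area offset by offset_add bytes in the JSON metadata. */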
+static void move_keyslot_offset(json_object *jobj, int offset_add)
+{
+ json_object *jobj1, *jobj2, *jobj_area;
+ uint64_t offset = 0;
+
+ json_object_object_get_ex(jobj, "keyslots", &jobj1);
+ json_object_object_foreach(jobj1, key, val) {
+ UNUSED(key);
+ json_object_object_get_ex(val, "area", &jobj_area);
+ json_object_object_get_ex(jobj_area, "offset", &jobj2);
+ offset = crypt_jobj_get_uint64(jobj2) + offset_add;
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(offset));
+ }
+}
+
+static int move_keyslot_areas(struct crypt_device *cd, off_t offset_from,
+ off_t offset_to, size_t buf_size)
+{
+ int devfd, r = -EIO;
+ struct device *device = crypt_metadata_device(cd);
+ void *buf = NULL;
+
+ log_dbg(cd, "Moving keyslot areas of size %zu from %jd to %jd.",
+ buf_size, (intmax_t)offset_from, (intmax_t)offset_to);
+
+ if (posix_memalign(&buf, crypt_getpagesize(), buf_size))
+ return -ENOMEM;
+
+ devfd = device_open(cd, device, O_RDWR);
+ if (devfd < 0) {
+ free(buf);
+ return -EIO;
+ }
+
+ /* This may safely fail (e.g. for block devices); it only preallocates space where possible. */
+ if (posix_fallocate(devfd, offset_to, buf_size))
+ log_dbg(cd, "Preallocation (fallocate) of new keyslot area not available.");
+
+ /* Try to read the *new* area first to check that it exists (the backup may be trimmed). */
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), buf, buf_size,
+ offset_to) != (ssize_t)buf_size)
+ goto out;
+
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), buf, buf_size,
+ offset_from) != (ssize_t)buf_size)
+ goto out;
+
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), buf, buf_size,
+ offset_to) != (ssize_t)buf_size)
+ goto out;
+
+ r = 0;
+out:
+ device_sync(cd, device);
+ crypt_safe_memzero(buf, buf_size);
+ free(buf);
+
+ return r;
+}
+
+static int luks_header_in_use(struct crypt_device *cd)
+{
+ int r;
+
+ r = lookup_dm_dev_by_uuid(cd, crypt_get_uuid(cd), crypt_get_type(cd));
+ if (r < 0)
+ log_err(cd, _("Cannot check status of device with uuid: %s."), crypt_get_uuid(cd));
+
+ return r;
+}
+
+/* Check if there is a luksmeta area (foreign metadata created by the luksmeta package) */
+static int luksmeta_header_present(struct crypt_device *cd, off_t luks1_size)
+{
+ int devfd, r = 0;
+ static const uint8_t LM_MAGIC[] = { 'L', 'U', 'K', 'S', 'M', 'E', 'T', 'A' };
+ struct device *device = crypt_metadata_device(cd);
+ void *buf = NULL;
+
+ if (posix_memalign(&buf, crypt_getpagesize(), sizeof(LM_MAGIC)))
+ return -ENOMEM;
+
+ devfd = device_open(cd, device, O_RDONLY);
+ if (devfd < 0) {
+ free(buf);
+ return -EIO;
+ }
+
+ /* Note: a read failure must not be treated as an error here; the header may be trimmed. */
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device), device_alignment(device),
+ buf, sizeof(LM_MAGIC), luks1_size) == (ssize_t)sizeof(LM_MAGIC) &&
+ !memcmp(LM_MAGIC, buf, sizeof(LM_MAGIC))) {
+ log_err(cd, _("Unable to convert header with LUKSMETA additional metadata."));
+ r = -EBUSY;
+ }
+
+ free(buf);
+ return r;
+}
+
+/* Convert LUKS1 -> LUKS2 */
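+/*
+ * Layout change (offsets in bytes), in the common case:
+ *   LUKS1: binary header, keyslot areas from 4 KiB (LUKS_ALIGN_KEYSLOTS)
+ *   LUKS2: two 16 KiB header copies, keyslot areas from 32 KiB
+ * Keyslot material is therefore moved 28 KiB forward on the device.
+ */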
+int LUKS2_luks1_to_luks2(struct crypt_device *cd, struct luks_phdr *hdr1, struct luks2_hdr *hdr2)
+{
+ int r;
+ json_object *jobj = NULL;
+ size_t buf_size, buf_offset, luks1_size, luks1_shift = 2 * LUKS2_HDR_16K_LEN - LUKS_ALIGN_KEYSLOTS;
+ uint64_t required_size, max_size = crypt_get_data_offset(cd) * SECTOR_SIZE;
+
+ /* for detached headers the max size equals the device size */
+ if (!max_size && (r = device_size(crypt_metadata_device(cd), &max_size)))
+ return r;
+
+ luks1_size = LUKS_device_sectors(hdr1) << SECTOR_SHIFT;
+ luks1_size = size_round_up(luks1_size, LUKS_ALIGN_KEYSLOTS);
+ if (!luks1_size)
+ return -EINVAL;
+
+ if (LUKS_keyslots_offset(hdr1) != (LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE)) {
+ log_dbg(cd, "Unsupported keyslots material offset: %zu.", LUKS_keyslots_offset(hdr1));
+ return -EINVAL;
+ }
+
+ if (LUKS2_check_cipher(cd, hdr1->keyBytes, hdr1->cipherName, hdr1->cipherMode)) {
+ log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."),
+ hdr1->cipherName, hdr1->cipherMode);
+ return -EINVAL;
+ }
+
+ if (luksmeta_header_present(cd, luks1_size))
+ return -EINVAL;
+
+ log_dbg(cd, "Max size: %" PRIu64 ", LUKS1 (full) header size %zu , required shift: %zu",
+ max_size, luks1_size, luks1_shift);
+
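+ /* luks1_shift = 2 * 16 KiB (both LUKS2 header copies) - 4 KiB (LUKS1 keyslot alignment) = 28 KiB. */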
+ required_size = luks1_size + luks1_shift;
+
+ if ((max_size < required_size) &&
+ device_fallocate(crypt_metadata_device(cd), required_size)) {
+ log_err(cd, _("Unable to move keyslot area. Not enough space."));
+ return -EINVAL;
+ }
+
+ if (max_size < required_size)
+ max_size = required_size;
+
+ r = json_luks1_object(hdr1, &jobj, max_size - 2 * LUKS2_HDR_16K_LEN);
+ if (r < 0)
+ return r;
+
+ move_keyslot_offset(jobj, luks1_shift);
+
+ /* Create and fill LUKS2 hdr */
+ memset(hdr2, 0, sizeof(*hdr2));
+ hdr2->hdr_size = LUKS2_HDR_16K_LEN;
+ hdr2->seqid = 1;
+ hdr2->version = 2;
+ strncpy(hdr2->checksum_alg, "sha256", LUKS2_CHECKSUM_ALG_L);
+ crypt_random_get(cd, (char*)hdr2->salt1, sizeof(hdr2->salt1), CRYPT_RND_SALT);
+ crypt_random_get(cd, (char*)hdr2->salt2, sizeof(hdr2->salt2), CRYPT_RND_SALT);
+ strncpy(hdr2->uuid, crypt_get_uuid(cd), LUKS2_UUID_L-1); /* UUID should be max 36 chars */
+ hdr2->jobj = jobj;
+
+ /*
+ * This duplicates a check in LUKS2_hdr_write(), but we do not want to
+ * move the keyslot areas if the header write would fail later.
+ */
+ if (max_size < LUKS2_hdr_and_areas_size(hdr2)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* check future LUKS2 metadata before moving keyslots area */
+ if (LUKS2_hdr_validate(cd, hdr2->jobj, hdr2->hdr_size - LUKS2_HDR_BIN_LEN)) {
+ log_err(cd, _("Cannot convert to LUKS2 format - invalid metadata."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if ((r = luks_header_in_use(cd))) {
+ if (r > 0)
+ r = -EBUSY;
+ goto out;
+ }
+
+ /* move keyslots 4k -> 32k offset */
+ buf_offset = 2 * LUKS2_HDR_16K_LEN;
+ buf_size = luks1_size - LUKS_ALIGN_KEYSLOTS;
+
+ /* check future LUKS2 keyslots area is at least as large as LUKS1 keyslots area */
+ if (buf_size > LUKS2_keyslots_size(hdr2)) {
+ log_err(cd, _("Unable to move keyslot area. LUKS2 keyslots area too small."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if ((r = move_keyslot_areas(cd, 8 * SECTOR_SIZE, buf_offset, buf_size)) < 0) {
+ log_err(cd, _("Unable to move keyslot area."));
+ goto out;
+ }
+
+ /* Write new LUKS2 JSON */
+ r = LUKS2_hdr_write(cd, hdr2);
+out:
+ LUKS2_hdr_free(cd, hdr2);
+
+ return r;
+}
+
+static int keyslot_LUKS1_compatible(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, uint32_t key_size, const char *hash)
+{
+ json_object *jobj_keyslot, *jobj, *jobj_kdf, *jobj_af;
+ uint64_t l2_offset, l2_length;
+ size_t ks_key_size;
+ const char *ks_cipher, *data_cipher;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return 1;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "type", &jobj) ||
+ strcmp(json_object_get_string(jobj), "luks2"))
+ return 0;
+
+ /* Must use PBKDF2; this implies the memory and parallel cost parameters are not used. */
+ jobj = NULL;
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
+ !json_object_object_get_ex(jobj_kdf, "type", &jobj) ||
+ strcmp(json_object_get_string(jobj), CRYPT_KDF_PBKDF2) ||
+ !json_object_object_get_ex(jobj_kdf, "hash", &jobj) ||
+ strcmp(json_object_get_string(jobj), hash))
+ return 0;
+
+ jobj = NULL;
+ if (!json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+ !json_object_object_get_ex(jobj_af, "stripes", &jobj) ||
+ json_object_get_int(jobj) != LUKS_STRIPES)
+ return 0;
+
+ jobj = NULL;
+ if (!json_object_object_get_ex(jobj_af, "hash", &jobj) ||
+ (crypt_hash_size(json_object_get_string(jobj)) < 0) ||
+ strcmp(json_object_get_string(jobj), hash))
+ return 0;
+
+ ks_cipher = LUKS2_get_keyslot_cipher(hdr, keyslot, &ks_key_size);
+ data_cipher = LUKS2_get_cipher(hdr, CRYPT_DEFAULT_SEGMENT);
+ if (!ks_cipher || !data_cipher || key_size != ks_key_size || strcmp(ks_cipher, data_cipher)) {
+ log_dbg(cd, "Cipher in keyslot %d is different from volume key encryption.", keyslot);
+ return 0;
+ }
+
+ if (LUKS2_keyslot_area(hdr, keyslot, &l2_offset, &l2_length))
+ return 0;
+
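+ /* The keyslot area must match the AF-split key material size (key_size * LUKS_STRIPES, sector-aligned), rounded up to 4096 bytes. */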
+ if (l2_length != (size_round_up(AF_split_sectors(key_size, LUKS_STRIPES) * SECTOR_SIZE, 4096))) {
+ log_dbg(cd, "Area length in LUKS2 keyslot (%d) is not compatible with LUKS1", keyslot);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Convert LUKS2 -> LUKS1 */
+int LUKS2_luks2_to_luks1(struct crypt_device *cd, struct luks2_hdr *hdr2, struct luks_phdr *hdr1)
+{
+ size_t buf_size, buf_offset;
+ char cipher[LUKS_CIPHERNAME_L], cipher_mode[LUKS_CIPHERMODE_L];
+ char *digest, *digest_salt;
+ const char *hash;
+ size_t len;
+ json_object *jobj_keyslot, *jobj_digest, *jobj_segment, *jobj_kdf, *jobj_area, *jobj1, *jobj2;
+ uint32_t key_size;
+ int i, r, last_active = 0;
+ uint64_t offset, area_length;
+ char *buf, luksMagic[] = LUKS_MAGIC;
+
+ jobj_digest = LUKS2_get_digest_jobj(hdr2, 0);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ jobj_segment = LUKS2_get_segment_jobj(hdr2, CRYPT_DEFAULT_SEGMENT);
+ if (!jobj_segment)
+ return -EINVAL;
+
+ if (json_segment_get_sector_size(jobj_segment) != SECTOR_SIZE) {
+ log_err(cd, _("Cannot convert to LUKS1 format - default segment encryption sector size is not 512 bytes."));
+ return -EINVAL;
+ }
+
+ json_object_object_get_ex(hdr2->jobj, "digests", &jobj1);
+ if (!json_object_object_get_ex(jobj_digest, "type", &jobj2) ||
+ strcmp(json_object_get_string(jobj2), "pbkdf2") ||
+ json_object_object_length(jobj1) != 1) {
+ log_err(cd, _("Cannot convert to LUKS1 format - key slot digests are not LUKS1 compatible."));
+ return -EINVAL;
+ }
+ if (!json_object_object_get_ex(jobj_digest, "hash", &jobj2))
+ return -EINVAL;
+ hash = json_object_get_string(jobj2);
+
+ r = crypt_parse_name_and_mode(LUKS2_get_cipher(hdr2, CRYPT_DEFAULT_SEGMENT), cipher, NULL, cipher_mode);
+ if (r < 0)
+ return r;
+
+ if (crypt_cipher_wrapped_key(cipher, cipher_mode)) {
+ log_err(cd, _("Cannot convert to LUKS1 format - device uses wrapped key cipher %s."), cipher);
+ return -EINVAL;
+ }
+
+ if (json_segments_count(LUKS2_get_segments_jobj(hdr2)) != 1) {
+ log_err(cd, _("Cannot convert to LUKS1 format - device uses more segments."));
+ return -EINVAL;
+ }
+
+ r = LUKS2_tokens_count(hdr2);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ log_err(cd, _("Cannot convert to LUKS1 format - LUKS2 header contains %u token(s)."), r);
+ return -EINVAL;
+ }
+
+ r = LUKS2_get_volume_key_size(hdr2, 0);
+ if (r < 0)
+ return -EINVAL;
+ key_size = r;
+
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ if (LUKS2_keyslot_info(hdr2, i) == CRYPT_SLOT_INACTIVE)
+ continue;
+
+ if (LUKS2_keyslot_info(hdr2, i) == CRYPT_SLOT_INVALID) {
+ log_err(cd, _("Cannot convert to LUKS1 format - keyslot %u is in invalid state."), i);
+ return -EINVAL;
+ }
+
+ if (i >= LUKS_NUMKEYS) {
+ log_err(cd, _("Cannot convert to LUKS1 format - slot %u (over maximum slots) is still active."), i);
+ return -EINVAL;
+ }
+
+ if (!keyslot_LUKS1_compatible(cd, hdr2, i, key_size, hash)) {
+ log_err(cd, _("Cannot convert to LUKS1 format - keyslot %u is not LUKS1 compatible."), i);
+ return -EINVAL;
+ }
+ }
+
+ memset(hdr1, 0, sizeof(*hdr1));
+
+ for (i = 0; i < LUKS_NUMKEYS; i++) {
+ hdr1->keyblock[i].active = LUKS_KEY_DISABLED;
+ hdr1->keyblock[i].stripes = LUKS_STRIPES;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr2, i);
+
+ if (jobj_keyslot) {
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+ if (!json_object_object_get_ex(jobj_area, "offset", &jobj1))
+ return -EINVAL;
+ offset = crypt_jobj_get_uint64(jobj1);
+ } else {
+ if (LUKS2_find_area_gap(cd, hdr2, key_size, &offset, &area_length))
+ return -EINVAL;
+ /*
+ * We must create placeholder LUKS2 keyslots in place of all
+ * inactive keyslots. Otherwise all inactive LUKS1 keyslots
+ * would be allocated over the same binary keyslot area.
+ */
+ if (placeholder_keyslot_alloc(cd, i, offset, area_length))
+ return -EINVAL;
+ }
+
+ offset /= SECTOR_SIZE;
+ if (offset > UINT32_MAX)
+ return -EINVAL;
+
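+ /* Undo the 28 KiB shift applied during LUKS1 -> LUKS2 conversion (expressed in 512-byte sectors). */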
+ hdr1->keyblock[i].keyMaterialOffset = offset;
+ hdr1->keyblock[i].keyMaterialOffset -=
+ ((2 * LUKS2_HDR_16K_LEN - LUKS_ALIGN_KEYSLOTS) / SECTOR_SIZE);
+
+ if (!jobj_keyslot)
+ continue;
+
+ hdr1->keyblock[i].active = LUKS_KEY_ENABLED;
+ last_active = i;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
+ continue;
+
+ if (!json_object_object_get_ex(jobj_kdf, "iterations", &jobj1))
+ continue;
+ hdr1->keyblock[i].passwordIterations = crypt_jobj_get_uint32(jobj1);
+
+ if (!json_object_object_get_ex(jobj_kdf, "salt", &jobj1))
+ continue;
+
+ if (crypt_base64_decode(&buf, &len, json_object_get_string(jobj1),
+ json_object_get_string_len(jobj1)))
+ continue;
+ if (len > 0 && len != LUKS_SALTSIZE) {
+ free(buf);
+ continue;
+ }
+ memcpy(hdr1->keyblock[i].passwordSalt, buf, LUKS_SALTSIZE);
+ free(buf);
+ }
+
+ if (!jobj_keyslot) {
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr2, last_active);
+ if (!jobj_keyslot)
+ return -EINVAL;
+ }
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+ if (!json_object_object_get_ex(jobj_area, "encryption", &jobj1))
+ return -EINVAL;
+ r = crypt_parse_name_and_mode(json_object_get_string(jobj1), cipher, NULL, cipher_mode);
+ if (r < 0)
+ return r;
+
+ strncpy(hdr1->cipherName, cipher, LUKS_CIPHERNAME_L - 1);
+ hdr1->cipherName[LUKS_CIPHERNAME_L-1] = '\0';
+ strncpy(hdr1->cipherMode, cipher_mode, LUKS_CIPHERMODE_L - 1);
+ hdr1->cipherMode[LUKS_CIPHERMODE_L-1] = '\0';
+
+ if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
+ return -EINVAL;
+ if (!json_object_object_get_ex(jobj_kdf, "hash", &jobj1))
+ return -EINVAL;
+ strncpy(hdr1->hashSpec, json_object_get_string(jobj1), sizeof(hdr1->hashSpec) - 1);
+
+ hdr1->keyBytes = key_size;
+
+ if (!json_object_object_get_ex(jobj_digest, "iterations", &jobj1))
+ return -EINVAL;
+ hdr1->mkDigestIterations = crypt_jobj_get_uint32(jobj1);
+
+ if (!json_object_object_get_ex(jobj_digest, "digest", &jobj1))
+ return -EINVAL;
+ r = crypt_base64_decode(&digest, &len, json_object_get_string(jobj1),
+ json_object_get_string_len(jobj1));
+ if (r < 0)
+ return r;
+ /* LUKS2 may store a digest longer than the SHA-1 size; LUKS1 keeps only the first LUKS_DIGESTSIZE bytes. */
+ if (len < LUKS_DIGESTSIZE) {
+ free(digest);
+ return -EINVAL;
+ }
+ memcpy(hdr1->mkDigest, digest, LUKS_DIGESTSIZE);
+ free(digest);
+
+ if (!json_object_object_get_ex(jobj_digest, "salt", &jobj1))
+ return -EINVAL;
+ r = crypt_base64_decode(&digest_salt, &len, json_object_get_string(jobj1),
+ json_object_get_string_len(jobj1));
+ if (r < 0)
+ return r;
+ if (len != LUKS_SALTSIZE) {
+ free(digest_salt);
+ return -EINVAL;
+ }
+ memcpy(hdr1->mkDigestSalt, digest_salt, LUKS_SALTSIZE);
+ free(digest_salt);
+
+ if (!json_object_object_get_ex(jobj_segment, "offset", &jobj1))
+ return -EINVAL;
+ offset = crypt_jobj_get_uint64(jobj1) / SECTOR_SIZE;
+ if (offset > UINT32_MAX)
+ return -EINVAL;
+ hdr1->payloadOffset = offset;
+
+ strncpy(hdr1->uuid, hdr2->uuid, UUID_STRING_L); /* max 36 chars */
+ hdr1->uuid[UUID_STRING_L-1] = '\0';
+
+ memcpy(hdr1->magic, luksMagic, LUKS_MAGIC_L);
+
+ hdr1->version = 1;
+
+ r = luks_header_in_use(cd);
+ if (r)
+ return r > 0 ? -EBUSY : r;
+
+ /* move keyslots 32k -> 4k offset */
+ buf_offset = 2 * LUKS2_HDR_16K_LEN;
+ buf_size = LUKS2_keyslots_size(hdr2);
+ r = move_keyslot_areas(cd, buf_offset, 8 * SECTOR_SIZE, buf_size);
+ if (r < 0) {
+ log_err(cd, _("Unable to move keyslot area."));
+ return r;
+ }
+
+ crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0,
+ 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
+
+ /* Write new LUKS1 hdr */
+ return LUKS_write_phdr(hdr1, cd);
+}
diff --git a/lib/luks2/luks2_reencrypt.c b/lib/luks2/luks2_reencrypt.c
new file mode 100644
index 0000000..b0dcd6d
--- /dev/null
+++ b/lib/luks2/luks2_reencrypt.c
@@ -0,0 +1,4375 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, reencryption helpers
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include "utils_device_locking.h"
+
+struct luks2_reencrypt {
+ /* reencryption window attributes */
+ uint64_t offset;
+ uint64_t progress;
+ uint64_t length;
+ uint64_t device_size;
+ bool online;
+ bool fixed_length;
+ crypt_reencrypt_direction_info direction;
+ crypt_reencrypt_mode_info mode;
+
+ char *device_name;
+ char *hotzone_name;
+ char *overlay_name;
+ uint32_t flags;
+
+ /* reencryption window persistence attributes */
+ struct reenc_protection rp;
+ struct reenc_protection rp_moved_segment;
+
+ int reenc_keyslot;
+
+ /* already running reencryption */
+ json_object *jobj_segs_hot;
+ struct json_object *jobj_segs_post;
+
+ /* backup segments */
+ json_object *jobj_segment_new;
+ int digest_new;
+ json_object *jobj_segment_old;
+ int digest_old;
+ json_object *jobj_segment_moved;
+
+ struct volume_key *vks;
+
+ void *reenc_buffer;
+ ssize_t read;
+
+ struct crypt_storage_wrapper *cw1;
+ struct crypt_storage_wrapper *cw2;
+
+ uint32_t wflags1;
+ uint32_t wflags2;
+
+ struct crypt_lock_handle *reenc_lock;
+};
+#if USE_LUKS2_REENCRYPTION
+static uint64_t data_shift_value(struct reenc_protection *rp)
+{
+ return rp->type == REENC_PROTECTION_DATASHIFT ? rp->p.ds.data_shift : 0;
+}
+
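+/* The "backup-final" flag marks the new (target) segment, "backup-previous" the original (source) segment. */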
+static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
+{
+ return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
+}
+
+static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
+{
+ return reencrypt_segment(hdr, 1);
+}
+
+static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
+{
+ return reencrypt_segment(hdr, 0);
+}
+
+static json_object *reencrypt_segments_old(struct luks2_hdr *hdr)
+{
+ json_object *jobj_segments, *jobj = NULL;
+
+ if (json_object_copy(reencrypt_segment_old(hdr), &jobj))
+ return NULL;
+
+ json_segment_remove_flag(jobj, "backup-previous");
+
+ jobj_segments = json_object_new_object();
+ if (!jobj_segments) {
+ json_object_put(jobj);
+ return NULL;
+ }
+
+ if (json_object_object_add_by_uint(jobj_segments, 0, jobj)) {
+ json_object_put(jobj);
+ json_object_put(jobj_segments);
+ return NULL;
+ }
+
+ return jobj_segments;
+}
+
+static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
+{
+ return json_segment_get_cipher(reencrypt_segment(hdr, 1));
+}
+
+static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
+{
+ return json_segment_get_cipher(reencrypt_segment(hdr, 0));
+}
+
+static uint32_t reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
+{
+ return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
+}
+
+static uint32_t reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
+{
+ return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
+}
+
+static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
+{
+ json_object *jobj = reencrypt_segment(hdr, new);
+ if (jobj)
+ return json_segment_get_offset(jobj, 0);
+
+ return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
+}
+
+static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
+{
+ json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
+
+ if (!jobj_segment)
+ return 0;
+
+ return json_segment_get_offset(jobj_segment, 0);
+}
+
+static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
+{
+ return reencrypt_data_offset(hdr, 1);
+}
+
+static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
+{
+ return reencrypt_data_offset(hdr, 0);
+}
+#endif
+static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
+{
+ int segment = LUKS2_get_segment_id_by_flag(hdr, new ? "backup-final" : "backup-previous");
+
+ if (segment < 0)
+ return segment;
+
+ return LUKS2_digest_by_segment(hdr, segment);
+}
+
+int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
+{
+ return reencrypt_digest(hdr, 1);
+}
+
+int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
+{
+ return reencrypt_digest(hdr, 0);
+}
+
+/* none, checksum, journal or datashift */
+static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_type;
+ int ks = LUKS2_find_keyslot(hdr, "reencrypt");
+
+ if (ks < 0)
+ return NULL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
+
+ json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
+ if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return NULL;
+
+ return json_object_get_string(jobj_type);
+}
+
+static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
+ int ks = LUKS2_find_keyslot(hdr, "reencrypt");
+
+ if (ks < 0)
+ return NULL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
+
+ json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
+ if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return NULL;
+ if (strcmp(json_object_get_string(jobj_type), "checksum"))
+ return NULL;
+ if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
+ return NULL;
+
+ return json_object_get_string(jobj_hash);
+}
+#if USE_LUKS2_REENCRYPTION
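+/*
+ * Build the post-write segment map for encryption with data shift:
+ * segments before the hot zone are kept and the in-reencryption segment
+ * is merged with the segment following it (or finalized when it is last).
+ */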
+static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
+{
+ int reenc_seg, i = 0;
+ json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
+ uint64_t tmp;
+
+ if (!rh->jobj_segs_hot || !jobj_segs_post)
+ goto err;
+
+ if (json_segments_count(rh->jobj_segs_hot) == 0)
+ return jobj_segs_post;
+
+ reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (reenc_seg < 0)
+ goto err;
+
+ while (i < reenc_seg) {
+ jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
+ if (!jobj_copy)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
+ }
+
+ if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
+ if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
+ goto err;
+ json_segment_remove_flag(jobj_seg_new, "in-reencryption");
+ tmp = rh->length;
+ } else {
+ json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
+ json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
+ tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
+ }
+
+ /* alter the size of the new segment; when reenc_seg == 0 we are finished */
+ json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
+ json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);
+
+ return jobj_segs_post;
+err:
+ json_object_put(jobj_segs_post);
+ return NULL;
+}
+
+static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ int sg, crypt_seg, i = 0;
+ uint64_t segment_size;
+ json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
+ *jobj_segs_hot = json_object_new_object();
+
+ if (!jobj_segs_hot)
+ return NULL;
+
+ crypt_seg = LUKS2_segment_by_type(hdr, "crypt");
+
+ /* FIXME: This is a hack. Find a proper way to fix it. */
+ sg = LUKS2_last_segment_by_type(hdr, "linear");
+ if (rh->offset && sg < 0)
+ goto err;
+ if (sg < 0)
+ return jobj_segs_hot;
+
+ jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
+ rh->offset >> SECTOR_SHIFT,
+ &rh->length,
+ reencrypt_segment_cipher_new(hdr),
+ reencrypt_get_sector_size_new(hdr),
+ 1);
+
+ while (i < sg) {
+ jobj_copy = LUKS2_get_segment_jobj(hdr, i);
+ if (!jobj_copy)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
+ }
+
+ segment_size = LUKS2_segment_size(hdr, sg, 0);
+ if (segment_size > rh->length) {
+ jobj_seg_shrunk = NULL;
+ if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
+ goto err;
+ json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
+ }
+
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
+ jobj_enc_seg = NULL; /* see err: label */
+
+ /* Is there a crypt segment after the encrypted area? */
+ if (crypt_seg >= 0) {
+ jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
+ if (!jobj_seg_new)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
+ }
+
+ return jobj_segs_hot;
+err:
+ json_object_put(jobj_enc_seg);
+ json_object_put(jobj_segs_hot);
+
+ return NULL;
+}
+
+static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct luks2_reencrypt *rh,
+ uint64_t data_offset,
+ uint64_t segment_offset,
+ uint64_t iv_offset,
+ const uint64_t *segment_length)
+{
+ switch (rh->mode) {
+ case CRYPT_REENCRYPT_REENCRYPT:
+ case CRYPT_REENCRYPT_ENCRYPT:
+ return json_segment_create_crypt(data_offset + segment_offset,
+ crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
+ segment_length,
+ reencrypt_segment_cipher_new(hdr),
+ reencrypt_get_sector_size_new(hdr), 0);
+ case CRYPT_REENCRYPT_DECRYPT:
+ return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
+ }
+
+ return NULL;
+}
+
+static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ int reenc_seg;
+ json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
+ *jobj_segs_post = json_object_new_object();
+ uint64_t fixed_length = rh->offset + rh->length;
+
+ if (!rh->jobj_segs_hot || !jobj_segs_post)
+ goto err;
+
+ reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (reenc_seg < 0)
+ return NULL;
+
+ jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
+
+ /*
+ * If there is no old segment after the reencrypted one, we are done;
+ * the size is set back to 'dynamic'.
+ */
+ jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
+ if (!jobj_new_seg_after)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
+
+ if (jobj_old_seg) {
+ if (rh->fixed_length) {
+ if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
+ goto err;
+ jobj_old_seg = jobj_old_seg_copy;
+ fixed_length = rh->device_size - fixed_length;
+ json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
+ } else
+ json_object_get(jobj_old_seg);
+ json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
+ }
+
+ return jobj_segs_post;
+err:
+ json_object_put(jobj_segs_post);
+ return NULL;
+}
+
+static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ int reenc_seg;
+ uint64_t fixed_length;
+
+ json_object *jobj_new_seg_after, *jobj_old_seg,
+ *jobj_segs_post = json_object_new_object();
+
+ if (!rh->jobj_segs_hot || !jobj_segs_post)
+ goto err;
+
+ reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (reenc_seg < 0)
+ return NULL;
+
+ jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
+ if (jobj_old_seg)
+ json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
+ if (rh->fixed_length && rh->offset) {
+ fixed_length = rh->device_size - rh->offset;
+ jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
+ } else
+ jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
+ if (!jobj_new_seg_after)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
+
+ return jobj_segs_post;
+err:
+ json_object_put(jobj_segs_post);
+ return NULL;
+}
+
+static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct luks2_reencrypt *rh,
+ uint64_t data_offset,
+ uint64_t segment_offset,
+ uint64_t iv_offset,
+ const uint64_t *segment_length)
+{
+ switch (rh->mode) {
+ case CRYPT_REENCRYPT_REENCRYPT:
+ case CRYPT_REENCRYPT_ENCRYPT:
+ return json_segment_create_crypt(data_offset + segment_offset,
+ crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
+ segment_length,
+ reencrypt_segment_cipher_new(hdr),
+ reencrypt_get_sector_size_new(hdr), 1);
+ case CRYPT_REENCRYPT_DECRYPT:
+ return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
+ }
+
+ return NULL;
+}
+
+static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct luks2_reencrypt *rh,
+ uint64_t data_offset,
+ uint64_t segment_offset,
+ const uint64_t *segment_length)
+{
+ json_object *jobj_old_seg = NULL;
+
+ switch (rh->mode) {
+ case CRYPT_REENCRYPT_REENCRYPT:
+ case CRYPT_REENCRYPT_DECRYPT:
+ jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
+ crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
+ segment_length,
+ reencrypt_segment_cipher_old(hdr),
+ reencrypt_get_sector_size_old(hdr),
+ 0);
+ break;
+ case CRYPT_REENCRYPT_ENCRYPT:
+ jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
+ }
+
+ return jobj_old_seg;
+}
+
+static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size,
+ uint64_t data_offset)
+{
+ json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
+ uint64_t fixed_length, tmp = rh->offset + rh->length;
+ unsigned int sg = 0;
+
+ jobj_segs_hot = json_object_new_object();
+ if (!jobj_segs_hot)
+ return NULL;
+
+ if (rh->offset) {
+ jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
+ if (!jobj_new_seg)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
+ }
+
+ jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
+ if (!jobj_reenc_seg)
+ goto err;
+
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
+
+ if (tmp < device_size) {
+ fixed_length = device_size - tmp;
+ jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp),
+ rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
+ if (!jobj_old_seg)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
+ }
+
+ return jobj_segs_hot;
+err:
+ json_object_put(jobj_segs_hot);
+ return NULL;
+}
+
+static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
+ uint64_t device_size, uint64_t data_offset)
+{
+ json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
+ uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress;
+ unsigned int sg = 0;
+
+ jobj_segs_hot = json_object_new_object();
+ if (!jobj_segs_hot)
+ return NULL;
+
+ if (rh->offset) {
+ jobj_new_seg = LUKS2_get_segment_jobj(hdr, 0);
+ if (!jobj_new_seg)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
+
+ if (linear_length) {
+ jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh,
+ data_offset,
+ json_segment_get_size(jobj_new_seg, 0),
+ 0,
+ &linear_length);
+ if (!jobj_new_seg)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
+ }
+ }
+
+ jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset,
+ rh->offset,
+ rh->offset,
+ &rh->length);
+ if (!jobj_reenc_seg)
+ goto err;
+
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
+
+ if (!rh->offset && (jobj_new_seg = LUKS2_get_segment_jobj(hdr, 1)) &&
+ !json_segment_is_backup(jobj_new_seg))
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
+ else if (tmp < device_size) {
+ fixed_length = device_size - tmp;
+ jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh,
+ data_offset + data_shift_value(&rh->rp),
+ rh->offset + rh->length,
+ rh->fixed_length ? &fixed_length : NULL);
+ if (!jobj_old_seg)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
+ }
+
+ return jobj_segs_hot;
+err:
+ json_object_put(jobj_segs_hot);
+ return NULL;
+}
+
+static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ int reenc_seg, i = 0;
+ json_object *jobj_copy, *jobj_seg_old, *jobj_seg_new,
+ *jobj_segs_post = json_object_new_object();
+ unsigned segs;
+ uint64_t tmp;
+
+ if (!rh->jobj_segs_hot || !jobj_segs_post)
+ goto err;
+
+ segs = json_segments_count(rh->jobj_segs_hot);
+ if (segs == 0)
+ return jobj_segs_post;
+
+ reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (reenc_seg < 0)
+ goto err;
+
+ if (reenc_seg == 0) {
+ jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL);
+ if (!jobj_seg_new)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new);
+
+ return jobj_segs_post;
+ }
+
+ jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0);
+ if (!jobj_copy)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
+
+ jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
+
+ tmp = rh->length + rh->progress;
+ jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset,
+ json_segment_get_size(rh->jobj_segment_moved, 0),
+ data_shift_value(&rh->rp),
+ jobj_seg_old ? &tmp : NULL);
+ json_object_object_add_by_uint(jobj_segs_post, i++, jobj_seg_new);
+
+ if (jobj_seg_old)
+ json_object_object_add_by_uint(jobj_segs_post, i, json_object_get(jobj_seg_old));
+
+ return jobj_segs_post;
+err:
+ json_object_put(jobj_segs_post);
+ return NULL;
+}
+
+static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size,
+ uint64_t data_offset)
+{
+ json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
+ *jobj_segs_hot = json_object_new_object();
+ int sg = 0;
+ uint64_t fixed_length, tmp = rh->offset + rh->length;
+
+ if (!jobj_segs_hot)
+ return NULL;
+
+ if (rh->offset) {
+ if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
+ goto err;
+ json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));
+
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
+ }
+
+ jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
+ if (!jobj_reenc_seg)
+ goto err;
+
+ json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
+
+ if (tmp < device_size) {
+ fixed_length = device_size - tmp;
+ jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length,
+ rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
+ if (!jobj_new_seg)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
+ }
+
+ return jobj_segs_hot;
+err:
+ json_object_put(jobj_segs_hot);
+ return NULL;
+}
+
+static int reencrypt_make_hot_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size,
+ uint64_t data_offset)
+{
+ rh->jobj_segs_hot = NULL;
+
+ if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating hot segments for encryption with data move.");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset);
+ } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating hot segments for decryption with data move.");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_decrypt_shift(cd, hdr, rh, device_size, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
+ log_dbg(cd, "Calculating hot segments (forward direction).");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
+ log_dbg(cd, "Calculating hot segments (backward direction).");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
+ }
+
+ return rh->jobj_segs_hot ? 0 : -EINVAL;
+}
+
+static int reencrypt_make_post_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ rh->jobj_segs_post = NULL;
+
+ if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating post segments for encryption with data move.");
+ rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset);
+ } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating post segments for decryption with data move.");
+ rh->jobj_segs_post = _dec_create_segments_shift_after(cd, hdr, rh, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
+ log_dbg(cd, "Calculating post segments (forward direction).");
+ rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
+ log_dbg(cd, "Calculating segments (backward direction).");
+ rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
+ }
+
+ return rh->jobj_segs_post ? 0 : -EINVAL;
+}
+#endif
+static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
+ int ks = LUKS2_find_keyslot(hdr, "reencrypt");
+
+ if (ks < 0)
+ return 0;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
+
+ json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
+ return 0;
+
+ return crypt_jobj_get_uint64(jobj_data_shift);
+}
+
+static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
+{
+ const char *mode;
+ crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
+ json_object *jobj_keyslot, *jobj_mode;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
+ if (!jobj_keyslot)
+ return mi;
+
+ json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
+ mode = json_object_get_string(jobj_mode);
+
+ /* validation enforces allowed values */
+ if (!strcmp(mode, "encrypt"))
+ mi = CRYPT_REENCRYPT_ENCRYPT;
+ else if (!strcmp(mode, "decrypt"))
+ mi = CRYPT_REENCRYPT_DECRYPT;
+
+ return mi;
+}
+
+static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
+{
+ const char *value;
+ json_object *jobj_keyslot, *jobj_mode;
+ crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
+ if (!jobj_keyslot)
+ return di;
+
+ json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
+ value = json_object_get_string(jobj_mode);
+
+ /* validation enforces allowed values */
+ if (strcmp(value, "forward"))
+ di = CRYPT_REENCRYPT_BACKWARD;
+
+ return di;
+}
+
+typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
+
+void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp)
+{
+ if (!rp || rp->type != REENC_PROTECTION_CHECKSUM)
+ return;
+
+ if (rp->p.csum.ch) {
+ crypt_hash_destroy(rp->p.csum.ch);
+ rp->p.csum.ch = NULL;
+ }
+
+ if (rp->p.csum.checksums) {
+ crypt_safe_memzero(rp->p.csum.checksums, rp->p.csum.checksums_len);
+ free(rp->p.csum.checksums);
+ rp->p.csum.checksums = NULL;
+ }
+}
+
+void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
+{
+ if (!rh)
+ return;
+
+ LUKS2_reencrypt_protection_erase(&rh->rp);
+ LUKS2_reencrypt_protection_erase(&rh->rp_moved_segment);
+
+ json_object_put(rh->jobj_segs_hot);
+ rh->jobj_segs_hot = NULL;
+ json_object_put(rh->jobj_segs_post);
+ rh->jobj_segs_post = NULL;
+ json_object_put(rh->jobj_segment_old);
+ rh->jobj_segment_old = NULL;
+ json_object_put(rh->jobj_segment_new);
+ rh->jobj_segment_new = NULL;
+ json_object_put(rh->jobj_segment_moved);
+ rh->jobj_segment_moved = NULL;
+
+ free(rh->reenc_buffer);
+ rh->reenc_buffer = NULL;
+ crypt_storage_wrapper_destroy(rh->cw1);
+ rh->cw1 = NULL;
+ crypt_storage_wrapper_destroy(rh->cw2);
+ rh->cw2 = NULL;
+
+ free(rh->device_name);
+ free(rh->overlay_name);
+ free(rh->hotzone_name);
+ crypt_drop_keyring_key(cd, rh->vks);
+ crypt_free_volume_key(rh->vks);
+ device_release_excl(cd, crypt_data_device(cd));
+ crypt_unlock_internal(cd, rh->reenc_lock);
+ free(rh);
+}
+
+int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct reenc_protection *rp,
+ int reencrypt_keyslot,
+ uint64_t *r_length)
+{
+#if USE_LUKS2_REENCRYPTION
+ int r;
+ uint64_t dummy, area_length;
+
+ assert(hdr);
+ assert(rp);
+ assert(r_length);
+
+ if (rp->type <= REENC_PROTECTION_NONE) {
+ *r_length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
+ return 0;
+ }
+
+ if (rp->type == REENC_PROTECTION_DATASHIFT) {
+ *r_length = rp->p.ds.data_shift;
+ return 0;
+ }
+
+ r = LUKS2_keyslot_area(hdr, reencrypt_keyslot, &dummy, &area_length);
+ if (r < 0)
+ return -EINVAL;
+
+ if (rp->type == REENC_PROTECTION_JOURNAL) {
+ *r_length = area_length;
+ return 0;
+ }
+
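+ /*
+ * Checksum resilience stores one hash per data block in the reencrypt
+ * keyslot area, so the hotzone is limited by how many hashes fit there.
+ * For example (assuming SHA-256 checksums and 4096-byte blocks): a 256 KiB
+ * area holds 8192 hashes, i.e. a 32 MiB hotzone.
+ */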
+ if (rp->type == REENC_PROTECTION_CHECKSUM) {
+ *r_length = (area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
+ return 0;
+ }
+
+ return -EINVAL;
+#else
+ return -ENOTSUP;
+#endif
+}
+#if USE_LUKS2_REENCRYPTION
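+/*
+ * The hotzone alignment is the maximum of the data device block size and
+ * the old and new encryption sector sizes, so every hotzone boundary is
+ * valid for both ciphers and for direct device I/O.
+ */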
+static size_t reencrypt_get_alignment(struct crypt_device *cd,
+ struct luks2_hdr *hdr)
+{
+ size_t ss, alignment = device_block_size(cd, crypt_data_device(cd));
+
+ ss = reencrypt_get_sector_size_old(hdr);
+ if (ss > alignment)
+ alignment = ss;
+ ss = reencrypt_get_sector_size_new(hdr);
+ if (ss > alignment)
+ alignment = ss;
+
+ return alignment;
+}
+
+/* returns void because it must not fail on a valid LUKS2 header */
+static void _load_backup_segments(struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh)
+{
+ int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
+
+ if (segment >= 0) {
+ rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
+ rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
+ } else {
+ rh->jobj_segment_new = NULL;
+ rh->digest_new = -ENOENT;
+ }
+
+ segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
+ if (segment >= 0) {
+ rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
+ rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
+ } else {
+ rh->jobj_segment_old = NULL;
+ rh->digest_old = -ENOENT;
+ }
+
+ segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
+ if (segment >= 0)
+ rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
+ else
+ rh->jobj_segment_moved = NULL;
+}
+
+static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments,
+ uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
+{
+ uint64_t tmp, linear_length = 0;
+ int sg, segs = json_segments_count(jobj_segments);
+
+ /* find reencrypt offset with data shift */
+ for (sg = 0; sg < segs; sg++)
+ if (LUKS2_segment_is_type(hdr, sg, "linear"))
+ linear_length += LUKS2_segment_size(hdr, sg, 0);
+
+ /* linear_length is the total length of all active linear segments */
+ if (linear_length && segs > 1) {
+ if (linear_length < data_shift)
+ return -EINVAL;
+ tmp = linear_length - data_shift;
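+ /* remaining linear area is smaller than the data shift: shorten the last hotzone accordingly */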
+ if (tmp && tmp < data_shift) {
+ *offset = data_shift;
+ *reencrypt_length = tmp;
+ } else
+ *offset = tmp;
+ return 0;
+ }
+
+ if (segs == 1) {
+ *offset = 0;
+ return 0;
+ }
+
+ /* should be unreachable */
+
+ return -EINVAL;
+}
+
+static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr,
+ json_object *jobj_segments,
+ uint64_t data_shift,
+ uint64_t *offset)
+{
+ int last_crypt = LUKS2_last_segment_by_type(hdr, "crypt");
+
+ /* if there is no crypt segment, or the last one is the first segment, just return offset = 0 */
+ if (last_crypt <= 0) {
+ *offset = 0;
+ return 0;
+ }
+
+ *offset = LUKS2_segment_offset(hdr, last_crypt, 0) - data_shift;
+ return 0;
+}
+
+static int _offset_forward(json_object *jobj_segments, uint64_t *offset)
+{
+ int segs = json_segments_count(jobj_segments);
+
+ if (segs == 1)
+ *offset = 0;
+ else if (segs == 2) {
+ *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
+ if (!*offset)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
+{
+ int segs = json_segments_count(jobj_segments);
+ uint64_t tmp;
+
+ if (segs == 1) {
+ if (device_size < *length)
+ *length = device_size;
+ *offset = device_size - *length;
+ } else if (segs == 2) {
+ tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
+ if (tmp < *length)
+ *length = tmp;
+ *offset = tmp - *length;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+/* the computed offset is always relative to the data offset */
+/* the LUKS2 header MUST be valid */
+static int reencrypt_offset(struct luks2_hdr *hdr,
+ crypt_reencrypt_direction_info di,
+ uint64_t device_size,
+ uint64_t *reencrypt_length,
+ uint64_t *offset)
+{
+ int r, sg;
+ json_object *jobj_segments;
+ uint64_t data_shift = reencrypt_data_shift(hdr);
+
+ if (!offset)
+ return -EINVAL;
+
+ /* if a segment is already in reencryption, return its offset directly */
+ json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
+ sg = json_segments_segment_in_reencrypt(jobj_segments);
+ if (sg >= 0) {
+ *offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
+ return 0;
+ }
+
+ if (di == CRYPT_REENCRYPT_FORWARD) {
+ if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
+ LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) {
+ r = reencrypt_offset_forward_moved(hdr, jobj_segments, data_shift, offset);
+ if (!r && *offset > device_size)
+ *offset = device_size;
+ return r;
+ }
+ return _offset_forward(jobj_segments, offset);
+ } else if (di == CRYPT_REENCRYPT_BACKWARD) {
+ if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
+ LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
+ return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
+ return _offset_backward(jobj_segments, device_size, reencrypt_length, offset);
+ }
+
+ return -EINVAL;
+}
+
+static uint64_t reencrypt_length(struct crypt_device *cd,
+ struct reenc_protection *rp,
+ uint64_t keyslot_area_length,
+ uint64_t length_max,
+ size_t alignment)
+{
+ unsigned long dummy, optimal_alignment;
+ uint64_t length, soft_mem_limit;
+
+ if (rp->type == REENC_PROTECTION_NONE)
+ length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
+ else if (rp->type == REENC_PROTECTION_CHECKSUM)
+ length = (keyslot_area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
+ else if (rp->type == REENC_PROTECTION_DATASHIFT)
+ return rp->p.ds.data_shift;
+ else
+ length = keyslot_area_length;
+
+ /* hard limit */
+ if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
+ length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
+
+ /* soft limit is 1/4 of system memory */
+ soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */
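+ /* e.g. with 8 GiB of RAM the soft limit is 2 GiB (8388608 kB << 8) */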
+
+ if (soft_mem_limit && length > soft_mem_limit)
+ length = soft_mem_limit;
+
+ if (length_max && length > length_max)
+ length = length_max;
+
+ length -= (length % alignment);
+
+ /* caller emits the error later */
+ if (!length)
+ return length;
+
+ device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);
+
+ /* we have to stick with encryption sector size alignment */
+ if (optimal_alignment % alignment)
+ return length;
+
+ /* align to opt-io size only if remaining size allows it */
+ if (length > optimal_alignment)
+ length -= (length % optimal_alignment);
+
+ return length;
+}
+
+static int reencrypt_context_init(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size,
+ uint64_t max_hotzone_size,
+ uint64_t fixed_device_size)
+{
+ int r;
+ size_t alignment;
+ uint64_t dummy, area_length;
+
+ rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
+ if (rh->reenc_keyslot < 0)
+ return -EINVAL;
+ if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
+ return -EINVAL;
+
+ rh->mode = reencrypt_mode(hdr);
+
+ rh->direction = reencrypt_direction(hdr);
+
+ r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp, true);
+ if (r < 0)
+ return r;
+
+ if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
+ alignment = rh->rp.p.csum.block_size;
+ else
+ alignment = reencrypt_get_alignment(cd, hdr);
+
+ if (!alignment)
+ return -EINVAL;
+
+ if ((max_hotzone_size << SECTOR_SHIFT) % alignment) {
+ log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
+ return -EINVAL;
+ }
+
+ if ((fixed_device_size << SECTOR_SHIFT) % alignment) {
+ log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
+ return -EINVAL;
+ }
+
+ if (fixed_device_size) {
+ log_dbg(cd, "Switching reencryption to fixed size mode.");
+ device_size = fixed_device_size << SECTOR_SHIFT;
+ rh->fixed_length = true;
+ } else
+ rh->fixed_length = false;
+
+ rh->length = reencrypt_length(cd, &rh->rp, area_length, max_hotzone_size << SECTOR_SHIFT, alignment);
+ if (!rh->length) {
+ log_dbg(cd, "Invalid reencryption length.");
+ return -EINVAL;
+ }
+
+ if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
+ log_dbg(cd, "Failed to get reencryption offset.");
+ return -EINVAL;
+ }
+
+ if (rh->offset > device_size)
+ return -EINVAL;
+ if (rh->length > device_size - rh->offset)
+ rh->length = device_size - rh->offset;
+
+ _load_backup_segments(hdr, rh);
+
+ r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp_moved_segment, false);
+ if (r < 0)
+ return r;
+
+ if (rh->rp_moved_segment.type == REENC_PROTECTION_NOT_SET)
+ log_dbg(cd, "No moved segment resilience configured.");
+
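+ /*
+ * Derive current progress from the hotzone offset: backward reencryption
+ * counts from the device end, and the forward case with a moved first
+ * segment has to account for the relocated segment as well.
+ */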
+ if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
+ rh->progress = device_size - rh->offset - rh->length;
+ else if (rh->jobj_segment_moved && rh->direction == CRYPT_REENCRYPT_FORWARD) {
+ if (rh->offset == json_segment_get_offset(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false))
+ rh->progress = device_size - json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false);
+ else
+ rh->progress = rh->offset - json_segment_get_size(rh->jobj_segment_moved, 0);
+ } else
+ rh->progress = rh->offset;
+
+ log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");
+ log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
+ log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
+ log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
+ log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
+ log_dbg(cd, "reencrypt shift: %s%" PRIu64,
+ (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""),
+ data_shift_value(&rh->rp));
+ log_dbg(cd, "reencrypt alignment: %zu", alignment);
+ log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);
+
+ rh->device_size = device_size;
+
+ return rh->length < 512 ? -EINVAL : 0;
+}
+
+static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
+{
+ if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
+ return data_shift_value(&rh->rp);
+ return rh->length;
+}
+
+static int reencrypt_load_clean(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint64_t device_size,
+ uint64_t max_hotzone_size,
+ uint64_t fixed_device_size,
+ struct luks2_reencrypt **rh)
+{
+ int r;
+ struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));
+
+ if (!tmp)
+ return -ENOMEM;
+
+ log_dbg(cd, "Loading stored reencryption context.");
+
+ r = reencrypt_context_init(cd, hdr, tmp, device_size, max_hotzone_size, fixed_device_size);
+ if (r)
+ goto err;
+
+ if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
+ reencrypt_buffer_length(tmp))) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ *rh = tmp;
+
+ return 0;
+err:
+ LUKS2_reencrypt_free(cd, tmp);
+
+ return r;
+}
+
+static int reencrypt_make_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size)
+{
+ int r;
+ uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
+
+ log_dbg(cd, "Calculating segments.");
+
+ r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
+ if (!r) {
+ r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
+ if (r)
+ json_object_put(rh->jobj_segs_hot);
+ }
+
+ if (r)
+ log_dbg(cd, "Failed to make reencryption segments.");
+
+ return r;
+}
+
+static int reencrypt_make_segments_crashed(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh)
+{
+ int r;
+ uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
+
+ if (!rh)
+ return -EINVAL;
+
+ rh->jobj_segs_hot = json_object_new_object();
+ if (!rh->jobj_segs_hot)
+ return -ENOMEM;
+
+ json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
+ if (json_segment_is_backup(val))
+ continue;
+ json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
+ }
+
+ r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
+ if (r) {
+ json_object_put(rh->jobj_segs_hot);
+ rh->jobj_segs_hot = NULL;
+ }
+
+ return r;
+}
+
+static int reencrypt_load_crashed(struct crypt_device *cd,
+ struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
+{
+ bool dynamic;
+ uint64_t required_device_size;
+ int r, reenc_seg;
+
+ if (LUKS2_get_data_size(hdr, &required_device_size, &dynamic))
+ return -EINVAL;
+
+ if (dynamic)
+ required_device_size = 0;
+ else
+ required_device_size >>= SECTOR_SHIFT;
+
+ r = reencrypt_load_clean(cd, hdr, device_size, 0, required_device_size, rh);
+
+ if (!r) {
+ reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
+ if (reenc_seg < 0)
+ r = -EINVAL;
+ else
+ (*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
+ }
+
+ if (!r)
+ r = reencrypt_make_segments_crashed(cd, hdr, *rh);
+
+ if (r) {
+ LUKS2_reencrypt_free(cd, *rh);
+ *rh = NULL;
+ }
+ return r;
+}
+
+static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ struct volume_key *vks)
+{
+ int r;
+ struct volume_key *vk;
+ uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;
+
+ vk = crypt_volume_key_by_id(vks, rh->digest_old);
+ r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
+ reencrypt_get_data_offset_old(hdr),
+ crypt_get_iv_offset(cd),
+ reencrypt_get_sector_size_old(hdr),
+ reencrypt_segment_cipher_old(hdr),
+ vk, wrapper_flags | OPEN_READONLY);
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ return r;
+ }
+ rh->wflags1 = wrapper_flags | OPEN_READONLY;
+ log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));
+
+ vk = crypt_volume_key_by_id(vks, rh->digest_new);
+ r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
+ reencrypt_get_data_offset_new(hdr),
+ crypt_get_iv_offset(cd),
+ reencrypt_get_sector_size_new(hdr),
+ reencrypt_segment_cipher_new(hdr),
+ vk, wrapper_flags);
+ if (r) {
+ log_err(cd, _("Failed to initialize new segment storage wrapper."));
+ return r;
+ }
+ rh->wflags2 = wrapper_flags;
+ log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));
+
+ return 0;
+}
+
+static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
+{
+ if (!rh || !name)
+ return -EINVAL;
+
+ if (*name == '/') {
+ if (!(rh->device_name = dm_device_name(name)))
+ return -EINVAL;
+ } else if (!(rh->device_name = strdup(name)))
+ return -ENOMEM;
+
+ if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
+ rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
+ rh->hotzone_name = NULL;
+ return -ENOMEM;
+ }
+ if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
+ rh->overlay_name = NULL;
+ return -ENOMEM;
+ }
+
+ rh->online = true;
+ return 0;
+}
+
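+/* shift an absolute data offset by the data shift value in the given reencryption direction */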
+static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
+{
+ int r = -EINVAL;
+
+ if (!offset)
+ return r;
+
+ if (di == CRYPT_REENCRYPT_FORWARD) {
+ if (*offset >= data_shift) {
+ *offset -= data_shift;
+ r = 0;
+ }
+ } else if (di == CRYPT_REENCRYPT_BACKWARD) {
+ *offset += data_shift;
+ r = 0;
+ }
+
+ return r;
+}
+
+static int reencrypt_update_flag(struct crypt_device *cd, uint8_t version,
+ bool enable, bool commit)
+{
+ uint32_t reqs;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+ if (enable) {
+ log_dbg(cd, "Going to store reencryption requirement flag (version: %u).", version);
+ return LUKS2_config_set_requirement_version(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, version, commit);
+ }
+
+ if (LUKS2_config_get_requirements(cd, hdr, &reqs))
+ return -EINVAL;
+
+ reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
+
+ log_dbg(cd, "Going to wipe reencryption requirement flag.");
+
+ return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
+}
+
+static int reencrypt_hotzone_protect_ready(struct crypt_device *cd,
+ struct reenc_protection *rp)
+{
+ assert(rp);
+
+ if (rp->type == REENC_PROTECTION_NOT_SET)
+ return -EINVAL;
+
+ if (rp->type != REENC_PROTECTION_CHECKSUM)
+ return 0;
+
+ if (!rp->p.csum.checksums) {
+ log_dbg(cd, "Allocating buffer for storing resilience checksums.");
+ if (posix_memalign(&rp->p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
+ rp->p.csum.checksums_len))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int reencrypt_recover_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ struct volume_key *vks)
+{
+ struct volume_key *vk_old, *vk_new;
+ size_t count, s;
+ ssize_t read, w;
+ struct reenc_protection *rp;
+ int devfd, r, new_sector_size, old_sector_size, rseg;
+ uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
+ data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
+ char *checksum_tmp = NULL, *data_buffer = NULL;
+ struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
+
+ assert(hdr);
+ assert(rh);
+ assert(vks);
+
+ rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (rh->offset == 0 && rh->rp_moved_segment.type > REENC_PROTECTION_NOT_SET) {
+ log_dbg(cd, "Recovery using moved segment protection.");
+ rp = &rh->rp_moved_segment;
+ } else
+ rp = &rh->rp;
+
+ if (rseg < 0 || rh->length < 512)
+ return -EINVAL;
+
+ r = reencrypt_hotzone_protect_ready(cd, rp);
+ if (r) {
+ log_err(cd, _("Failed to initialize hotzone protection."));
+ return -EINVAL;
+ }
+
+ vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
+ if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
+ return -EINVAL;
+ vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
+ if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
+ return -EINVAL;
+ old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
+ new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
+ if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
+ crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
+ else
+ crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
+
+ log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ", crash_iv_offset: %" PRIu64,
+ data_offset + rh->offset, rh->length, crash_iv_offset);
+
+ r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
+ data_offset + rh->offset, crash_iv_offset, new_sector_size,
+ reencrypt_segment_cipher_new(hdr), vk_new, 0);
+ if (r) {
+ log_err(cd, _("Failed to initialize new segment storage wrapper."));
+ return r;
+ }
+
+ if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ switch (rp->type) {
+ case REENC_PROTECTION_CHECKSUM:
+ log_dbg(cd, "Checksums based recovery.");
+
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
+ data_offset + rh->offset, crash_iv_offset, old_sector_size,
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ goto out;
+ }
+
+ count = rh->length / rp->p.csum.block_size;
+ area_length_read = count * rp->p.csum.hash_size;
+ if (area_length_read > area_length) {
+ log_dbg(cd, "Internal error in calculated area_length.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ checksum_tmp = malloc(rp->p.csum.hash_size);
+ if (!checksum_tmp) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /* TODO: lock for read */
+ devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
+ if (devfd < 0)
+ goto out;
+
+ /* read old data checksums */
+ read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
+ device_alignment(crypt_metadata_device(cd)), rp->p.csum.checksums, area_length_read, area_offset);
+ if (read < 0 || (size_t)read != area_length_read) {
+ log_err(cd, _("Failed to read checksums for current hotzone."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
+ r = -EINVAL;
+ goto out;
+ }
+
+ for (s = 0; s < count; s++) {
+ if (crypt_hash_write(rp->p.csum.ch, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
+ log_dbg(cd, "Failed to write hash.");
+ r = -EINVAL;
+ goto out;
+ }
+ if (crypt_hash_final(rp->p.csum.ch, checksum_tmp, rp->p.csum.hash_size)) {
+ log_dbg(cd, "Failed to finalize hash.");
+ r = -EINVAL;
+ goto out;
+ }
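+ /*
+ * A checksum match means the block still holds the old (not yet
+ * reencrypted) data: decrypt it with the old segment key and write
+ * it back through the new segment wrapper.
+ */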
+ if (!memcmp(checksum_tmp, (char *)rp->p.csum.checksums + (s * rp->p.csum.hash_size), rp->p.csum.hash_size)) {
+ log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rp->p.csum.block_size, s * rp->p.csum.block_size);
+ if (crypt_storage_wrapper_decrypt(cw1, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
+ log_err(cd, _("Failed to decrypt sector %zu."), s);
+ r = -EINVAL;
+ goto out;
+ }
+ w = crypt_storage_wrapper_encrypt_write(cw2, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size);
+ if (w < 0 || (size_t)w != rp->p.csum.block_size) {
+ log_err(cd, _("Failed to recover sector %zu."), s);
+ r = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ r = 0;
+ break;
+ case REENC_PROTECTION_JOURNAL:
+ log_dbg(cd, "Journal based recovery.");
+
+ /* FIXME: validation candidate */
+ if (rh->length > area_length) {
+ r = -EINVAL;
+ log_dbg(cd, "Invalid journal size.");
+ goto out;
+ }
+
+ /* TODO locking */
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
+ area_offset, crash_iv_offset, old_sector_size,
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ goto out;
+ }
+ read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "Failed to read journaled data.");
+ r = -EIO;
+ /* may contain plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ goto out;
+ }
+ read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
+ /* may contain plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "recovery write failed.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = 0;
+ break;
+ case REENC_PROTECTION_DATASHIFT:
+ log_dbg(cd, "Data shift based recovery.");
+
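+ /*
+ * Segment 0 in reencryption means the moved first segment is recovered
+ * from its backup location; otherwise the old data is read from the
+ * position before the data shift was applied.
+ */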
+ if (rseg == 0) {
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
+ json_segment_get_offset(rh->jobj_segment_moved, 0), 0,
+ reencrypt_get_sector_size_old(hdr),
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ } else {
+ if (rh->direction == CRYPT_REENCRYPT_FORWARD)
+ data_offset = data_offset + rh->offset + data_shift_value(rp);
+ else
+ data_offset = data_offset + rh->offset - data_shift_value(rp);
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
+ data_offset,
+ crash_iv_offset,
+ reencrypt_get_sector_size_old(hdr),
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ }
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ goto out;
+ }
+
+ read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "Failed to read data.");
+ r = -EIO;
+ /* may contain plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ goto out;
+ }
+
+ read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
+ /* may contain plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "recovery write failed.");
+ r = -EINVAL;
+ goto out;
+ }
+ r = 0;
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ if (!r)
+ rh->read = rh->length;
+out:
+ free(data_buffer);
+ free(checksum_tmp);
+ crypt_storage_wrapper_destroy(cw1);
+ crypt_storage_wrapper_destroy(cw2);
+
+ return r;
+}
+
+static int reencrypt_add_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
+{
+ int digest = rh->digest_old, s = LUKS2_segment_first_unused_id(hdr);
+
+ if (!rh->jobj_segment_moved)
+ return 0;
+
+ if (s < 0)
+ return s;
+
+ if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
+ json_object_put(rh->jobj_segment_moved);
+ return -EINVAL;
+ }
+
+ if (!strcmp(json_segment_type(rh->jobj_segment_moved), "crypt"))
+ return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
+
+ return 0;
+}
+
+static int reencrypt_add_backup_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ unsigned final)
+{
+ int digest, s = LUKS2_segment_first_unused_id(hdr);
+ json_object *jobj;
+
+ if (s < 0)
+ return s;
+
+ digest = final ? rh->digest_new : rh->digest_old;
+ jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
+
+ if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
+ json_object_put(jobj);
+ return -EINVAL;
+ }
+
+ if (strcmp(json_segment_type(jobj), "crypt"))
+ return 0;
+
+ return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
+}
+
+static int reencrypt_assign_segments_simple(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ unsigned hot,
+ unsigned commit)
+{
+ int r, sg;
+
+ if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
+ log_dbg(cd, "Setting 'hot' segments.");
+
+ r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
+ if (!r)
+ rh->jobj_segs_hot = NULL;
+ } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
+ log_dbg(cd, "Setting 'post' segments.");
+ r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
+ if (!r)
+ rh->jobj_segs_post = NULL;
+ } else {
+ log_dbg(cd, "No segments to set.");
+ return -EINVAL;
+ }
+
+ if (r) {
+ log_dbg(cd, "Failed to assign new enc segments.");
+ return r;
+ }
+
+ r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
+ if (r) {
+ log_dbg(cd, "Failed to assign reencryption previous backup segment.");
+ return r;
+ }
+
+ r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
+ if (r) {
+ log_dbg(cd, "Failed to assign reencryption final backup segment.");
+ return r;
+ }
+
+ r = reencrypt_add_moved_segment(cd, hdr, rh);
+ if (r) {
+ log_dbg(cd, "Failed to assign reencryption moved backup segment.");
+ return r;
+ }
+
+ for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
+ if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
+ LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
+ log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
+ return -EINVAL;
+ }
+ }
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+static int reencrypt_assign_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ unsigned hot,
+ unsigned commit)
+{
+ bool forward;
+ int rseg, scount, r = -EINVAL;
+
+ /* FIXME: validate in reencrypt context load */
+ if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
+ return -EINVAL;
+
+ if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
+ return -EINVAL;
+
+ if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
+ return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);
+
+ if (hot && rh->jobj_segs_hot) {
+ log_dbg(cd, "Setting 'hot' segments.");
+
+ r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
+ if (!r)
+ rh->jobj_segs_hot = NULL;
+ } else if (!hot && rh->jobj_segs_post) {
+ log_dbg(cd, "Setting 'post' segments.");
+ r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
+ if (!r)
+ rh->jobj_segs_post = NULL;
+ }
+
+ if (r)
+ return r;
+
+ scount = LUKS2_segments_count(hdr);
+
+ /* the segment in reencryption has to hold a reference to both digests */
+ rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
+ if (rseg < 0 && hot)
+ return -EINVAL;
+
+ if (rseg >= 0) {
+ LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
+ LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
+ }
+
+ forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
+ if (hot) {
+ if (rseg > 0)
+ LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
+ if (scount > rseg + 1)
+ LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
+ } else {
+ LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
+ if (scount > 1)
+ LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
+ }
+
+ r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
+ if (r) {
+ log_dbg(cd, "Failed to assign hot reencryption backup segment.");
+ return r;
+ }
+ r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
+ if (r) {
+ log_dbg(cd, "Failed to assign post reencryption backup segment.");
+ return r;
+ }
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t dev_size, uint64_t data_shift, bool move_first_segment,
+ crypt_reencrypt_direction_info di)
+{
+ int r;
+ uint64_t first_segment_offset, first_segment_length,
+ second_segment_offset, second_segment_length,
+ data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT,
+ data_size = dev_size - data_shift;
+ json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
+
+ if (dev_size < data_shift)
+ return -EINVAL;
+
+ if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
+ return -ENOTSUP;
+
+ if (move_first_segment) {
+ /*
+ * future data_device layout:
+ * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
+ */
+ first_segment_offset = dev_size;
+ if (data_size < data_shift) {
+ first_segment_length = data_size;
+ second_segment_length = second_segment_offset = 0;
+ } else {
+ first_segment_length = data_shift;
+ second_segment_offset = data_shift;
+ second_segment_length = data_size - data_shift;
+ }
+ } else if (data_shift) {
+ first_segment_offset = data_offset;
+ first_segment_length = dev_size;
+ } else {
+ /* future data_device layout with detached header: [first data segment] */
+ first_segment_offset = data_offset;
+ first_segment_length = 0; /* dynamic */
+ }
+
+ jobj_segments = json_object_new_object();
+ if (!jobj_segments)
+ return -ENOMEM;
+
+ r = -EINVAL;
+ if (move_first_segment) {
+ jobj_segment_first = json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
+ if (second_segment_length &&
+ !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
+ log_dbg(cd, "Failed to generate 2nd segment.");
+ json_object_put(jobj_segment_first);
+ json_object_put(jobj_segments);
+ return r;
+ }
+ } else
+ jobj_segment_first = json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
+
+ if (!jobj_segment_first) {
+ log_dbg(cd, "Failed to generate 1st segment.");
+ json_object_put(jobj_segment_second);
+ json_object_put(jobj_segments);
+ return r;
+ }
+
+ json_object_object_add(jobj_segments, "0", jobj_segment_first);
+ if (jobj_segment_second)
+ json_object_object_add(jobj_segments, "1", jobj_segment_second);
+
+ r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
+
+ return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0);
+}
+
+static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint64_t dev_size,
+ uint64_t moved_segment_length,
+ crypt_reencrypt_direction_info di)
+{
+ int r;
+ uint64_t first_segment_offset, first_segment_length,
+ second_segment_offset, second_segment_length,
+ data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
+ json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
+
+ if (di == CRYPT_REENCRYPT_BACKWARD)
+ return -ENOTSUP;
+
+ /*
+ * future data_device layout:
+ * [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment]
+ */
+ first_segment_offset = 0;
+ first_segment_length = moved_segment_length;
+ if (dev_size > moved_segment_length) {
+ second_segment_offset = data_offset + first_segment_length;
+ second_segment_length = 0;
+ }
+
+ jobj_segments = json_object_new_object();
+ if (!jobj_segments)
+ return -ENOMEM;
+
+ r = -EINVAL;
+ jobj_segment_first = json_segment_create_crypt(first_segment_offset,
+ crypt_get_iv_offset(cd), &first_segment_length,
+ crypt_get_cipher_spec(cd), crypt_get_sector_size(cd), 0);
+
+ if (!jobj_segment_first) {
+ log_dbg(cd, "Failed to generate 1st segment.");
+ json_object_put(jobj_segments);
+ return r;
+ }
+
+ if (dev_size > moved_segment_length) {
+ jobj_segment_second = json_segment_create_crypt(second_segment_offset,
+ crypt_get_iv_offset(cd) + (first_segment_length >> SECTOR_SHIFT),
+ second_segment_length ? &second_segment_length : NULL,
+ crypt_get_cipher_spec(cd),
+ crypt_get_sector_size(cd), 0);
+ if (!jobj_segment_second) {
+ json_object_put(jobj_segment_first);
+ json_object_put(jobj_segments);
+ log_dbg(cd, "Failed to generate 2nd segment.");
+ return r;
+ }
+ }
+
+ json_object_object_add(jobj_segments, "0", jobj_segment_first);
+ if (jobj_segment_second)
+ json_object_object_add(jobj_segments, "1", jobj_segment_second);
+
+ r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
+
+ return r ?: LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, 0, 1, 0);
+}
+
+static int reencrypt_make_targets(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct device *hz_device,
+ struct volume_key *vks,
+ struct dm_target *result,
+ uint64_t size)
+{
+ bool reenc_seg;
+ struct volume_key *vk;
+ uint64_t segment_size, segment_offset, segment_start = 0;
+ int r;
+ int s = 0;
+ json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
+
+ while (result) {
+ jobj = json_segments_get_segment(jobj_segments, s);
+ if (!jobj) {
+ log_dbg(cd, "Internal error. Segment %u is null.", s);
+ return -EINVAL;
+ }
+
+ reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
+
+ segment_offset = json_segment_get_offset(jobj, 1);
+ segment_size = json_segment_get_size(jobj, 1);
+ /* 'dynamic' length allowed in last segment only */
+ if (!segment_size && !result->next)
+ segment_size = (size >> SECTOR_SHIFT) - segment_start;
+ if (!segment_size) {
+ log_dbg(cd, "Internal error. Wrong segment size %u", s);
+ return -EINVAL;
+ }
+
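+ /* the hotzone device maps only the data area, so make the offset relative to it */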
+ if (reenc_seg)
+ segment_offset -= crypt_get_data_offset(cd);
+
+ if (!strcmp(json_segment_type(jobj), "crypt")) {
+ vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
+ if (!vk) {
+ log_err(cd, _("Missing key for dm-crypt segment %u"), s);
+ return -EINVAL;
+ }
+
+ r = dm_crypt_target_set(result, segment_start, segment_size,
+ reenc_seg ? hz_device : crypt_data_device(cd),
+ vk,
+ json_segment_get_cipher(jobj),
+ json_segment_get_iv_offset(jobj),
+ segment_offset,
+ "none",
+ 0,
+ json_segment_get_sector_size(jobj));
+ if (r) {
+ log_err(cd, _("Failed to set dm-crypt segment."));
+ return r;
+ }
+ } else if (!strcmp(json_segment_type(jobj), "linear")) {
+ r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
+ if (r) {
+ log_err(cd, _("Failed to set dm-linear segment."));
+ return r;
+ }
+ } else
+ return -EINVAL;
+
+ segment_start += segment_size;
+ s++;
+ result = result->next;
+ }
+
+ return s;
+}
+
+/* GLOBAL FIXME: audit function names and parameters names */
+
+/* FIXME:
+ * 1) audit log routines
+ * 2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
+ */
+static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
+ const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
+ uint32_t flags)
+{
+ char hz_path[PATH_MAX];
+ int r;
+
+ struct device *hz_dev = NULL;
+ struct crypt_dm_active_device dmd = {
+ .flags = flags,
+ };
+
+ log_dbg(cd, "Loading new table for overlay device %s.", overlay);
+
+ r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
+ if (r < 0 || r >= PATH_MAX) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = device_alloc(cd, &hz_dev, hz_path);
+ if (r)
+ goto out;
+
+ r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
+ if (r)
+ goto out;
+
+ r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
+ if (r < 0)
+ goto out;
+
+ r = dm_reload_device(cd, overlay, &dmd, 0, 0);
+
+ /* what else on error here? */
+out:
+ dm_targets_free(cd, &dmd);
+ device_free(cd, hz_dev);
+
+ return r;
+}
+
+static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
+{
+ int r, exists = 1;
+ struct crypt_dm_active_device dmd_source, dmd_target = {};
+ uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
+
+ log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);
+
+ /* check only whether target device exists */
+ r = dm_status_device(cd, target);
+ if (r < 0) {
+ if (r == -ENODEV)
+ exists = 0;
+ else
+ return r;
+ }
+
+ r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
+ DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);
+
+ if (r < 0)
+ return r;
+
+ if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
+ goto out;
+
+ dmd_source.flags |= flags;
+ dmd_source.uuid = crypt_get_uuid(cd);
+
+ if (exists) {
+ if (dmd_target.size != dmd_source.size) {
+ log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
+ dmd_source.size, dmd_target.size);
+ r = -EINVAL;
+ goto out;
+ }
+ r = dm_reload_device(cd, target, &dmd_source, 0, 0);
+ if (!r) {
+ log_dbg(cd, "Resuming device %s", target);
+ r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
+ }
+ } else
+ r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
+out:
+ dm_targets_free(cd, &dmd_source);
+ dm_targets_free(cd, &dmd_target);
+
+ return r;
+}
+
+static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
+ const char *new_backend_name)
+{
+ int r;
+ struct device *overlay_dev = NULL;
+ char overlay_path[PATH_MAX] = { 0 };
+ struct crypt_dm_active_device dmd = {};
+
+ log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
+
+ r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
+ if (r < 0 || r >= PATH_MAX) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = device_alloc(cd, &overlay_dev, overlay_path);
+ if (r)
+ goto out;
+
+ r = device_block_adjust(cd, overlay_dev, DEV_OK,
+ 0, &dmd.size, &dmd.flags);
+ if (r)
+ goto out;
+
+ r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
+ if (r)
+ goto out;
+
+ r = dm_reload_device(cd, name, &dmd, 0, 0);
+ if (!r) {
+ log_dbg(cd, "Resuming device %s", name);
+ r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
+ }
+
+out:
+ dm_targets_free(cd, &dmd);
+ device_free(cd, overlay_dev);
+
+ return r;
+}
+
+static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
+{
+ int r;
+ uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
+
+ struct crypt_dm_active_device dmd = {
+ .flags = flags,
+ .uuid = crypt_get_uuid(cd),
+ .size = device_size >> SECTOR_SHIFT
+ };
+
+ log_dbg(cd, "Activating hotzone device %s.", name);
+
+ r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
+ new_offset, &dmd.size, &dmd.flags);
+ if (r)
+ goto out;
+
+ r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
+ if (r)
+ goto out;
+
+ r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
+out:
+ dm_targets_free(cd, &dmd);
+
+ return r;
+}
+
+static int reencrypt_init_device_stack(struct crypt_device *cd,
+ const struct luks2_reencrypt *rh)
+{
+ int r;
+
+ /* Activate hotzone device 1:1 linear mapping to data_device */
+ r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
+ if (r) {
+ log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
+ return r;
+ }
+
+ /*
+ * Activate the overlay device with exactly the same table as the original
+ * 'name' mapping. Note that within this step the 'name' device may already
+ * include a table constructed from more than a single dm-crypt segment,
+ * so the mapping is transferred as is.
+ *
+ * If we're about to resume reencryption, the original mapping has to be
+ * already validated after an abrupt shutdown and rchunk_offset has to
+ * point to the next chunk to reencrypt!
+ *
+ * TODO: in crypt_activate_by*
+ */
+ r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
+ if (r) {
+ log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
+ goto err;
+ }
+
+ /* swap origin mapping to overlay device */
+ r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
+ if (r) {
+ log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
+ goto err;
+ }
+
+ /*
+ * Now the 'name' (unlocked LUKS) device is mapped via dm-linear to the
+ * overlay device. The overlay device holds the original live table of
+ * the 'name' device from before the swap.
+ */
+
+ return 0;
+err:
+ /* TODO: force error helper devices on error path */
+ dm_remove_device(cd, rh->overlay_name, 0);
+ dm_remove_device(cd, rh->hotzone_name, 0);
+
+ return r;
+}
+
+/* TODO:
+ * 1) audit error path. any error in this routine is fatal and should be unlikely.
+ * usually it would hint some collision with another userspace process touching
+ * dm devices directly.
+ */
+static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
+{
+ int r;
+
+ /*
+ * We have to explicitly suspend the overlay device before suspending
+ * the hotzone one. Resuming the overlay device (i.e. switching tables)
+ * only after suspending the hotzone may lead to deadlock.
+ *
+ * In other words: always suspend the stack from top to bottom!
+ */
+ r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
+ if (r) {
+ log_err(cd, _("Failed to suspend device %s."), overlay);
+ return r;
+ }
+
+ /* suspend HZ device */
+ r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
+ if (r) {
+ log_err(cd, _("Failed to suspend device %s."), hotzone);
+ return r;
+ }
+
+ /* resume overlay device: inactive table (with hotzone) -> live */
+ r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
+ if (r)
+ log_err(cd, _("Failed to resume device %s."), overlay);
+
+ return r;
+}
+
+static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *overlay,
+ const char *hotzone,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags)
+{
+ int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
+ if (r) {
+ log_err(cd, _("Failed to reload device %s."), overlay);
+ return REENC_ERR;
+ }
+
+ r = reenc_refresh_helper_devices(cd, overlay, hotzone);
+ if (r) {
+ log_err(cd, _("Failed to refresh reencryption devices stack."));
+ return REENC_ROLLBACK;
+ }
+
+ return REENC_OK;
+}
+
+static int reencrypt_move_data(struct crypt_device *cd,
+ int devfd,
+ uint64_t data_shift,
+ crypt_reencrypt_mode_info mode)
+{
+ void *buffer;
+ int r;
+ ssize_t ret;
+ uint64_t buffer_len, offset,
+ read_offset = (mode == CRYPT_REENCRYPT_ENCRYPT ? 0 : data_shift);
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+ offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
+ buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0);
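+ /* the moved first segment must exist and must fit within the data shift area */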
+ if (!buffer_len || buffer_len > data_shift)
+ return -EINVAL;
+
+ if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
+ return -ENOMEM;
+
+ ret = read_lseek_blockwise(devfd,
+ device_block_size(cd, crypt_data_device(cd)),
+ device_alignment(crypt_data_device(cd)),
+ buffer, buffer_len, read_offset);
+ if (ret < 0 || (uint64_t)ret != buffer_len) {
+ log_dbg(cd, "Failed to read data at offset %" PRIu64 " (size: %zu)",
+ read_offset, buffer_len);
+ r = -EIO;
+ goto out;
+ }
+
+ log_dbg(cd, "Going to write %" PRIu64 " bytes read at offset %" PRIu64 " to new offset %" PRIu64,
+ buffer_len, read_offset, offset);
+ ret = write_lseek_blockwise(devfd,
+ device_block_size(cd, crypt_data_device(cd)),
+ device_alignment(crypt_data_device(cd)),
+ buffer, buffer_len, offset);
+ if (ret < 0 || (uint64_t)ret != buffer_len) {
+ log_dbg(cd, "Failed to write data at offset %" PRIu64 " (size: %zu)",
+ offset, buffer_len);
+ r = -EIO;
+ goto out;
+ }
+
+ r = 0;
+out:
+ crypt_safe_memzero(buffer, buffer_len);
+ free(buffer);
+ return r;
+}
+
+static int reencrypt_make_backup_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot_new,
+ const char *cipher,
+ uint64_t data_offset,
+ const struct crypt_params_reencrypt *params)
+{
+ int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
+ json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
+ uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
+ uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT,
+ device_size = params->device_size << SECTOR_SHIFT;
+
+ if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
+ digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
+ if (digest_new < 0)
+ return -EINVAL;
+ }
+
+ if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
+ digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
+ if (digest_old < 0)
+ return -EINVAL;
+ }
+
+ segment = LUKS2_segment_first_unused_id(hdr);
+ if (segment < 0)
+ return -EINVAL;
+
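+ /* keep a copy of the original first segment flagged "backup-moved-segment" so its data can be relocated */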
+ if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
+ if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp)) {
+ r = -EINVAL;
+ goto err;
+ }
+ r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
+ if (r)
+ goto err;
+ moved_segment = segment++;
+ json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
+ if (!strcmp(json_segment_type(jobj_segment_bcp), "crypt"))
+ LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0);
+ }
+
+ /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
+ if (digest_old >= 0) {
+ if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
+ jobj_tmp = LUKS2_get_segment_jobj(hdr, 0);
+ if (!jobj_tmp) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ jobj_segment_old = json_segment_create_crypt(data_offset,
+ json_segment_get_iv_offset(jobj_tmp),
+ device_size ? &device_size : NULL,
+ json_segment_get_cipher(jobj_tmp),
+ json_segment_get_sector_size(jobj_tmp),
+ 0);
+ } else {
+ if (json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old)) {
+ r = -EINVAL;
+ goto err;
+ }
+ }
+ } else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
+ r = LUKS2_get_data_size(hdr, &tmp, NULL);
+ if (r)
+ goto err;
+
+ if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)
+ jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
+ else
+ jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0);
+ }
+
+ if (!jobj_segment_old) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
+ if (r)
+ goto err;
+ json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
+ jobj_segment_old = NULL;
+ if (digest_old >= 0)
+ LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
+ segment++;
+
+ if (digest_new >= 0) {
+ segment_offset = data_offset;
+ if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
+ modify_offset(&segment_offset, data_shift, params->direction)) {
+ r = -EINVAL;
+ goto err;
+ }
+ jobj_segment_new = json_segment_create_crypt(segment_offset,
+ crypt_get_iv_offset(cd),
+ NULL, cipher, sector_size, 0);
+ } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
+ segment_offset = data_offset;
+ if (modify_offset(&segment_offset, data_shift, params->direction)) {
+ r = -EINVAL;
+ goto err;
+ }
+ jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
+ }
+
+ if (!jobj_segment_new) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
+ if (r)
+ goto err;
+ json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
+ jobj_segment_new = NULL;
+ if (digest_new >= 0)
+ LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
+
+ /* FIXME: also check occupied space by keyslot in shrunk area */
+ if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
+ crypt_metadata_device(cd) == crypt_data_device(cd) &&
+ LUKS2_set_keyslots_size(hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
+ log_err(cd, _("Failed to set new keyslots area size."));
+ r = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ json_object_put(jobj_segment_new);
+ json_object_put(jobj_segment_old);
+ return r;
+}
+
+static int reencrypt_verify_single_key(struct crypt_device *cd, int digest, struct volume_key *vks)
+{
+ struct volume_key *vk;
+
+ vk = crypt_volume_key_by_id(vks, digest);
+ if (!vk)
+ return -ENOENT;
+
+ if (LUKS2_digest_verify_by_digest(cd, digest, vk) != digest)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int reencrypt_verify_keys(struct crypt_device *cd,
+ int digest_old,
+ int digest_new,
+ struct volume_key *vks)
+{
+ int r;
+
+ if (digest_new >= 0 && (r = reencrypt_verify_single_key(cd, digest_new, vks)))
+ return r;
+
+ if (digest_old >= 0 && (r = reencrypt_verify_single_key(cd, digest_old, vks)))
+ return r;
+
+ return 0;
+}
+
+static int reencrypt_upload_single_key(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int digest,
+ struct volume_key *vks)
+{
+ struct volume_key *vk;
+
+ vk = crypt_volume_key_by_id(vks, digest);
+ if (!vk)
+ return -EINVAL;
+
+ return LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, digest);
+}
+
+static int reencrypt_upload_keys(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int digest_old,
+ int digest_new,
+ struct volume_key *vks)
+{
+ int r;
+
+ if (!crypt_use_keyring_for_vk(cd))
+ return 0;
+
+ if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr)) &&
+ (r = reencrypt_upload_single_key(cd, hdr, digest_new, vks)))
+ return r;
+
+ if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr)) &&
+ (r = reencrypt_upload_single_key(cd, hdr, digest_old, vks))) {
+ crypt_drop_keyring_key(cd, vks);
+ return r;
+ }
+
+ return 0;
+}
+
+static int reencrypt_verify_and_upload_keys(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int digest_old,
+ int digest_new,
+ struct volume_key *vks)
+{
+ int r;
+
+ r = reencrypt_verify_keys(cd, digest_old, digest_new, vks);
+ if (r)
+ return r;
+
+ r = reencrypt_upload_keys(cd, hdr, digest_old, digest_new, vks);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int reencrypt_verify_checksum_params(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params)
+{
+ size_t len;
+ struct crypt_hash *ch;
+
+ assert(params);
+
+ if (!params->hash)
+ return -EINVAL;
+
+ len = strlen(params->hash);
+ if (!len || len > (LUKS2_CHECKSUM_ALG_L - 1))
+ return -EINVAL;
+
+ if (crypt_hash_size(params->hash) <= 0)
+ return -EINVAL;
+
+ if (crypt_hash_init(&ch, params->hash)) {
+ log_err(cd, _("Hash algorithm %s is not available."), params->hash);
+ return -EINVAL;
+ }
+ /* We just check for alg availability */
+ crypt_hash_destroy(ch);
+
+ return 0;
+}
+
+static int reencrypt_verify_datashift_params(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params,
+ uint32_t sector_size)
+{
+ assert(params);
+
+ if (!params->data_shift)
+ return -EINVAL;
+ if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
+ log_err(cd, _("Data shift value is not aligned to encryption sector size (%" PRIu32 " bytes)."),
+ sector_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int reencrypt_verify_resilience_params(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params,
+ uint32_t sector_size, bool move_first_segment)
+{
+ /* no change requested */
+ if (!params || !params->resilience)
+ return 0;
+
+ if (!strcmp(params->resilience, "journal"))
+ return (params->data_shift || move_first_segment) ? -EINVAL : 0;
+ else if (!strcmp(params->resilience, "none"))
+ return (params->data_shift || move_first_segment) ? -EINVAL : 0;
+ else if (!strcmp(params->resilience, "datashift"))
+ return reencrypt_verify_datashift_params(cd, params, sector_size);
+ else if (!strcmp(params->resilience, "checksum")) {
+ if (params->data_shift || move_first_segment)
+ return -EINVAL;
+ return reencrypt_verify_checksum_params(cd, params);
+ } else if (!strcmp(params->resilience, "datashift-checksum")) {
+ if (!move_first_segment ||
+ reencrypt_verify_datashift_params(cd, params, sector_size))
+ return -EINVAL;
+ return reencrypt_verify_checksum_params(cd, params);
+ } else if (!strcmp(params->resilience, "datashift-journal")) {
+ if (!move_first_segment)
+ return -EINVAL;
+ return reencrypt_verify_datashift_params(cd, params, sector_size);
+ }
+
+ log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
+ return -EINVAL;
+}
+
+static int reencrypt_decrypt_with_datashift_init(struct crypt_device *cd,
+ const char *name,
+ struct luks2_hdr *hdr,
+ int reencrypt_keyslot,
+ uint32_t sector_size,
+ uint64_t data_size,
+ uint64_t data_offset,
+ const char *passphrase,
+ size_t passphrase_size,
+ int keyslot_old,
+ const struct crypt_params_reencrypt *params,
+ struct volume_key **vks)
+{
+ bool clear_table = false;
+ int r, devfd = -1;
+ uint64_t data_shift, max_moved_segment_length, moved_segment_length;
+ struct reenc_protection check_rp = {};
+ struct crypt_dm_active_device dmd_target, dmd_source = {
+ .uuid = crypt_get_uuid(cd),
+ .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
+ };
+ json_object *jobj_segments_old;
+
+ assert(hdr);
+ assert(params);
+ assert(params->resilience);
+ assert(params->data_shift);
+ assert(vks);
+
+ if (!data_offset)
+ return -EINVAL;
+
+ if (params->max_hotzone_size > params->data_shift) {
+ log_err(cd, _("Moved segment size can not be greater than data shift value."));
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Initializing decryption with datashift.");
+
+ data_shift = params->data_shift << SECTOR_SHIFT;
+
+	/*
+	 * In offline mode we must perform the data move with an exclusively
+	 * opened data device in order to exclude a concurrent LUKS2 decryption
+	 * process or filesystem mount.
+	 */
+ if (name)
+ devfd = device_open(cd, crypt_data_device(cd), O_RDWR);
+ else
+ devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
+ if (devfd < 0)
+ return -EINVAL;
+
+ /* in-memory only */
+ moved_segment_length = params->max_hotzone_size << SECTOR_SHIFT;
+ if (!moved_segment_length)
+ moved_segment_length = data_shift < LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH ?
+ data_shift : LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
+
+ if (moved_segment_length > data_size)
+ moved_segment_length = data_size;
+
+ r = reencrypt_set_decrypt_shift_segments(cd, hdr, data_size,
+ moved_segment_length,
+ params->direction);
+ if (r)
+ goto out;
+
+ r = reencrypt_make_backup_segments(cd, hdr, CRYPT_ANY_SLOT, NULL, data_offset, params);
+ if (r) {
+ log_dbg(cd, "Failed to create reencryption backup device segments.");
+ goto out;
+ }
+
+ r = reencrypt_verify_resilience_params(cd, params, sector_size, true);
+ if (r < 0) {
+ log_err(cd, _("Invalid reencryption resilience parameters."));
+ goto out;
+ }
+
+ r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
+ params, reencrypt_get_alignment(cd, hdr));
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_keyslot_reencrypt_load(cd, hdr, reencrypt_keyslot, &check_rp, false);
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp,
+ reencrypt_keyslot,
+ &max_moved_segment_length);
+ if (r < 0)
+ goto out;
+
+ LUKS2_reencrypt_protection_erase(&check_rp);
+
+ if (moved_segment_length > max_moved_segment_length) {
+ log_err(cd, _("Moved segment too large. Requested size %" PRIu64 ", available space for: %" PRIu64 "."),
+ moved_segment_length, max_moved_segment_length);
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, CRYPT_ANY_SLOT,
+ passphrase, passphrase_size, vks);
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, *vks);
+ if (r < 0)
+ goto out;
+
+ if (name) {
+ r = reencrypt_verify_and_upload_keys(cd, hdr,
+ LUKS2_reencrypt_digest_old(hdr),
+ LUKS2_reencrypt_digest_new(hdr),
+ *vks);
+ if (r)
+ goto out;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
+ DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
+ DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
+ if (r < 0)
+ goto out;
+
+ jobj_segments_old = reencrypt_segments_old(hdr);
+ if (!jobj_segments_old) {
+ r = -EINVAL;
+ goto out;
+ }
+ r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, jobj_segments_old, &dmd_source);
+ if (!r) {
+ r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
+ if (r)
+ log_err(cd, _("Mismatching parameters on device %s."), name);
+ }
+ json_object_put(jobj_segments_old);
+
+ dm_targets_free(cd, &dmd_source);
+ dm_targets_free(cd, &dmd_target);
+ free(CONST_CAST(void*)dmd_target.uuid);
+
+ if (r)
+ goto out;
+
+ dmd_source.size = dmd_target.size;
+ r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
+ if (!r) {
+ r = dm_reload_device(cd, name, &dmd_source, dmd_target.flags, 0);
+ if (r)
+ log_err(cd, _("Failed to reload device %s."), name);
+ else
+ clear_table = true;
+ }
+
+ dm_targets_free(cd, &dmd_source);
+
+ if (r)
+ goto out;
+ }
+
+ if (name) {
+ r = dm_suspend_device(cd, name, DM_SUSPEND_SKIP_LOCKFS);
+ if (r) {
+ log_err(cd, _("Failed to suspend device %s."), name);
+ goto out;
+ }
+ }
+
+ if (reencrypt_move_data(cd, devfd, data_shift, params->mode)) {
+ r = -EIO;
+ goto out;
+ }
+
+	/* This must be the first and only write to LUKS2 metadata during _reencrypt_init */
+ r = reencrypt_update_flag(cd, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, true, true);
+ if (r) {
+ log_dbg(cd, "Failed to set online-reencryption requirement.");
+ r = -EINVAL;
+ } else
+ r = reencrypt_keyslot;
+out:
+ if (r < 0 && clear_table && dm_clear_device(cd, name))
+ log_err(cd, _("Failed to clear table."));
+ else if (clear_table && dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS))
+ log_err(cd, _("Failed to resume device %s."), name);
+
+ device_release_excl(cd, crypt_data_device(cd));
+ if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
+ log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");
+
+ return r;
+}
+
+/* This function must be called with metadata lock held */
+static int reencrypt_init(struct crypt_device *cd,
+ const char *name,
+ struct luks2_hdr *hdr,
+ const char *passphrase,
+ size_t passphrase_size,
+ int keyslot_old,
+ int keyslot_new,
+ const char *cipher,
+ const char *cipher_mode,
+ const struct crypt_params_reencrypt *params,
+ struct volume_key **vks)
+{
+ bool move_first_segment;
+ char _cipher[128];
+ uint32_t check_sector_size, new_sector_size, old_sector_size;
+ int r, reencrypt_keyslot, devfd = -1;
+ uint64_t data_offset, data_size = 0;
+ struct crypt_dm_active_device dmd_target, dmd_source = {
+ .uuid = crypt_get_uuid(cd),
+ .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
+ };
+
+ assert(cd);
+ assert(hdr);
+
+ if (!params || !params->resilience || params->mode > CRYPT_REENCRYPT_DECRYPT)
+ return -EINVAL;
+
+ if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
+ (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
+ return -EINVAL;
+
+ log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
+ crypt_reencrypt_mode_to_str(params->mode));
+
+ move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);
+
+ old_sector_size = LUKS2_get_sector_size(hdr);
+
+ /* implicit sector size 512 for decryption */
+ new_sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
+ if (new_sector_size < SECTOR_SIZE || new_sector_size > MAX_SECTOR_SIZE ||
+ NOTPOW2(new_sector_size)) {
+ log_err(cd, _("Unsupported encryption sector size."));
+ return -EINVAL;
+ }
+ /* check the larger encryption sector size only */
+ check_sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;
+
+ if (!cipher_mode || *cipher_mode == '\0')
+ r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
+ else
+ r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
+ if (r < 0 || (size_t)r >= sizeof(_cipher))
+ return -EINVAL;
+
+ data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
+
+ r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
+ if (r)
+ return r;
+
+ r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
+ if (r)
+ return r;
+
+ r = device_size(crypt_data_device(cd), &data_size);
+ if (r)
+ return r;
+
+ data_size -= data_offset;
+
+ if (params->device_size) {
+ if ((params->device_size << SECTOR_SHIFT) > data_size) {
+ log_err(cd, _("Reduced data size is larger than real device size."));
+ return -EINVAL;
+ } else
+ data_size = params->device_size << SECTOR_SHIFT;
+ }
+
+ if (MISALIGNED(data_size, check_sector_size)) {
+ log_err(cd, _("Data device is not aligned to encryption sector size (%" PRIu32 " bytes)."), check_sector_size);
+ return -EINVAL;
+ }
+
+ reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
+ if (reencrypt_keyslot < 0) {
+ log_err(cd, _("All key slots full."));
+ return -EINVAL;
+ }
+
+ if (params->mode == CRYPT_REENCRYPT_DECRYPT && (params->data_shift > 0) && move_first_segment)
+ return reencrypt_decrypt_with_datashift_init(cd, name, hdr,
+ reencrypt_keyslot,
+ check_sector_size,
+ data_size,
+ data_offset,
+ passphrase,
+ passphrase_size,
+ keyslot_old,
+ params,
+ vks);
+
+	/*
+	 * We must perform the data move with an exclusively opened data device
+	 * to prevent another cryptsetup process (or a mount) from colliding
+	 * with the encryption initialization.
+	 */
+ if (move_first_segment) {
+ if (data_size < (params->data_shift << SECTOR_SHIFT)) {
+ log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
+ return -EINVAL;
+ }
+ if (params->data_shift < LUKS2_get_data_offset(hdr)) {
+ log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."),
+ params->data_shift, LUKS2_get_data_offset(hdr));
+ return -EINVAL;
+ }
+ devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
+ if (devfd < 0) {
+ if (devfd == -EBUSY)
+ log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."),
+ device_path(crypt_data_device(cd)));
+ return -EINVAL;
+ }
+ }
+
+ if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
+ /* in-memory only */
+ r = reencrypt_set_encrypt_segments(cd, hdr, data_size,
+ params->data_shift << SECTOR_SHIFT,
+ move_first_segment,
+ params->direction);
+ if (r)
+ goto out;
+ }
+
+ r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
+ if (r) {
+ log_dbg(cd, "Failed to create reencryption backup device segments.");
+ goto out;
+ }
+
+ r = reencrypt_verify_resilience_params(cd, params, check_sector_size, move_first_segment);
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, params,
+ reencrypt_get_alignment(cd, hdr));
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_REENCRYPT_REQ_VERSION, *vks);
+ if (r < 0)
+ goto out;
+
+ if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
+ r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
+ if (r)
+ goto out;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
+ DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
+ DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
+ if (!r) {
+ r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
+ if (r)
+ log_err(cd, _("Mismatching parameters on device %s."), name);
+ }
+
+ dm_targets_free(cd, &dmd_source);
+ dm_targets_free(cd, &dmd_target);
+ free(CONST_CAST(void*)dmd_target.uuid);
+
+ if (r)
+ goto out;
+ }
+
+ if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT, params->mode)) {
+ r = -EIO;
+ goto out;
+ }
+
+	/* This must be the first and only write to LUKS2 metadata during _reencrypt_init */
+ r = reencrypt_update_flag(cd, LUKS2_REENCRYPT_REQ_VERSION, true, true);
+ if (r) {
+ log_dbg(cd, "Failed to set online-reencryption requirement.");
+ r = -EINVAL;
+ } else
+ r = reencrypt_keyslot;
+out:
+ device_release_excl(cd, crypt_data_device(cd));
+ if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
+ log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");
+
+ return r;
+}
+
+static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
+ struct luks2_hdr *hdr, int reencrypt_keyslot,
+ const struct reenc_protection *rp,
+ const void *buffer, size_t buffer_len)
+{
+ const void *pbuffer;
+ size_t data_offset, len;
+ int r;
+
+ assert(hdr);
+ assert(rp);
+
+ if (rp->type == REENC_PROTECTION_NONE)
+ return 0;
+
+ if (rp->type == REENC_PROTECTION_CHECKSUM) {
+ log_dbg(cd, "Checksums hotzone resilience.");
+
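+		/* Hash each block_size chunk of the hotzone buffer and collect
+		 * the digests; they are stored in the reencrypt keyslot area below. */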
+ for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rp->p.csum.block_size, len += rp->p.csum.hash_size) {
+ if (crypt_hash_write(rp->p.csum.ch, (const char *)buffer + data_offset, rp->p.csum.block_size)) {
+ log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
+ return -EINVAL;
+ }
+ if (crypt_hash_final(rp->p.csum.ch, (char *)rp->p.csum.checksums + len, rp->p.csum.hash_size)) {
+ log_dbg(cd, "Failed to finalize hash.");
+ return -EINVAL;
+ }
+ }
+ pbuffer = rp->p.csum.checksums;
+ } else if (rp->type == REENC_PROTECTION_JOURNAL) {
+ log_dbg(cd, "Journal hotzone resilience.");
+ len = buffer_len;
+ pbuffer = buffer;
+ } else if (rp->type == REENC_PROTECTION_DATASHIFT) {
+ log_dbg(cd, "Data shift hotzone resilience.");
+ return LUKS2_hdr_write(cd, hdr);
+ } else
+ return -EINVAL;
+
+ log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);
+
+ r = LUKS2_keyslot_reencrypt_store(cd, hdr, reencrypt_keyslot, pbuffer, len);
+
+ return r > 0 ? 0 : r;
+}
+
+static int reencrypt_context_update(struct crypt_device *cd,
+ struct luks2_reencrypt *rh)
+{
+ if (rh->read < 0)
+ return -EINVAL;
+
+ if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
+ if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
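+			/* Backward datashift encryption: step the hotzone down by
+			 * one data shift per iteration; the branches below clamp
+			 * the tail chunks as the offset approaches zero. */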
+ if (rh->offset)
+ rh->offset -= data_shift_value(&rh->rp);
+ if (rh->offset && (rh->offset < data_shift_value(&rh->rp))) {
+ rh->length = rh->offset;
+ rh->offset = data_shift_value(&rh->rp);
+ }
+ if (!rh->offset)
+ rh->length = data_shift_value(&rh->rp);
+ } else {
+ if (rh->offset < rh->length)
+ rh->length = rh->offset;
+ rh->offset -= rh->length;
+ }
+ } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
+ rh->offset += (uint64_t)rh->read;
+ if (rh->device_size == rh->offset &&
+ rh->jobj_segment_moved &&
+ rh->mode == CRYPT_REENCRYPT_DECRYPT &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT) {
+ rh->offset = 0;
+ rh->length = json_segment_get_size(rh->jobj_segment_moved, 0);
+ }
+		/* the device_size < rh->offset case is rejected below */
+ else if (rh->device_size - rh->offset < rh->length)
+ rh->length = rh->device_size - rh->offset;
+ } else
+ return -EINVAL;
+
+ if (rh->device_size < rh->offset) {
+ log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
+ return -EINVAL;
+ }
+
+ rh->progress += (uint64_t)rh->read;
+
+ return 0;
+}
+
+static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t device_size,
+ uint64_t max_hotzone_size,
+ uint64_t required_device_size,
+ struct volume_key *vks,
+ struct luks2_reencrypt **rh)
+{
+ int r;
+ struct luks2_reencrypt *tmp = NULL;
+ crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
+
+ if (ri == CRYPT_REENCRYPT_NONE) {
+ log_err(cd, _("Device not marked for LUKS2 reencryption."));
+ return -EINVAL;
+ } else if (ri == CRYPT_REENCRYPT_INVALID)
+ return -EINVAL;
+
+ r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
+ if (r < 0)
+ return r;
+
+ if (ri == CRYPT_REENCRYPT_CLEAN)
+ r = reencrypt_load_clean(cd, hdr, device_size, max_hotzone_size, required_device_size, &tmp);
+ else if (ri == CRYPT_REENCRYPT_CRASH)
+ r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
+ else
+ r = -EINVAL;
+
+ if (r < 0 || !tmp) {
+ log_err(cd, _("Failed to load LUKS2 reencryption context."));
+ return r;
+ }
+
+ *rh = tmp;
+
+ return 0;
+}
+#endif
+static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
+{
+ int r;
+ char *lock_resource;
+
+ if (!crypt_metadata_locking_enabled()) {
+ *reencrypt_lock = NULL;
+ return 0;
+ }
+
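+	/* One lock resource per LUKS2 header UUID, so concurrent
+	 * reencryption processes serialize on the same name. */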
+ r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
+ if (r < 0)
+ return -ENOMEM;
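+	/* The "LUKS2-reencryption-" prefix is 19 characters; a shorter
+	 * result means the UUID part is missing. */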
+ if (r < 20) {
+ free(lock_resource);
+ return -EINVAL;
+ }
+
+ r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
+
+ free(lock_resource);
+
+ return r;
+}
+
+/* internal only */
+int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
+ struct crypt_lock_handle **reencrypt_lock)
+{
+ int r;
+ char hdr_uuid[37];
+ const char *uuid = crypt_get_uuid(cd);
+
+ if (!dm_uuid)
+ return -EINVAL;
+
+ if (!uuid) {
+ r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
+ dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
+ if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
+ return -EINVAL;
+ } else if (crypt_uuid_cmp(dm_uuid, uuid))
+ return -EINVAL;
+
+ return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
+}
+
+/* internal only */
+int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
+{
+ if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
+ return -EINVAL;
+
+ return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
+}
+
+/* internal only */
+void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
+{
+ crypt_unlock_internal(cd, reencrypt_lock);
+}
+#if USE_LUKS2_REENCRYPTION
+static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct crypt_lock_handle **reencrypt_lock)
+{
+ int r;
+ crypt_reencrypt_info ri;
+ struct crypt_lock_handle *h;
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_INVALID) {
+ log_err(cd, _("Failed to get reencryption state."));
+ return -EINVAL;
+ }
+ if (ri < CRYPT_REENCRYPT_CLEAN) {
+ log_err(cd, _("Device is not in reencryption."));
+ return -EINVAL;
+ }
+
+ r = LUKS2_reencrypt_lock(cd, &h);
+ if (r < 0) {
+ if (r == -EBUSY)
+ log_err(cd, _("Reencryption process is already running."));
+ else
+ log_err(cd, _("Failed to acquire reencryption lock."));
+ return r;
+ }
+
+ /* With reencryption lock held, reload device context and verify metadata state */
+ r = crypt_load(cd, CRYPT_LUKS2, NULL);
+ if (r) {
+ LUKS2_reencrypt_unlock(cd, h);
+ return r;
+ }
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_CLEAN) {
+ *reencrypt_lock = h;
+ return 0;
+ }
+
+ LUKS2_reencrypt_unlock(cd, h);
+ log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
+ return -EINVAL;
+}
+
+static int reencrypt_load_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ int keyslot_old,
+ int keyslot_new,
+ struct volume_key **vks,
+ const struct crypt_params_reencrypt *params)
+{
+ int r, reencrypt_slot;
+ struct luks2_hdr *hdr;
+ struct crypt_lock_handle *reencrypt_lock;
+ struct luks2_reencrypt *rh;
+ const struct volume_key *vk;
+ size_t alignment;
+ uint32_t old_sector_size, new_sector_size, sector_size;
+ struct crypt_dm_active_device dmd_target, dmd_source = {
+ .uuid = crypt_get_uuid(cd),
+ .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
+ };
+ uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0,
+ max_hotzone_size = 0;
+ bool dynamic;
+ uint32_t flags = 0;
+
+ assert(cd);
+
+ hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ if (!hdr)
+ return -EINVAL;
+
+ log_dbg(cd, "Loading LUKS2 reencryption context.");
+
+ old_sector_size = reencrypt_get_sector_size_old(hdr);
+ new_sector_size = reencrypt_get_sector_size_new(hdr);
+ sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;
+
+ r = reencrypt_verify_resilience_params(cd, params, sector_size,
+ LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0);
+ if (r < 0)
+ return r;
+
+ if (params) {
+ required_size = params->device_size;
+ max_hotzone_size = params->max_hotzone_size;
+ }
+
+ rh = crypt_get_luks2_reencrypt(cd);
+ if (rh) {
+ LUKS2_reencrypt_free(cd, rh);
+ crypt_set_luks2_reencrypt(cd, NULL);
+ rh = NULL;
+ }
+
+ r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
+ if (r)
+ return r;
+
+ reencrypt_slot = LUKS2_find_keyslot(hdr, "reencrypt");
+ if (reencrypt_slot < 0) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ /* From now on we hold reencryption lock */
+
+ if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic)) {
+ r = -EINVAL;
+ goto err;
+ }
+
+	/* some configurations provide a fixed device size */
+ r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
+ if (r) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ minimal_size >>= SECTOR_SHIFT;
+
+ r = reencrypt_verify_keys(cd, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
+ if (r == -ENOENT) {
+ log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
+ r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
+ }
+
+ if (r < 0)
+ goto err;
+
+ if (name) {
+ r = reencrypt_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
+ if (r < 0)
+ goto err;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
+ DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
+ DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
+ if (r < 0)
+ goto err;
+ flags = dmd_target.flags;
+
+		/*
+		 * By default the reencryption code aims to retain flags from the
+		 * existing dm device. The keyring activation flag cannot be
+		 * inherited if the original cipher is null.
+		 *
+		 * In this case override the flag based on the decision made in
+		 * reencrypt_upload_keys above, which checks whether the new VK
+		 * is eligible for the keyring.
+		 */
+ vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
+ if (vk && vk->key_description && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
+ flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+ dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+ }
+
+ r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
+ if (!r) {
+ r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
+ if (r)
+ log_err(cd, _("Mismatching parameters on device %s."), name);
+ }
+
+ dm_targets_free(cd, &dmd_source);
+ dm_targets_free(cd, &dmd_target);
+ free(CONST_CAST(void*)dmd_target.uuid);
+ if (r)
+ goto err;
+ mapping_size = dmd_target.size;
+ }
+
+ r = -EINVAL;
+ if (required_size && mapping_size && (required_size != mapping_size)) {
+ log_err(cd, _("Active device size and requested reencryption size don't match."));
+ goto err;
+ }
+
+ if (mapping_size)
+ required_size = mapping_size;
+
+ if (required_size) {
+ /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
+ if ((minimal_size && (required_size < minimal_size)) ||
+ (required_size > (device_size >> SECTOR_SHIFT)) ||
+ (!dynamic && (required_size != minimal_size)) ||
+ (old_sector_size > 0 && MISALIGNED(required_size, old_sector_size >> SECTOR_SHIFT)) ||
+ (new_sector_size > 0 && MISALIGNED(required_size, new_sector_size >> SECTOR_SHIFT))) {
+ log_err(cd, _("Illegal device size requested in reencryption parameters."));
+ goto err;
+ }
+ }
+
+ alignment = reencrypt_get_alignment(cd, hdr);
+
+ r = LUKS2_keyslot_reencrypt_update_needed(cd, hdr, reencrypt_slot, params, alignment);
+ if (r > 0) /* metadata update needed */
+ r = LUKS2_keyslot_reencrypt_update(cd, hdr, reencrypt_slot, params, alignment, *vks);
+ if (r < 0)
+ goto err;
+
+ r = reencrypt_load(cd, hdr, device_size, max_hotzone_size, required_size, *vks, &rh);
+ if (r < 0 || !rh)
+ goto err;
+
+ if (name && (r = reencrypt_context_set_names(rh, name)))
+ goto err;
+
+	/* Ensure the device is not mounted and no dm mapping is active */
+ if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
+ log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
+ r = -EBUSY;
+ goto err;
+ }
+ device_release_excl(cd, crypt_data_device(cd));
+
+	/* There's a race with dm device activations not managed by cryptsetup:
+	 *
+	 * 1) exclusive open is closed
+	 * 2) a rogue dm device is activated
+	 * 3) one or more dm-crypt based wrappers are activated
+	 * 4) the next exclusive open is skipped due to 3), so the device
+	 *    from 2) remains undetected.
+	 */
+ r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
+ if (r)
+ goto err;
+
+	/* If one of the wrappers is based on the dm-crypt fallback, it has already blocked mounts */
+ if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
+ crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
+ if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
+ log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
+ r = -EBUSY;
+ goto err;
+ }
+ }
+
+ rh->flags = flags;
+
+ MOVE_REF(rh->vks, *vks);
+ MOVE_REF(rh->reenc_lock, reencrypt_lock);
+
+ crypt_set_luks2_reencrypt(cd, rh);
+
+ return 0;
+err:
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ LUKS2_reencrypt_free(cd, rh);
+ return r;
+}
+
+static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot_old,
+ int keyslot_new,
+ const char *passphrase,
+ size_t passphrase_size)
+{
+ int r;
+ crypt_reencrypt_info ri;
+ struct crypt_lock_handle *reencrypt_lock;
+
+ r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
+ if (r) {
+ if (r == -EBUSY)
+ log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
+ else
+ log_err(cd, _("Failed to get reencryption lock."));
+ return r;
+ }
+
+ if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ return r;
+ }
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_INVALID) {
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ return -EINVAL;
+ }
+
+ if (ri == CRYPT_REENCRYPT_CRASH) {
+ r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
+ passphrase, passphrase_size, NULL);
+ if (r < 0)
+ log_err(cd, _("LUKS2 reencryption recovery failed."));
+ } else {
+ log_dbg(cd, "No LUKS2 reencryption recovery needed.");
+ r = 0;
+ }
+
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ return r;
+}
+
+static int reencrypt_repair_by_passphrase(
+ struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot_old,
+ int keyslot_new,
+ const char *passphrase,
+ size_t passphrase_size)
+{
+ int r;
+ struct crypt_lock_handle *reencrypt_lock;
+ struct luks2_reencrypt *rh;
+ crypt_reencrypt_info ri;
+ uint8_t requirement_version;
+ const char *resilience;
+ struct volume_key *vks = NULL;
+
+ log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");
+
+ rh = crypt_get_luks2_reencrypt(cd);
+ if (rh) {
+ LUKS2_reencrypt_free(cd, rh);
+ crypt_set_luks2_reencrypt(cd, NULL);
+ rh = NULL;
+ }
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_INVALID)
+ return -EINVAL;
+
+ if (ri < CRYPT_REENCRYPT_CLEAN) {
+ log_err(cd, _("Device is not in reencryption."));
+ return -EINVAL;
+ }
+
+ r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
+ if (r < 0) {
+ if (r == -EBUSY)
+ log_err(cd, _("Reencryption process is already running."));
+ else
+ log_err(cd, _("Failed to acquire reencryption lock."));
+ return r;
+ }
+
+ /* With reencryption lock held, reload device context and verify metadata state */
+ r = crypt_load(cd, CRYPT_LUKS2, NULL);
+ if (r)
+ goto out;
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_INVALID) {
+ r = -EINVAL;
+ goto out;
+ }
+ if (ri == CRYPT_REENCRYPT_NONE) {
+ r = 0;
+ goto out;
+ }
+
+ resilience = reencrypt_resilience_type(hdr);
+ if (!resilience) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
+ !strncmp(resilience, "datashift-", 10) &&
+ LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
+ requirement_version = LUKS2_DECRYPT_DATASHIFT_REQ_VERSION;
+ else
+ requirement_version = LUKS2_REENCRYPT_REQ_VERSION;
+
+ r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, &vks);
+ if (r < 0)
+ goto out;
+
+ r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, requirement_version, vks);
+ crypt_free_volume_key(vks);
+ vks = NULL;
+ if (r < 0)
+ goto out;
+
+	/* replaces the old online-reencrypt flag with the updated version and commits metadata */
+ r = reencrypt_update_flag(cd, requirement_version, true, true);
+out:
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ crypt_free_volume_key(vks);
+ return r;
+}
+#endif
+static int reencrypt_init_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ int keyslot_old,
+ int keyslot_new,
+ const char *cipher,
+ const char *cipher_mode,
+ const struct crypt_params_reencrypt *params)
+{
+#if USE_LUKS2_REENCRYPTION
+ int r;
+ crypt_reencrypt_info ri;
+ struct volume_key *vks = NULL;
+ uint32_t flags = params ? params->flags : 0;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+	/* short-circuit into reencryption metadata repair and finish immediately. */
+ if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
+ return reencrypt_repair_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);
+
+	/* short-circuit into recovery and finish immediately. */
+ if (flags & CRYPT_REENCRYPT_RECOVERY)
+ return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);
+
+ if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
+ r = crypt_keyslot_get_key_size(cd, keyslot_new);
+ if (r < 0)
+ return r;
+ r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
+ if (r < 0) {
+ log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), cipher, cipher_mode);
+ return r;
+ }
+ }
+
+ r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
+ if (r)
+ return r;
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_INVALID) {
+ device_write_unlock(cd, crypt_metadata_device(cd));
+ return -EINVAL;
+ }
+
+ if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
+ device_write_unlock(cd, crypt_metadata_device(cd));
+ log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
+ return -EBUSY;
+ }
+
+ if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
+ r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
+ if (r < 0)
+ log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
+ } else if (ri > CRYPT_REENCRYPT_NONE) {
+ log_dbg(cd, "LUKS2 reencryption already initialized.");
+ r = 0;
+ }
+
+ device_write_unlock(cd, crypt_metadata_device(cd));
+
+ if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
+ goto out;
+
+ r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
+out:
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vks);
+ crypt_free_volume_key(vks);
+ return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
+#else
+ log_err(cd, _("This operation is not supported for this device type."));
+ return -ENOTSUP;
+#endif
+}
+
+int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
+ const char *name,
+ const char *passphrase_description,
+ int keyslot_old,
+ int keyslot_new,
+ const char *cipher,
+ const char *cipher_mode,
+ const struct crypt_params_reencrypt *params)
+{
+ int r;
+ char *passphrase;
+ size_t passphrase_size;
+
+ if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
+ return -EINVAL;
+ if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
+ return -EINVAL;
+
+ r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
+ if (r < 0) {
+ log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
+ return -EINVAL;
+ }
+
+ r = reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
+
+ crypt_safe_memzero(passphrase, passphrase_size);
+ free(passphrase);
+
+ return r;
+}
+
+int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ int keyslot_old,
+ int keyslot_new,
+ const char *cipher,
+ const char *cipher_mode,
+ const struct crypt_params_reencrypt *params)
+{
+ if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
+ return -EINVAL;
+ if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
+ return -EINVAL;
+
+ return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
+}
+
+#if USE_LUKS2_REENCRYPTION
+static reenc_status_t reencrypt_step(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size,
+ bool online)
+{
+ int r;
+ struct reenc_protection *rp;
+
+ assert(hdr);
+ assert(rh);
+
+ rp = &rh->rp;
+
+ /* in memory only */
+ r = reencrypt_make_segments(cd, hdr, rh, device_size);
+ if (r)
+ return REENC_ERR;
+
+ r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
+ if (r) {
+ log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
+ return REENC_ERR;
+ }
+
+ log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
+ log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);
+
+ if (!rh->offset && rp->type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ crypt_storage_wrapper_destroy(rh->cw1);
+ log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
+ r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
+ LUKS2_reencrypt_get_data_offset_moved(hdr),
+ crypt_get_iv_offset(cd),
+ reencrypt_get_sector_size_old(hdr),
+ reencrypt_segment_cipher_old(hdr),
+ crypt_volume_key_by_id(rh->vks, rh->digest_old),
+ rh->wflags1);
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ return REENC_ROLLBACK;
+ }
+
+ if (rh->rp_moved_segment.type != REENC_PROTECTION_NOT_SET) {
+ log_dbg(cd, "Switching to moved segment resilience type.");
+ rp = &rh->rp_moved_segment;
+ }
+ }
+
+ r = reencrypt_hotzone_protect_ready(cd, rp);
+ if (r) {
+ log_err(cd, _("Failed to initialize hotzone protection."));
+ return REENC_ROLLBACK;
+ }
+
+ if (online) {
+ r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
+		/* Tear down overlay devices with dm-error. No bio shall pass! */
+ if (r != REENC_OK)
+ return r;
+ }
+
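+	/* Hotzone sequence: read the old-segment area, persist resilience
+	 * data (commit point), decrypt with the old key, re-encrypt and
+	 * write with the new key, then flip segments in metadata. */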
+ rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
+ if (rh->read < 0) {
+ /* severity normal */
+ log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
+ return REENC_ROLLBACK;
+ }
+
+ /* metadata commit point */
+ r = reencrypt_hotzone_protect_final(cd, hdr, rh->reenc_keyslot, rp, rh->reenc_buffer, rh->read);
+ if (r < 0) {
+ /* severity normal */
+ log_err(cd, _("Failed to write reencryption resilience metadata."));
+ return REENC_ROLLBACK;
+ }
+
+ r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
+ if (r) {
+ /* severity normal */
+ log_err(cd, _("Decryption failed."));
+ return REENC_ROLLBACK;
+ }
+ if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
+ /* severity fatal */
+ log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
+ return REENC_FATAL;
+ }
+
+ if (rp->type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
+ log_err(cd, _("Failed to sync data."));
+ return REENC_FATAL;
+ }
+
+ /* metadata commit safe point */
+ r = reencrypt_assign_segments(cd, hdr, rh, 0, rp->type != REENC_PROTECTION_NONE);
+ if (r) {
+ /* severity fatal */
+ log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
+ return REENC_FATAL;
+ }
+
+ if (online) {
+ /* severity normal */
+ log_dbg(cd, "Resuming device %s", rh->hotzone_name);
+ r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
+ if (r) {
+ log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
+ return REENC_ERR;
+ }
+ }
+
+ return REENC_OK;
+}
+
+static int reencrypt_erase_backup_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr)
+{
+ int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
+ if (segment >= 0) {
+ if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
+ return -EINVAL;
+ json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
+ }
+ segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
+ if (segment >= 0) {
+ if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
+ return -EINVAL;
+ json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
+ }
+ segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
+ if (segment >= 0) {
+ if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
+ return -EINVAL;
+ json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
+ }
+
+ return 0;
+}
+
+static int reencrypt_wipe_unused_device_area(struct crypt_device *cd, struct luks2_reencrypt *rh)
+{
+ uint64_t offset, length, dev_size;
+ int r = 0;
+
+ assert(cd);
+ assert(rh);
+
+ if (rh->jobj_segment_moved && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
+ offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
+ length = json_segment_get_size(rh->jobj_segment_moved, 0);
+ log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
+ length, offset);
+ r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
+ offset, length, 1024 * 1024, NULL, NULL);
+ }
+
+ if (r < 0)
+ return r;
+
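+	/* With forward datashift the last data-shift bytes of the device
+	 * become unused once reencryption completes; wipe them as well. */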
+ if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_FORWARD) {
+ r = device_size(crypt_data_device(cd), &dev_size);
+ if (r < 0)
+ return r;
+
+ if (dev_size < data_shift_value(&rh->rp))
+ return -EINVAL;
+
+ offset = dev_size - data_shift_value(&rh->rp);
+ length = data_shift_value(&rh->rp);
+ log_dbg(cd, "Wiping %" PRIu64 " bytes of data at offset %" PRIu64,
+ length, offset);
+ r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
+ offset, length, 1024 * 1024, NULL, NULL);
+ }
+
+ return r;
+}
+
+static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
+{
+ int i, r;
+ uint32_t dmt_flags;
+ bool finished = !(rh->device_size > rh->progress);
+
+ if (rh->rp.type == REENC_PROTECTION_NONE &&
+ LUKS2_hdr_write(cd, hdr)) {
+ log_err(cd, _("Failed to write LUKS2 metadata."));
+ return -EINVAL;
+ }
+
+ if (rh->online) {
+ r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
+ if (r)
+ log_err(cd, _("Failed to reload device %s."), rh->device_name);
+ if (!r) {
+ r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
+ if (r)
+ log_err(cd, _("Failed to resume device %s."), rh->device_name);
+ }
+ dm_remove_device(cd, rh->overlay_name, 0);
+ dm_remove_device(cd, rh->hotzone_name, 0);
+
+ if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
+ !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
+ dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
+ }
+
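+	/* On completion: wipe unused areas, drop keyslots bound to the old
+	 * volume key, erase backup segments and destroy the reencrypt
+	 * keyslot (the final metadata commit point). */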
+ if (finished) {
+ if (reencrypt_wipe_unused_device_area(cd, rh))
+ log_err(cd, _("Failed to wipe unused data device area."));
+ if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(hdr, reencrypt_get_data_offset_new(hdr)))
+ log_dbg(cd, "Failed to set new keyslots area size.");
+ if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
+ if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
+ log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);
+
+ if (reencrypt_erase_backup_segments(cd, hdr))
+ log_dbg(cd, "Failed to erase backup segments");
+
+ if (reencrypt_update_flag(cd, 0, false, false))
+ log_dbg(cd, "Failed to disable reencryption requirement flag.");
+
+ /* metadata commit point also removing reencryption flag on-disk */
+ if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
+ log_err(cd, _("Failed to remove reencryption keyslot."));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh)
+{
+ log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
+ (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
+
+ if (rh->online) {
+ log_err(cd, _("Online reencryption failed."));
+ if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
+ log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
+ if (dm_error_device(cd, rh->hotzone_name)) {
+ log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
+ log_err(cd, _("Do not resume the device unless replaced with error target manually."));
+ }
+ }
+ }
+}
+
+static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr)
+{
+ int r;
+
+ switch (rs) {
+ case REENC_OK:
+ if (progress && !interrupted)
+ progress(rh->device_size, rh->progress, usrptr);
+ r = reencrypt_teardown_ok(cd, hdr, rh);
+ break;
+ case REENC_FATAL:
+ reencrypt_teardown_fatal(cd, rh);
+ /* fall-through */
+ default:
+ r = -EIO;
+ }
+
+ /* this frees reencryption lock */
+ LUKS2_reencrypt_free(cd, rh);
+ crypt_set_luks2_reencrypt(cd, NULL);
+
+ return r;
+}
+#endif
+
+int crypt_reencrypt_run(
+ struct crypt_device *cd,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr)
+{
+#if USE_LUKS2_REENCRYPTION
+ int r;
+ crypt_reencrypt_info ri;
+ struct luks2_hdr *hdr;
+ struct luks2_reencrypt *rh;
+ reenc_status_t rs;
+ bool quit = false;
+
+ if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
+ return -EINVAL;
+
+ hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri > CRYPT_REENCRYPT_CLEAN) {
+ log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
+ return -EINVAL;
+ }
+
+ rh = crypt_get_luks2_reencrypt(cd);
+ if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
+ log_err(cd, _("Missing or invalid reencrypt context."));
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Resuming LUKS2 reencryption.");
+
+ if (rh->online && reencrypt_init_device_stack(cd, rh)) {
+ log_err(cd, _("Failed to initialize reencryption device stack."));
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
+
+ rs = REENC_OK;
+
+ if (progress && progress(rh->device_size, rh->progress, usrptr))
+ quit = true;
+
+ while (!quit && (rh->device_size > rh->progress)) {
+ rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
+ if (rs != REENC_OK)
+ break;
+
+ log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
+ if (progress && progress(rh->device_size, rh->progress, usrptr))
+ quit = true;
+
+ r = reencrypt_context_update(cd, rh);
+ if (r) {
+ log_err(cd, _("Failed to update reencryption context."));
+ rs = REENC_ERR;
+ break;
+ }
+
+ log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
+ log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
+ }
+
+ r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
+ return r;
+#else
+ log_err(cd, _("This operation is not supported for this device type."));
+ return -ENOTSUP;
+#endif
+}
+
+int crypt_reencrypt(
+ struct crypt_device *cd,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
+{
+ return crypt_reencrypt_run(cd, progress, NULL);
+}
+#if USE_LUKS2_REENCRYPTION
+static int reencrypt_recovery(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint64_t device_size,
+ struct volume_key *vks)
+{
+ int r;
+ struct luks2_reencrypt *rh = NULL;
+
+ r = reencrypt_load(cd, hdr, device_size, 0, 0, vks, &rh);
+ if (r < 0) {
+ log_err(cd, _("Failed to load LUKS2 reencryption context."));
+ return r;
+ }
+
+ r = reencrypt_recover_segment(cd, hdr, rh, vks);
+ if (r < 0)
+ goto out;
+
+ if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
+ goto out;
+
+ r = reencrypt_context_update(cd, rh);
+ if (r) {
+ log_err(cd, _("Failed to update reencryption context."));
+ goto out;
+ }
+
+ r = reencrypt_teardown_ok(cd, hdr, rh);
+ if (!r)
+ r = LUKS2_hdr_write(cd, hdr);
+out:
+ LUKS2_reencrypt_free(cd, rh);
+
+ return r;
+}
+#endif
+/*
+ * Use only for calculation of the minimal data device size.
+ * The real data offset is taken directly from segments!
+ */
+int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
+{
+ crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
+ uint64_t data_offset = LUKS2_get_data_offset(hdr);
+
+ if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
+ data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
+
+ return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
+}
+
+/* internal only */
+int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
+{
+ int r;
+ uint64_t data_offset, real_size = 0;
+
+ if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
+ (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
+ check_size += reencrypt_data_shift(hdr);
+
+ r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
+ if (r)
+ return r;
+
+ data_offset = LUKS2_reencrypt_data_offset(hdr, false);
+
+ r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
+ if (r)
+ return r;
+
+ r = device_size(crypt_data_device(cd), &real_size);
+ if (r)
+ return r;
+
+ log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
+ ", real device size: %" PRIu64 " (%" PRIu64 " sectors) "
+ "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
+ check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
+ real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
+
+ if (real_size < data_offset || (check_size && real_size < check_size)) {
+ log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
+ return -EINVAL;
+ }
+
+ *dev_size = real_size - data_offset;
+
+ return 0;
+}
+#if USE_LUKS2_REENCRYPTION
+/* returns keyslot number on success (>= 0) or negative errno otherwise */
+int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *passphrase,
+ size_t passphrase_size,
+ struct volume_key **vks)
+{
+ uint64_t minimal_size, device_size;
+ int keyslot, r = -EINVAL;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ struct volume_key *vk = NULL, *_vks = NULL;
+
+ log_dbg(cd, "Entering reencryption crash recovery.");
+
+ if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
+ return r;
+
+ r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
+ passphrase, passphrase_size, &_vks);
+ if (r < 0)
+ goto out;
+ keyslot = r;
+
+ if (crypt_use_keyring_for_vk(cd))
+ vk = _vks;
+
+ while (vk) {
+ r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk));
+ if (r < 0)
+ goto out;
+ vk = crypt_volume_key_next(vk);
+ }
+
+ if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
+ goto out;
+
+ r = reencrypt_recovery(cd, hdr, device_size, _vks);
+
+ if (!r && vks)
+ MOVE_REF(*vks, _vks);
+out:
+ if (r < 0)
+ crypt_drop_keyring_key(cd, _vks);
+ crypt_free_volume_key(_vks);
+
+ return r < 0 ? r : keyslot;
+}
+#endif
+crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
+ struct crypt_params_reencrypt *params)
+{
+ crypt_reencrypt_info ri;
+ int digest;
+ uint8_t version;
+
+ if (params)
+ memset(params, 0, sizeof(*params));
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
+ return ri;
+
+ digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
+ if (digest < 0 && digest != -ENOENT)
+ return CRYPT_REENCRYPT_INVALID;
+
+ /*
+	 * In case there's an old "online-reencrypt" requirement or the
+	 * reencryption keyslot digest is missing, inform the caller that the
+	 * reencryption metadata requires repair.
+ */
+ if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
+ (version < 2 || digest == -ENOENT)) {
+ params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
+ return ri;
+ }
+
+ params->mode = reencrypt_mode(hdr);
+ params->direction = reencrypt_direction(hdr);
+ params->resilience = reencrypt_resilience_type(hdr);
+ params->hash = reencrypt_resilience_hash(hdr);
+ params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
+ params->max_hotzone_size = 0;
+ if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
+ params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;
+
+ return ri;
+}
diff --git a/lib/luks2/luks2_reencrypt_digest.c b/lib/luks2/luks2_reencrypt_digest.c
new file mode 100644
index 0000000..bc86f54
--- /dev/null
+++ b/lib/luks2/luks2_reencrypt_digest.c
@@ -0,0 +1,410 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, reencryption digest helpers
+ *
+ * Copyright (C) 2022-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2022-2023 Ondrej Kozina
+ * Copyright (C) 2022-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+#define MAX_STR 64
+
+struct jtype {
+ enum { JNONE = 0, JSTR, JU64, JX64, JU32 } type;
+ json_object *jobj;
+ const char *id;
+};
+
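+/*
+ * Serialize one JSON field into ptr (when ptr is not NULL) and return the
+ * serialized length, or 0 on error. Integers are converted to big-endian
+ * so the resulting digest does not depend on host endianness.
+ */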
+static size_t sr(struct jtype *j, uint8_t *ptr)
+{
+ json_object *jobj;
+ size_t len = 0;
+ uint64_t u64;
+ uint32_t u32;
+
+ if (!json_object_is_type(j->jobj, json_type_object))
+ return 0;
+
+ if (!json_object_object_get_ex(j->jobj, j->id, &jobj))
+ return 0;
+
+	switch (j->type) {
+ case JSTR: /* JSON string */
+ if (!json_object_is_type(jobj, json_type_string))
+ return 0;
+ len = strlen(json_object_get_string(jobj));
+ if (len > MAX_STR)
+ return 0;
+ if (ptr)
+ memcpy(ptr, json_object_get_string(jobj), len);
+ break;
+ case JU64: /* Unsigned 64bit integer stored as string */
+ if (!json_object_is_type(jobj, json_type_string))
+ break;
+ len = sizeof(u64);
+ if (ptr) {
+ u64 = cpu_to_be64(crypt_jobj_get_uint64(jobj));
+ memcpy(ptr, &u64, len);
+ }
+ break;
+ case JX64: /* Unsigned 64bit segment size (allows "dynamic") */
+ if (!json_object_is_type(jobj, json_type_string))
+ break;
+ if (!strcmp(json_object_get_string(jobj), "dynamic")) {
+ len = strlen("dynamic");
+ if (ptr)
+ memcpy(ptr, json_object_get_string(jobj), len);
+ } else {
+ len = sizeof(u64);
+ u64 = cpu_to_be64(crypt_jobj_get_uint64(jobj));
+ if (ptr)
+ memcpy(ptr, &u64, len);
+ }
+ break;
+ case JU32: /* Unsigned 32bit integer, stored as JSON int */
+ if (!json_object_is_type(jobj, json_type_int))
+ return 0;
+ len = sizeof(u32);
+ if (ptr) {
+ u32 = cpu_to_be32(crypt_jobj_get_uint32(jobj));
+ memcpy(ptr, &u32, len);
+ }
+ break;
+ case JNONE:
+ return 0;
+ };
+
+ return len;
+}
+
+static size_t srs(struct jtype j[], uint8_t *ptr)
+{
+ size_t l, len = 0;
+
+	while (j->jobj) {
+ l = sr(j, ptr);
+ if (!l)
+ return 0;
+ len += l;
+ if (ptr)
+ ptr += l;
+ j++;
+ }
+ return len;
+}
+
+static size_t segment_linear_serialize(json_object *jobj_segment, uint8_t *buffer)
+{
+ struct jtype j[] = {
+ { JSTR, jobj_segment, "type" },
+ { JU64, jobj_segment, "offset" },
+ { JX64, jobj_segment, "size" },
+ {}
+ };
+ return srs(j, buffer);
+}
+
+static size_t segment_crypt_serialize(json_object *jobj_segment, uint8_t *buffer)
+{
+ struct jtype j[] = {
+ { JSTR, jobj_segment, "type" },
+ { JU64, jobj_segment, "offset" },
+ { JX64, jobj_segment, "size" },
+ { JU64, jobj_segment, "iv_tweak" },
+ { JSTR, jobj_segment, "encryption" },
+ { JU32, jobj_segment, "sector_size" },
+ {}
+ };
+ return srs(j, buffer);
+}
+
+static size_t segment_serialize(json_object *jobj_segment, uint8_t *buffer)
+{
+ json_object *jobj_type;
+ const char *segment_type;
+
+ if (!json_object_object_get_ex(jobj_segment, "type", &jobj_type))
+ return 0;
+
+ if (!(segment_type = json_object_get_string(jobj_type)))
+ return 0;
+
+ if (!strcmp(segment_type, "crypt"))
+ return segment_crypt_serialize(jobj_segment, buffer);
+ else if (!strcmp(segment_type, "linear"))
+ return segment_linear_serialize(jobj_segment, buffer);
+
+ return 0;
+}
+
+static size_t backup_segments_serialize(struct luks2_hdr *hdr, uint8_t *buffer)
+{
+ json_object *jobj_segment;
+ size_t l, len = 0;
+
+ jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-previous");
+ if (!jobj_segment || !(l = segment_serialize(jobj_segment, buffer)))
+ return 0;
+ len += l;
+ if (buffer)
+ buffer += l;
+
+ jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-final");
+ if (!jobj_segment || !(l = segment_serialize(jobj_segment, buffer)))
+ return 0;
+ len += l;
+ if (buffer)
+ buffer += l;
+
+ jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
+ if (jobj_segment) {
+ if (!(l = segment_serialize(jobj_segment, buffer)))
+ return 0;
+ len += l;
+ }
+
+ return len;
+}
+
+static size_t reenc_keyslot_serialize(struct luks2_hdr *hdr, uint8_t *buffer)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_type;
+ const char *area_type;
+ int keyslot_reencrypt;
+
+ keyslot_reencrypt = LUKS2_find_keyslot(hdr, "reencrypt");
+ if (keyslot_reencrypt < 0)
+ return 0;
+
+ if (!(jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot_reencrypt)))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return 0;
+
+ if (!(area_type = json_object_get_string(jobj_type)))
+ return 0;
+
+ struct jtype j[] = {
+ { JSTR, jobj_keyslot, "mode" },
+ { JSTR, jobj_keyslot, "direction" },
+ { JSTR, jobj_area, "type" },
+ { JU64, jobj_area, "offset" },
+ { JU64, jobj_area, "size" },
+ {}
+ };
+ struct jtype j_datashift[] = {
+ { JSTR, jobj_keyslot, "mode" },
+ { JSTR, jobj_keyslot, "direction" },
+ { JSTR, jobj_area, "type" },
+ { JU64, jobj_area, "offset" },
+ { JU64, jobj_area, "size" },
+ { JU64, jobj_area, "shift_size" },
+ {}
+ };
+ struct jtype j_checksum[] = {
+ { JSTR, jobj_keyslot, "mode" },
+ { JSTR, jobj_keyslot, "direction" },
+ { JSTR, jobj_area, "type" },
+ { JU64, jobj_area, "offset" },
+ { JU64, jobj_area, "size" },
+ { JSTR, jobj_area, "hash" },
+ { JU32, jobj_area, "sector_size" },
+ {}
+ };
+ struct jtype j_datashift_checksum[] = {
+ { JSTR, jobj_keyslot, "mode" },
+ { JSTR, jobj_keyslot, "direction" },
+ { JSTR, jobj_area, "type" },
+ { JU64, jobj_area, "offset" },
+ { JU64, jobj_area, "size" },
+ { JSTR, jobj_area, "hash" },
+ { JU32, jobj_area, "sector_size" },
+ { JU64, jobj_area, "shift_size" },
+ {}
+ };
+
+ if (!strcmp(area_type, "datashift-checksum"))
+ return srs(j_datashift_checksum, buffer);
+ else if (!strcmp(area_type, "datashift") ||
+ !strcmp(area_type, "datashift-journal"))
+ return srs(j_datashift, buffer);
+ else if (!strcmp(area_type, "checksum"))
+ return srs(j_checksum, buffer);
+
+ return srs(j, buffer);
+}
+
+static size_t blob_serialize(void *blob, size_t length, uint8_t *buffer)
+{
+ if (buffer)
+ memcpy(buffer, blob, length);
+
+ return length;
+}
+
+static int reencrypt_assembly_verification_data(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vks,
+ uint8_t version,
+ struct volume_key **verification_data)
+{
+ uint8_t *ptr;
+ int digest_new, digest_old;
+ struct volume_key *data = NULL, *vk_old = NULL, *vk_new = NULL;
+ size_t keyslot_data_len, segments_data_len, data_len = 2;
+
+ /*
+	 * This works up to (and including) version v207.
+ */
+ assert(version < (UINT8_MAX - 0x2F));
+
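+	/*
+	 * Verification blob layout (integers big-endian):
+	 *   'v', '0' + version, old volume key (if any), new volume key
+	 *   (if its digest differs), serialized reencrypt keyslot fields,
+	 *   serialized backup segments.
+	 */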
+ /* Keys - calculate length */
+ digest_new = LUKS2_reencrypt_digest_new(hdr);
+ digest_old = LUKS2_reencrypt_digest_old(hdr);
+
+ if (digest_old >= 0) {
+ vk_old = crypt_volume_key_by_id(vks, digest_old);
+ if (!vk_old) {
+ log_dbg(cd, "Key (digest id %d) required but not unlocked.", digest_old);
+ return -EINVAL;
+ }
+ data_len += blob_serialize(vk_old->key, vk_old->keylength, NULL);
+ }
+
+ if (digest_new >= 0 && digest_old != digest_new) {
+ vk_new = crypt_volume_key_by_id(vks, digest_new);
+ if (!vk_new) {
+ log_dbg(cd, "Key (digest id %d) required but not unlocked.", digest_new);
+ return -EINVAL;
+ }
+ data_len += blob_serialize(vk_new->key, vk_new->keylength, NULL);
+ }
+
+ if (data_len == 2)
+ return -EINVAL;
+
+ /* Metadata - calculate length */
+ if (!(keyslot_data_len = reenc_keyslot_serialize(hdr, NULL)))
+ return -EINVAL;
+ data_len += keyslot_data_len;
+
+ if (!(segments_data_len = backup_segments_serialize(hdr, NULL)))
+ return -EINVAL;
+ data_len += segments_data_len;
+
+ /* Alloc and fill serialization data */
+ data = crypt_alloc_volume_key(data_len, NULL);
+ if (!data)
+ return -ENOMEM;
+
+ ptr = (uint8_t*)data->key;
+
+ *ptr++ = 0x76;
+ *ptr++ = 0x30 + version;
+
+ if (vk_old)
+ ptr += blob_serialize(vk_old->key, vk_old->keylength, ptr);
+
+ if (vk_new)
+ ptr += blob_serialize(vk_new->key, vk_new->keylength, ptr);
+
+ if (!reenc_keyslot_serialize(hdr, ptr))
+ goto bad;
+ ptr += keyslot_data_len;
+
+ if (!backup_segments_serialize(hdr, ptr))
+ goto bad;
+ ptr += segments_data_len;
+
+ assert((size_t)(ptr - (uint8_t*)data->key) == data_len);
+
+ *verification_data = data;
+
+ return 0;
+bad:
+ crypt_free_volume_key(data);
+ return -EINVAL;
+}
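+
+/*
+ * For illustration, the verification blob assembled above is laid out as:
+ *
+ *    byte 0: 'v' (0x76)
+ *    byte 1: '0' + version (a single byte, hence the assert above
+ *            limiting version to 207)
+ *    [old volume key bytes, if that digest is present]
+ *    [new volume key bytes, if the new digest differs from the old one]
+ *    [serialized reencrypt keyslot metadata]
+ *    [serialized backup segments]
+ *
+ * Any change in the covered keys or metadata changes the digest computed
+ * over this blob, which is what LUKS2_reencrypt_digest_verify() below
+ * detects.
+ */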
+
+int LUKS2_keyslot_reencrypt_digest_create(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint8_t version,
+ struct volume_key *vks)
+{
+ int digest_reencrypt, keyslot_reencrypt, r;
+ struct volume_key *data;
+
+ keyslot_reencrypt = LUKS2_find_keyslot(hdr, "reencrypt");
+ if (keyslot_reencrypt < 0)
+ return keyslot_reencrypt;
+
+ r = reencrypt_assembly_verification_data(cd, hdr, vks, version, &data);
+ if (r < 0)
+ return r;
+
+ r = LUKS2_digest_create(cd, "pbkdf2", hdr, data);
+ crypt_free_volume_key(data);
+ if (r < 0)
+ return r;
+
+ digest_reencrypt = r;
+
+ r = LUKS2_digest_assign(cd, hdr, keyslot_reencrypt, CRYPT_ANY_DIGEST, 0, 0);
+ if (r < 0)
+ return r;
+
+ return LUKS2_digest_assign(cd, hdr, keyslot_reencrypt, digest_reencrypt, 1, 0);
+}
+
+int LUKS2_reencrypt_digest_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vks)
+{
+ int r, keyslot_reencrypt;
+ struct volume_key *data;
+ uint8_t version;
+
+ log_dbg(cd, "Verifying reencryption metadata.");
+
+ keyslot_reencrypt = LUKS2_find_keyslot(hdr, "reencrypt");
+ if (keyslot_reencrypt < 0)
+ return keyslot_reencrypt;
+
+ if (LUKS2_config_get_reencrypt_version(hdr, &version))
+ return -EINVAL;
+
+ r = reencrypt_assembly_verification_data(cd, hdr, vks, version, &data);
+ if (r < 0)
+ return r;
+
+ r = LUKS2_digest_verify(cd, hdr, data, keyslot_reencrypt);
+ crypt_free_volume_key(data);
+
+ if (r < 0) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Reencryption digest is missing.");
+ log_err(cd, _("Reencryption metadata is invalid."));
+ } else
+ log_dbg(cd, "Reencryption metadata verified.");
+
+ return r;
+}
diff --git a/lib/luks2/luks2_segment.c b/lib/luks2/luks2_segment.c
new file mode 100644
index 0000000..63e7c14
--- /dev/null
+++ b/lib/luks2/luks2_segment.c
@@ -0,0 +1,426 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, internal segment handling
+ *
+ * Copyright (C) 2018-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2018-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+/* use only on already validated 'segments' object */
+uint64_t json_segments_get_minimal_offset(json_object *jobj_segments, unsigned blockwise)
+{
+ uint64_t tmp, min = blockwise ? UINT64_MAX >> SECTOR_SHIFT : UINT64_MAX;
+
+ if (!jobj_segments)
+ return 0;
+
+ json_object_object_foreach(jobj_segments, key, val) {
+ UNUSED(key);
+
+ if (json_segment_is_backup(val))
+ continue;
+
+ tmp = json_segment_get_offset(val, blockwise);
+
+ if (!tmp)
+ return tmp;
+
+ if (tmp < min)
+ min = tmp;
+ }
+
+ return min;
+}
+
+uint64_t json_segment_get_offset(json_object *jobj_segment, unsigned blockwise)
+{
+ json_object *jobj;
+
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "offset", &jobj))
+ return 0;
+
+ return blockwise ? crypt_jobj_get_uint64(jobj) >> SECTOR_SHIFT : crypt_jobj_get_uint64(jobj);
+}
+
+const char *json_segment_type(json_object *jobj_segment)
+{
+ json_object *jobj;
+
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "type", &jobj))
+ return NULL;
+
+ return json_object_get_string(jobj);
+}
+
+uint64_t json_segment_get_iv_offset(json_object *jobj_segment)
+{
+ json_object *jobj;
+
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "iv_tweak", &jobj))
+ return 0;
+
+ return crypt_jobj_get_uint64(jobj);
+}
+
+uint64_t json_segment_get_size(json_object *jobj_segment, unsigned blockwise)
+{
+ json_object *jobj;
+
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "size", &jobj))
+ return 0;
+
+ return blockwise ? crypt_jobj_get_uint64(jobj) >> SECTOR_SHIFT : crypt_jobj_get_uint64(jobj);
+}
+
+const char *json_segment_get_cipher(json_object *jobj_segment)
+{
+ json_object *jobj;
+
+ /* FIXME: Pseudo "null" cipher should be handled elsewhere */
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "encryption", &jobj))
+ return "null";
+
+ return json_object_get_string(jobj);
+}
+
+uint32_t json_segment_get_sector_size(json_object *jobj_segment)
+{
+ json_object *jobj;
+ int i;
+
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "sector_size", &jobj))
+ return SECTOR_SIZE;
+
+ i = json_object_get_int(jobj);
+ return i < 0 ? SECTOR_SIZE : i;
+}
+
+static json_object *json_segment_get_flags(json_object *jobj_segment)
+{
+ json_object *jobj;
+
+ if (!jobj_segment || !(json_object_object_get_ex(jobj_segment, "flags", &jobj)))
+ return NULL;
+ return jobj;
+}
+
+bool json_segment_contains_flag(json_object *jobj_segment, const char *flag_str, size_t len)
+{
+ int r, i;
+ json_object *jobj, *jobj_flags = json_segment_get_flags(jobj_segment);
+
+ if (!jobj_flags)
+ return false;
+
+ for (i = 0; i < (int)json_object_array_length(jobj_flags); i++) {
+ jobj = json_object_array_get_idx(jobj_flags, i);
+ if (len)
+ r = strncmp(json_object_get_string(jobj), flag_str, len);
+ else
+ r = strcmp(json_object_get_string(jobj), flag_str);
+ if (!r)
+ return true;
+ }
+
+ return false;
+}
+
+bool json_segment_is_backup(json_object *jobj_segment)
+{
+ return json_segment_contains_flag(jobj_segment, "backup-", 7);
+}
+
+json_object *json_segments_get_segment(json_object *jobj_segments, int segment)
+{
+ json_object *jobj;
+ char segment_name[16];
+
+	if (snprintf(segment_name, sizeof(segment_name), "%d", segment) < 1)
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_segments, segment_name, &jobj))
+ return NULL;
+
+ return jobj;
+}
+
+unsigned json_segments_count(json_object *jobj_segments)
+{
+ unsigned count = 0;
+
+ if (!jobj_segments)
+ return 0;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ UNUSED(slot);
+ if (!json_segment_is_backup(val))
+ count++;
+ }
+
+ return count;
+}
+
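+/*
+ * Depending on 'id', retval is interpreted either as an int pointer
+ * receiving the segment id (id != 0) or as a json_object ** receiving
+ * the segment object itself (id == 0).
+ */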
+static void _get_segment_or_id_by_flag(json_object *jobj_segments, const char *flag, unsigned id, void *retval)
+{
+ json_object *jobj_flags, **jobj_ret = (json_object **)retval;
+ int *ret = (int *)retval;
+
+ if (!flag)
+ return;
+
+ json_object_object_foreach(jobj_segments, key, value) {
+ if (!json_object_object_get_ex(value, "flags", &jobj_flags))
+ continue;
+ if (LUKS2_array_jobj(jobj_flags, flag)) {
+ if (id)
+ *ret = atoi(key);
+ else
+ *jobj_ret = value;
+ return;
+ }
+ }
+}
+
+void json_segment_remove_flag(json_object *jobj_segment, const char *flag)
+{
+ json_object *jobj_flags, *jobj_flags_new;
+
+ if (!jobj_segment)
+ return;
+
+ jobj_flags = json_segment_get_flags(jobj_segment);
+ if (!jobj_flags)
+ return;
+
+ jobj_flags_new = LUKS2_array_remove(jobj_flags, flag);
+ if (!jobj_flags_new)
+ return;
+
+ if (json_object_array_length(jobj_flags_new) <= 0) {
+ json_object_put(jobj_flags_new);
+ json_object_object_del(jobj_segment, "flags");
+ } else
+ json_object_object_add(jobj_segment, "flags", jobj_flags_new);
+}
+
+static json_object *_segment_create_generic(const char *type, uint64_t offset, const uint64_t *length)
+{
+ json_object *jobj = json_object_new_object();
+ if (!jobj)
+ return NULL;
+
+ json_object_object_add(jobj, "type", json_object_new_string(type));
+ json_object_object_add(jobj, "offset", crypt_jobj_new_uint64(offset));
+ json_object_object_add(jobj, "size", length ? crypt_jobj_new_uint64(*length) : json_object_new_string("dynamic"));
+
+ return jobj;
+}
+
+json_object *json_segment_create_linear(uint64_t offset, const uint64_t *length, unsigned reencryption)
+{
+ json_object *jobj = _segment_create_generic("linear", offset, length);
+ if (reencryption)
+ LUKS2_segment_set_flag(jobj, "in-reencryption");
+ return jobj;
+}
+
+json_object *json_segment_create_crypt(uint64_t offset,
+ uint64_t iv_offset, const uint64_t *length,
+ const char *cipher, uint32_t sector_size,
+ unsigned reencryption)
+{
+ json_object *jobj = _segment_create_generic("crypt", offset, length);
+ if (!jobj)
+ return NULL;
+
+ json_object_object_add(jobj, "iv_tweak", crypt_jobj_new_uint64(iv_offset));
+ json_object_object_add(jobj, "encryption", json_object_new_string(cipher));
+ json_object_object_add(jobj, "sector_size", json_object_new_int(sector_size));
+ if (reencryption)
+ LUKS2_segment_set_flag(jobj, "in-reencryption");
+
+ return jobj;
+}
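+
+/*
+ * For illustration, json_segment_create_crypt(4096, 0, NULL,
+ * "aes-xts-plain64", 512, 0) builds a segment object like the following
+ * (uint64 values are stored as JSON strings in LUKS2 metadata):
+ *
+ *    {
+ *      "type": "crypt",
+ *      "offset": "4096",
+ *      "size": "dynamic",
+ *      "iv_tweak": "0",
+ *      "encryption": "aes-xts-plain64",
+ *      "sector_size": 512
+ *    }
+ */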
+
+uint64_t LUKS2_segment_offset(struct luks2_hdr *hdr, int segment, unsigned blockwise)
+{
+ return json_segment_get_offset(LUKS2_get_segment_jobj(hdr, segment), blockwise);
+}
+
+int json_segments_segment_in_reencrypt(json_object *jobj_segments)
+{
+ json_object *jobj_flags;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ if (!json_object_object_get_ex(val, "flags", &jobj_flags) ||
+ !LUKS2_array_jobj(jobj_flags, "in-reencryption"))
+ continue;
+
+ return atoi(slot);
+ }
+
+ return -1;
+}
+
+uint64_t LUKS2_segment_size(struct luks2_hdr *hdr, int segment, unsigned blockwise)
+{
+ return json_segment_get_size(LUKS2_get_segment_jobj(hdr, segment), blockwise);
+}
+
+int LUKS2_segment_is_type(struct luks2_hdr *hdr, int segment, const char *type)
+{
+ return !strcmp(json_segment_type(LUKS2_get_segment_jobj(hdr, segment)) ?: "", type);
+}
+
+int LUKS2_last_segment_by_type(struct luks2_hdr *hdr, const char *type)
+{
+ json_object *jobj_segments;
+ int last_found = -1;
+
+ if (!type)
+ return -1;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
+ return -1;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ if (json_segment_is_backup(val))
+ continue;
+ if (strcmp(type, json_segment_type(val) ?: ""))
+ continue;
+
+ if (atoi(slot) > last_found)
+ last_found = atoi(slot);
+ }
+
+ return last_found;
+}
+
+int LUKS2_segment_by_type(struct luks2_hdr *hdr, const char *type)
+{
+ json_object *jobj_segments;
+ int first_found = -1;
+
+ if (!type)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
+ return -EINVAL;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ if (json_segment_is_backup(val))
+ continue;
+ if (strcmp(type, json_segment_type(val) ?: ""))
+ continue;
+
+ if (first_found < 0)
+ first_found = atoi(slot);
+ else if (atoi(slot) < first_found)
+ first_found = atoi(slot);
+ }
+
+ return first_found;
+}
+
+int LUKS2_segment_first_unused_id(struct luks2_hdr *hdr)
+{
+ json_object *jobj_segments;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
+ return -EINVAL;
+
+ return json_object_object_length(jobj_segments);
+}
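+
+/*
+ * Note: this relies on validated metadata keeping segment ids contiguous
+ * from 0, so the object count doubles as the first unused id.
+ */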
+
+int LUKS2_segment_set_flag(json_object *jobj_segment, const char *flag)
+{
+ json_object *jobj_flags;
+
+ if (!jobj_segment || !flag)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_segment, "flags", &jobj_flags)) {
+ jobj_flags = json_object_new_array();
+ if (!jobj_flags)
+ return -ENOMEM;
+ json_object_object_add(jobj_segment, "flags", jobj_flags);
+ }
+
+ if (LUKS2_array_jobj(jobj_flags, flag))
+ return 0;
+
+ json_object_array_add(jobj_flags, json_object_new_string(flag));
+
+ return 0;
+}
+
+int LUKS2_segments_set(struct crypt_device *cd, struct luks2_hdr *hdr,
+ json_object *jobj_segments, int commit)
+{
+ json_object_object_add(hdr->jobj, "segments", jobj_segments);
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+int LUKS2_get_segment_id_by_flag(struct luks2_hdr *hdr, const char *flag)
+{
+ int ret = -ENOENT;
+ json_object *jobj_segments = LUKS2_get_segments_jobj(hdr);
+
+ if (jobj_segments)
+ _get_segment_or_id_by_flag(jobj_segments, flag, 1, &ret);
+
+ return ret;
+}
+
+json_object *LUKS2_get_segment_by_flag(struct luks2_hdr *hdr, const char *flag)
+{
+ json_object *jobj_segment = NULL,
+ *jobj_segments = LUKS2_get_segments_jobj(hdr);
+
+ if (jobj_segments)
+ _get_segment_or_id_by_flag(jobj_segments, flag, 0, &jobj_segment);
+
+ return jobj_segment;
+}
+
+/* compares key characteristics of both segments */
+bool json_segment_cmp(json_object *jobj_segment_1, json_object *jobj_segment_2)
+{
+ const char *type = json_segment_type(jobj_segment_1);
+ const char *type2 = json_segment_type(jobj_segment_2);
+
+ if (!type || !type2)
+ return false;
+
+ if (strcmp(type, type2))
+ return false;
+
+ if (!strcmp(type, "crypt"))
+ return (json_segment_get_sector_size(jobj_segment_1) == json_segment_get_sector_size(jobj_segment_2) &&
+ !strcmp(json_segment_get_cipher(jobj_segment_1),
+ json_segment_get_cipher(jobj_segment_2)));
+
+ return true;
+}
diff --git a/lib/luks2/luks2_token.c b/lib/luks2/luks2_token.c
new file mode 100644
index 0000000..5f65918
--- /dev/null
+++ b/lib/luks2/luks2_token.c
@@ -0,0 +1,1043 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, token handling
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <ctype.h>
+#include <dlfcn.h>
+
+#include "luks2_internal.h"
+
+#if USE_EXTERNAL_TOKENS
+static bool external_tokens_enabled = true;
+#else
+static bool external_tokens_enabled = false;
+#endif
+
+static struct crypt_token_handler_internal token_handlers[LUKS2_TOKENS_MAX] = {
+ /* keyring builtin token */
+ {
+ .version = 1,
+ .u = {
+ .v1 = { .name = LUKS2_TOKEN_KEYRING,
+ .open = keyring_open,
+ .buffer_free = keyring_buffer_free,
+ .validate = keyring_validate,
+ .dump = keyring_dump }
+ }
+ }
+};
+
+void crypt_token_external_disable(void)
+{
+ external_tokens_enabled = false;
+}
+
+const char *crypt_token_external_path(void)
+{
+ return external_tokens_enabled ? EXTERNAL_LUKS2_TOKENS_PATH : NULL;
+}
+
+#if USE_EXTERNAL_TOKENS
+static void *token_dlvsym(struct crypt_device *cd,
+ void *handle,
+ const char *symbol,
+ const char *version)
+{
+ char *error;
+ void *sym;
+
+#ifdef HAVE_DLVSYM
+ log_dbg(cd, "Loading symbol %s@%s.", symbol, version);
+ sym = dlvsym(handle, symbol, version);
+#else
+ log_dbg(cd, "Loading default version of symbol %s.", symbol);
+ sym = dlsym(handle, symbol);
+#endif
+ error = dlerror();
+
+ if (error)
+ log_dbg(cd, "%s", error);
+
+ return sym;
+}
+#endif
+
+static bool token_validate_v1(struct crypt_device *cd, const crypt_token_handler *h)
+{
+ if (!h)
+ return false;
+
+ if (!h->name) {
+ log_dbg(cd, "Error: token handler does not provide name attribute.");
+ return false;
+ }
+
+ if (!h->open) {
+ log_dbg(cd, "Error: token handler does not provide open function.");
+ return false;
+ }
+
+ return true;
+}
+
+#if USE_EXTERNAL_TOKENS
+static bool token_validate_v2(struct crypt_device *cd, const struct crypt_token_handler_internal *h)
+{
+ if (!h)
+ return false;
+
+ if (!token_validate_v1(cd, &h->u.v1))
+ return false;
+
+ if (!h->u.v2.version) {
+ log_dbg(cd, "Error: token handler does not provide " CRYPT_TOKEN_ABI_VERSION " function.");
+ return false;
+ }
+
+ return true;
+}
+
+static bool external_token_name_valid(const char *name)
+{
+ if (!*name || strlen(name) > LUKS2_TOKEN_NAME_MAX)
+ return false;
+
+ while (*name) {
+ if (!isalnum(*name) && *name != '-' && *name != '_')
+ return false;
+ name++;
+ }
+
+ return true;
+}
+#endif
+
+static int
+crypt_token_load_external(struct crypt_device *cd, const char *name, struct crypt_token_handler_internal *ret)
+{
+#if USE_EXTERNAL_TOKENS
+ struct crypt_token_handler_v2 *token;
+ void *h;
+ char buf[PATH_MAX];
+ int r;
+
+ if (!external_tokens_enabled)
+ return -ENOTSUP;
+
+ if (!ret || !name)
+ return -EINVAL;
+
+ if (!external_token_name_valid(name)) {
+ log_dbg(cd, "External token name (%.*s) invalid.", LUKS2_TOKEN_NAME_MAX, name);
+ return -EINVAL;
+ }
+
+ token = &ret->u.v2;
+
+ r = snprintf(buf, sizeof(buf), "%s/libcryptsetup-token-%s.so", crypt_token_external_path(), name);
+ if (r < 0 || (size_t)r >= sizeof(buf))
+ return -EINVAL;
+
+ assert(*buf == '/');
+
+ log_dbg(cd, "Trying to load %s.", buf);
+
+ h = dlopen(buf, RTLD_LAZY);
+ if (!h) {
+ log_dbg(cd, "%s", dlerror());
+ return -EINVAL;
+ }
+ dlerror();
+
+ token->name = strdup(name);
+ token->open = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_OPEN, CRYPT_TOKEN_ABI_VERSION1);
+ token->buffer_free = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_BUFFER_FREE, CRYPT_TOKEN_ABI_VERSION1);
+ token->validate = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_VALIDATE, CRYPT_TOKEN_ABI_VERSION1);
+ token->dump = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_DUMP, CRYPT_TOKEN_ABI_VERSION1);
+ token->open_pin = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_OPEN_PIN, CRYPT_TOKEN_ABI_VERSION1);
+ token->version = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_VERSION, CRYPT_TOKEN_ABI_VERSION1);
+
+ if (!token_validate_v2(cd, ret)) {
+ free(CONST_CAST(void *)token->name);
+ dlclose(h);
+ memset(token, 0, sizeof(*token));
+ return -EINVAL;
+ }
+
+	/* Token loaded; a failure here affects only the debug message below and can be ignored */
+ r = snprintf(buf, sizeof(buf), "%s", token->version() ?: "");
+ if (r < 0 || (size_t)r >= sizeof(buf))
+ *buf = '\0';
+
+ log_dbg(cd, "Token handler %s-%s loaded successfully.", token->name, buf);
+
+ token->dlhandle = h;
+ ret->version = 2;
+
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
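+
+/*
+ * A minimal external token plugin sketch (illustrative; the exported
+ * symbol names correspond to the CRYPT_TOKEN_ABI_* macros resolved
+ * above). The shared object is built as libcryptsetup-token-<name>.so
+ * and installed into the external tokens path:
+ *
+ *    const char *cryptsetup_token_version(void)
+ *    {
+ *            return "example 0.1";
+ *    }
+ *
+ *    int cryptsetup_token_open(struct crypt_device *cd, int token,
+ *                              char **buffer, size_t *buffer_len,
+ *                              void *usrptr)
+ *    {
+ *            // Fill *buffer and *buffer_len with a passphrase,
+ *            // or return a negative errno on failure.
+ *            return -ENOENT;
+ *    }
+ */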
+
+static int is_builtin_candidate(const char *type)
+{
+ return !strncmp(type, LUKS2_BUILTIN_TOKEN_PREFIX, LUKS2_BUILTIN_TOKEN_PREFIX_LEN);
+}
+
+static int crypt_token_find_free(struct crypt_device *cd, const char *name, int *index)
+{
+ int i;
+
+ if (is_builtin_candidate(name)) {
+ log_dbg(cd, "'" LUKS2_BUILTIN_TOKEN_PREFIX "' is reserved prefix for builtin tokens.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < LUKS2_TOKENS_MAX && token_handlers[i].u.v1.name; i++) {
+ if (!strcmp(token_handlers[i].u.v1.name, name)) {
+ log_dbg(cd, "Keyslot handler %s is already registered.", name);
+ return -EINVAL;
+ }
+ }
+
+ if (i == LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ if (index)
+ *index = i;
+
+ return 0;
+}
+
+int crypt_token_register(const crypt_token_handler *handler)
+{
+ int i, r;
+
+ if (!token_validate_v1(NULL, handler))
+ return -EINVAL;
+
+ r = crypt_token_find_free(NULL, handler->name, &i);
+ if (r < 0)
+ return r;
+
+ token_handlers[i].version = 1;
+ token_handlers[i].u.v1 = *handler;
+ return 0;
+}
+
+void crypt_token_unload_external_all(struct crypt_device *cd)
+{
+#if USE_EXTERNAL_TOKENS
+ int i;
+
+ for (i = LUKS2_TOKENS_MAX - 1; i >= 0; i--) {
+ if (token_handlers[i].version < 2)
+ continue;
+
+ log_dbg(cd, "Unloading %s token handler.", token_handlers[i].u.v2.name);
+
+ free(CONST_CAST(void *)token_handlers[i].u.v2.name);
+
+ if (dlclose(CONST_CAST(void *)token_handlers[i].u.v2.dlhandle))
+ log_dbg(cd, "%s", dlerror());
+ }
+#endif
+}
+
+static const void
+*LUKS2_token_handler_type(struct crypt_device *cd, const char *type)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_TOKENS_MAX && token_handlers[i].u.v1.name; i++)
+ if (!strcmp(token_handlers[i].u.v1.name, type))
+ return &token_handlers[i].u;
+
+ if (i >= LUKS2_TOKENS_MAX)
+ return NULL;
+
+ if (is_builtin_candidate(type))
+ return NULL;
+
+ if (crypt_token_load_external(cd, type, &token_handlers[i]))
+ return NULL;
+
+ return &token_handlers[i].u;
+}
+
+static const void
+*LUKS2_token_handler(struct crypt_device *cd, int token)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj1, *jobj2;
+
+ if (token < 0)
+ return NULL;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return NULL;
+
+ if (!(jobj1 = LUKS2_get_token_jobj(hdr, token)))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, "type", &jobj2))
+ return NULL;
+
+ return LUKS2_token_handler_type(cd, json_object_get_string(jobj2));
+}
+
+static int LUKS2_token_find_free(struct luks2_hdr *hdr)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_TOKENS_MAX; i++)
+ if (!LUKS2_get_token_jobj(hdr, i))
+ return i;
+
+ return -EINVAL;
+}
+
+int LUKS2_token_create(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *json,
+ int commit)
+{
+ const crypt_token_handler *h;
+ json_object *jobj_tokens, *jobj_type, *jobj;
+ enum json_tokener_error jerr;
+ char num[16];
+
+ if (token == CRYPT_ANY_TOKEN) {
+ if (!json)
+ return -EINVAL;
+ token = LUKS2_token_find_free(hdr);
+ }
+
+ if (token < 0 || token >= LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens))
+ return -EINVAL;
+
+ if (snprintf(num, sizeof(num), "%d", token) < 0)
+ return -EINVAL;
+
+ /* Remove token */
+ if (!json)
+ json_object_object_del(jobj_tokens, num);
+ else {
+
+ jobj = json_tokener_parse_verbose(json, &jerr);
+ if (!jobj) {
+ log_dbg(cd, "Token JSON parse failed.");
+ return -EINVAL;
+ }
+
+ if (LUKS2_token_validate(cd, hdr->jobj, jobj, num)) {
+ json_object_put(jobj);
+ return -EINVAL;
+ }
+
+ json_object_object_get_ex(jobj, "type", &jobj_type);
+ h = LUKS2_token_handler_type(cd, json_object_get_string(jobj_type));
+
+ if (is_builtin_candidate(json_object_get_string(jobj_type)) && !h) {
+ log_dbg(cd, "%s is builtin token candidate with missing handler",
+ json_object_get_string(jobj_type));
+ json_object_put(jobj);
+ return -EINVAL;
+ }
+
+ if (h && h->validate && h->validate(cd, json)) {
+ json_object_put(jobj);
+ log_dbg(cd, "Token type %s validation failed.", h->name);
+ return -EINVAL;
+ }
+
+ json_object_object_add(jobj_tokens, num, jobj);
+ if (LUKS2_check_json_size(cd, hdr)) {
+ log_dbg(cd, "Not enough space in header json area for new token.");
+ json_object_object_del(jobj_tokens, num);
+ return -ENOSPC;
+ }
+ }
+
+ if (commit)
+ return LUKS2_hdr_write(cd, hdr) ?: token;
+
+ return token;
+}
+
+crypt_token_info LUKS2_token_status(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char **type)
+{
+ const char *tmp;
+ const crypt_token_handler *th;
+ json_object *jobj_type, *jobj_token;
+
+ if (token < 0 || token >= LUKS2_TOKENS_MAX)
+ return CRYPT_TOKEN_INVALID;
+
+ if (!(jobj_token = LUKS2_get_token_jobj(hdr, token)))
+ return CRYPT_TOKEN_INACTIVE;
+
+ json_object_object_get_ex(jobj_token, "type", &jobj_type);
+ tmp = json_object_get_string(jobj_type);
+
+ if ((th = LUKS2_token_handler_type(cd, tmp))) {
+ if (type)
+ *type = th->name;
+ return is_builtin_candidate(tmp) ? CRYPT_TOKEN_INTERNAL : CRYPT_TOKEN_EXTERNAL;
+ }
+
+ if (type)
+ *type = tmp;
+
+ return is_builtin_candidate(tmp) ? CRYPT_TOKEN_INTERNAL_UNKNOWN : CRYPT_TOKEN_EXTERNAL_UNKNOWN;
+}
+
+static const char *token_json_to_string(json_object *jobj_token)
+{
+ return json_object_to_json_string_ext(jobj_token,
+ JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
+}
+
+static int token_is_usable(struct luks2_hdr *hdr, json_object *jobj_token, int segment,
+ crypt_keyslot_priority minimal_priority, bool requires_keyslot)
+{
+ crypt_keyslot_priority keyslot_priority;
+ json_object *jobj_array;
+ int i, keyslot, len, r = -ENOENT;
+
+ if (!jobj_token)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_token, "keyslots", &jobj_array))
+ return -EINVAL;
+
+ if (segment < 0 && segment != CRYPT_ANY_SEGMENT)
+ return -EINVAL;
+
+	/* a token with no assigned keyslots returns -ENOENT even for CRYPT_ANY_SEGMENT */
+ len = json_object_array_length(jobj_array);
+ if (len < 0)
+ return -ENOENT;
+
+ if (!requires_keyslot)
+ return 0;
+
+ if (!len)
+ return -ENOENT;
+
+ for (i = 0; i < len; i++) {
+ keyslot = atoi(json_object_get_string(json_object_array_get_idx(jobj_array, i)));
+
+ keyslot_priority = LUKS2_keyslot_priority_get(hdr, keyslot);
+ if (keyslot_priority == CRYPT_SLOT_PRIORITY_INVALID)
+ return -EINVAL;
+
+ if (keyslot_priority < minimal_priority)
+ continue;
+
+ r = LUKS2_keyslot_for_segment(hdr, keyslot, segment);
+ if (r != -ENOENT)
+ return r;
+ }
+
+ return r;
+}
+
+static int translate_errno(struct crypt_device *cd, int ret_val, const char *type)
+{
+ if ((ret_val > 0 || ret_val == -EINVAL || ret_val == -EPERM) && !is_builtin_candidate(type)) {
+ log_dbg(cd, "%s token handler returned %d. Changing to %d.", type, ret_val, -ENOENT);
+ ret_val = -ENOENT;
+ }
+
+ return ret_val;
+}
+
+static int token_open(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ json_object *jobj_token,
+ const char *type,
+ int segment,
+ crypt_keyslot_priority priority,
+ const char *pin,
+ size_t pin_size,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr,
+ bool requires_keyslot)
+{
+ const struct crypt_token_handler_v2 *h;
+ json_object *jobj_type;
+ int r;
+
+ assert(token >= 0);
+ assert(jobj_token);
+ assert(priority >= 0);
+
+ if (type) {
+ if (!json_object_object_get_ex(jobj_token, "type", &jobj_type))
+ return -EINVAL;
+ if (strcmp(type, json_object_get_string(jobj_type)))
+ return -ENOENT;
+ }
+
+ r = token_is_usable(hdr, jobj_token, segment, priority, requires_keyslot);
+ if (r < 0) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Token %d unusable for segment %d with desired keyslot priority %d.",
+ token, segment, priority);
+ return r;
+ }
+
+ if (!(h = LUKS2_token_handler(cd, token)))
+ return -ENOENT;
+
+ if (h->validate && h->validate(cd, token_json_to_string(jobj_token))) {
+ log_dbg(cd, "Token %d (%s) validation failed.", token, h->name);
+ return -ENOENT;
+ }
+
+ if (pin && !h->open_pin)
+ r = -ENOENT;
+ else if (pin)
+ r = translate_errno(cd, h->open_pin(cd, token, pin, pin_size, buffer, buffer_len, usrptr), h->name);
+ else
+ r = translate_errno(cd, h->open(cd, token, buffer, buffer_len, usrptr), h->name);
+ if (r < 0)
+ log_dbg(cd, "Token %d (%s) open failed with %d.", token, h->name, r);
+
+ return r;
+}
+
+static void LUKS2_token_buffer_free(struct crypt_device *cd,
+ int token,
+ void *buffer,
+ size_t buffer_len)
+{
+ const crypt_token_handler *h = LUKS2_token_handler(cd, token);
+
+ if (h && h->buffer_free)
+ h->buffer_free(buffer, buffer_len);
+ else {
+ crypt_safe_memzero(buffer, buffer_len);
+ free(buffer);
+ }
+}
+
+static bool break_loop_retval(int r)
+{
+ if (r == -ENOENT || r == -EPERM || r == -EAGAIN || r == -ENOANO)
+ return false;
+ return true;
+}
+
+static void update_return_errno(int r, int *stored)
+{
+ if (*stored == -ENOANO)
+ return;
+ else if (r == -ENOANO)
+ *stored = r;
+ else if (r == -EAGAIN && *stored != -ENOANO)
+ *stored = r;
+ else if (r == -EPERM && (*stored != -ENOANO && *stored != -EAGAIN))
+ *stored = r;
+}
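+
+/*
+ * In effect update_return_errno() keeps the most significant errno seen
+ * so far, with significance ENOANO > EAGAIN > EPERM > ENOENT (see the
+ * return priorities comment in LUKS2_token_unlock_key() below).
+ */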
+
+static int LUKS2_keyslot_open_by_token(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ int segment,
+ crypt_keyslot_priority priority,
+ const char *buffer,
+ size_t buffer_len,
+ struct volume_key **vk)
+{
+ crypt_keyslot_priority keyslot_priority;
+ json_object *jobj_token, *jobj_token_keyslots, *jobj_type, *jobj;
+ unsigned int num = 0;
+ int i, r = -ENOENT, stored_retval = -ENOENT;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_token, "type", &jobj_type))
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
+ if (!jobj_token_keyslots)
+ return -EINVAL;
+
+ /* Try to open keyslot referenced in token */
+ for (i = 0; i < (int) json_object_array_length(jobj_token_keyslots) && r < 0; i++) {
+ jobj = json_object_array_get_idx(jobj_token_keyslots, i);
+ num = atoi(json_object_get_string(jobj));
+ keyslot_priority = LUKS2_keyslot_priority_get(hdr, num);
+ if (keyslot_priority == CRYPT_SLOT_PRIORITY_INVALID)
+ return -EINVAL;
+ if (keyslot_priority < priority)
+ continue;
+ log_dbg(cd, "Trying to open keyslot %u with token %d (type %s).",
+ num, token, json_object_get_string(jobj_type));
+ r = LUKS2_keyslot_open(cd, num, segment, buffer, buffer_len, vk);
+ /* short circuit on fatal error */
+ if (r < 0 && r != -EPERM && r != -ENOENT)
+ return r;
+ /* save -EPERM in case no other keyslot is usable */
+ if (r == -EPERM)
+ stored_retval = r;
+ }
+
+ if (r < 0)
+ return stored_retval;
+
+ return num;
+}
+
+static bool token_is_blocked(int token, uint32_t *block_list)
+{
+	/* safe for now, but keep the assert in case LUKS2_TOKENS_MAX grows */
+ assert(token >= 0 && (size_t)token < BITFIELD_SIZE(block_list));
+
+ return (*block_list & (UINT32_C(1) << token));
+}
+
+static void token_block(int token, uint32_t *block_list)
+{
+	/* safe for now, but keep the assert in case LUKS2_TOKENS_MAX grows */
+ assert(token >= 0 && (size_t)token < BITFIELD_SIZE(block_list));
+
+ *block_list |= (UINT32_C(1) << token);
+}
+
+static int token_open_priority(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ json_object *jobj_tokens,
+ const char *type,
+ int segment,
+ crypt_keyslot_priority priority,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr,
+ int *stored_retval,
+ uint32_t *block_list,
+ struct volume_key **vk)
+{
+ char *buffer;
+ size_t buffer_size;
+ int token, r;
+
+ assert(stored_retval);
+ assert(block_list);
+
+ json_object_object_foreach(jobj_tokens, slot, val) {
+ token = atoi(slot);
+ if (token_is_blocked(token, block_list))
+ continue;
+ r = token_open(cd, hdr, token, val, type, segment, priority, pin, pin_size, &buffer, &buffer_size, usrptr, true);
+ if (!r) {
+ r = LUKS2_keyslot_open_by_token(cd, hdr, token, segment, priority,
+ buffer, buffer_size, vk);
+ LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
+ }
+
+ if (r == -ENOANO)
+ token_block(token, block_list);
+
+ if (break_loop_retval(r))
+ return r;
+
+ update_return_errno(r, stored_retval);
+ }
+
+ return *stored_retval;
+}
+
+static int token_open_any(struct crypt_device *cd, struct luks2_hdr *hdr, const char *type, int segment,
+ const char *pin, size_t pin_size, void *usrptr, struct volume_key **vk)
+{
+ json_object *jobj_tokens;
+ int r, retval = -ENOENT;
+	uint32_t blocked = 0; /* bitmap of tokens blocked from the loop after returning -ENOANO (wrong/missing pin) */
+
+ json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens);
+
+	/* passing usrptr for CRYPT_ANY_TOKEN does not make sense without a specific type */
+ if (!type)
+ usrptr = NULL;
+
+ r = token_open_priority(cd, hdr, jobj_tokens, type, segment, CRYPT_SLOT_PRIORITY_PREFER,
+ pin, pin_size, usrptr, &retval, &blocked, vk);
+ if (break_loop_retval(r))
+ return r;
+
+ return token_open_priority(cd, hdr, jobj_tokens, type, segment, CRYPT_SLOT_PRIORITY_NORMAL,
+ pin, pin_size, usrptr, &retval, &blocked, vk);
+}
+
+int LUKS2_token_unlock_key(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ int segment,
+ void *usrptr,
+ struct volume_key **vk)
+{
+ char *buffer;
+ size_t buffer_size;
+ json_object *jobj_token;
+ int r = -ENOENT;
+
+ assert(vk);
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ if (segment < 0 && segment != CRYPT_ANY_SEGMENT)
+ return -EINVAL;
+
+ if (token >= 0 && token < LUKS2_TOKENS_MAX) {
+ if ((jobj_token = LUKS2_get_token_jobj(hdr, token))) {
+ r = token_open(cd, hdr, token, jobj_token, type, segment, CRYPT_SLOT_PRIORITY_IGNORE,
+ pin, pin_size, &buffer, &buffer_size, usrptr, true);
+ if (!r) {
+ r = LUKS2_keyslot_open_by_token(cd, hdr, token, segment, CRYPT_SLOT_PRIORITY_IGNORE,
+ buffer, buffer_size, vk);
+ LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
+ }
+ }
+ } else if (token == CRYPT_ANY_TOKEN)
+		/*
+		 * return priorities (ordered from least to most significant):
+		 * ENOENT - unusable for activation (no token handler, invalid token metadata, not assigned to volume segment, etc.)
+		 * EPERM - usable, but the token-provided passphrase did not unlock any assigned keyslot
+		 * EAGAIN - usable, but not ready (token HW is missing)
+		 * ENOANO - ready, but the token pin is wrong or missing
+		 *
+		 * success (>= 0) or any other negative errno short-circuits the token
+		 * activation loop immediately
+		 */
+ r = token_open_any(cd, hdr, type, segment, pin, pin_size, usrptr, vk);
+ else
+ r = -EINVAL;
+
+ return r;
+}
+
+int LUKS2_token_open_and_activate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *name,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ uint32_t flags,
+ void *usrptr)
+{
+ bool use_keyring;
+ int keyslot, r, segment;
+ struct volume_key *vk = NULL;
+
+ if (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY)
+ segment = CRYPT_ANY_SEGMENT;
+ else
+ segment = CRYPT_DEFAULT_SEGMENT;
+
+ r = LUKS2_token_unlock_key(cd, hdr, token, type, pin, pin_size, segment, usrptr, &vk);
+ if (r < 0)
+ return r;
+
+ assert(vk);
+
+ keyslot = r;
+
+ if (!crypt_use_keyring_for_vk(cd))
+ use_keyring = false;
+ else
+ use_keyring = ((name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
+ (flags & CRYPT_ACTIVATE_KEYRING_KEY));
+
+ if (use_keyring) {
+ if (!(r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd, hdr, vk, keyslot)))
+ flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+ }
+
+ if (r >= 0 && name)
+ r = LUKS2_activate(cd, name, vk, flags);
+
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vk);
+ crypt_free_volume_key(vk);
+
+ return r < 0 ? r : keyslot;
+}
+
+void LUKS2_token_dump(struct crypt_device *cd, int token)
+{
+ const crypt_token_handler *h;
+ json_object *jobj_token;
+
+ h = LUKS2_token_handler(cd, token);
+ if (h && h->dump) {
+ jobj_token = LUKS2_get_token_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), token);
+ if (jobj_token)
+ h->dump(cd, json_object_to_json_string_ext(jobj_token,
+ JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE));
+ }
+}
+
+int LUKS2_token_json_get(struct luks2_hdr *hdr, int token, const char **json)
+{
+ json_object *jobj_token;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ *json = token_json_to_string(jobj_token);
+ return 0;
+}
+
+static int assign_one_keyslot(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int token, int keyslot, int assign)
+{
+ json_object *jobj1, *jobj_token, *jobj_token_keyslots;
+ char num[16];
+
+ log_dbg(cd, "Keyslot %i %s token %i.", keyslot, assign ? "assigned to" : "unassigned from", token);
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
+ if (!jobj_token_keyslots)
+ return -EINVAL;
+
+ if (snprintf(num, sizeof(num), "%d", keyslot) < 0)
+ return -EINVAL;
+
+ if (assign) {
+ jobj1 = LUKS2_array_jobj(jobj_token_keyslots, num);
+ if (!jobj1)
+ json_object_array_add(jobj_token_keyslots, json_object_new_string(num));
+ } else {
+ jobj1 = LUKS2_array_remove(jobj_token_keyslots, num);
+ if (jobj1)
+ json_object_object_add(jobj_token, "keyslots", jobj1);
+ }
+
+ return 0;
+}
+
+static int assign_one_token(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, int token, int assign)
+{
+ json_object *jobj_keyslots;
+ int r = 0;
+
+ if (!LUKS2_get_token_jobj(hdr, token))
+ return -EINVAL;
+
+ if (keyslot == CRYPT_ANY_SLOT) {
+ json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);
+
+ json_object_object_foreach(jobj_keyslots, key, val) {
+ UNUSED(val);
+ r = assign_one_keyslot(cd, hdr, token, atoi(key), assign);
+ if (r < 0)
+ break;
+ }
+ } else
+ r = assign_one_keyslot(cd, hdr, token, keyslot, assign);
+
+ return r;
+}
+
+int LUKS2_token_assign(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, int token, int assign, int commit)
+{
+ json_object *jobj_tokens;
+ int r = 0;
+
+ if ((keyslot < 0 && keyslot != CRYPT_ANY_SLOT) || keyslot >= LUKS2_KEYSLOTS_MAX ||
+ (token < 0 && token != CRYPT_ANY_TOKEN) || token >= LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ if (token == CRYPT_ANY_TOKEN) {
+ json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens);
+
+ json_object_object_foreach(jobj_tokens, key, val) {
+ UNUSED(val);
+ r = assign_one_token(cd, hdr, keyslot, atoi(key), assign);
+ if (r < 0)
+ break;
+ }
+ } else
+ r = assign_one_token(cd, hdr, keyslot, token, assign);
+
+ if (r < 0)
+ return r;
+
+ if (commit)
+ return LUKS2_hdr_write(cd, hdr) ?: token;
+
+ return token;
+}
+
+static int token_is_assigned(struct luks2_hdr *hdr, int keyslot, int token)
+{
+ int i;
+ json_object *jobj, *jobj_token_keyslots,
+ *jobj_token = LUKS2_get_token_jobj(hdr, token);
+
+ if (!jobj_token)
+ return -ENOENT;
+
+ json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
+
+ for (i = 0; i < (int) json_object_array_length(jobj_token_keyslots); i++) {
+ jobj = json_object_array_get_idx(jobj_token_keyslots, i);
+ if (keyslot == atoi(json_object_get_string(jobj)))
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int LUKS2_token_is_assigned(struct luks2_hdr *hdr, int keyslot, int token)
+{
+ if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX || token < 0 || token >= LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ return token_is_assigned(hdr, keyslot, token);
+}
+
+int LUKS2_tokens_count(struct luks2_hdr *hdr)
+{
+ json_object *jobj_tokens = LUKS2_get_tokens_jobj(hdr);
+ if (!jobj_tokens)
+ return -EINVAL;
+
+ return json_object_object_length(jobj_tokens);
+}
+
+int LUKS2_token_assignment_copy(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot_from,
+ int keyslot_to,
+ int commit)
+{
+ int i, r;
+
+ if (keyslot_from < 0 || keyslot_from >= LUKS2_KEYSLOTS_MAX || keyslot_to < 0 || keyslot_to >= LUKS2_KEYSLOTS_MAX)
+ return -EINVAL;
+
+ r = LUKS2_tokens_count(hdr);
+ if (r <= 0)
+ return r;
+
+ for (i = 0; i < LUKS2_TOKENS_MAX; i++) {
+ if (!token_is_assigned(hdr, keyslot_from, i)) {
+ if ((r = assign_one_token(cd, hdr, keyslot_to, i, 1)))
+ return r;
+ }
+ }
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+int LUKS2_token_unlock_passphrase(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr,
+ char **passphrase,
+ size_t *passphrase_size)
+{
+ char *buffer;
+ size_t buffer_size;
+ json_object *jobj_token, *jobj_tokens;
+ int r = -ENOENT, retval = -ENOENT;
+
+ if (!hdr)
+ return -EINVAL;
+
+ if (token >= 0 && token < LUKS2_TOKENS_MAX) {
+ if ((jobj_token = LUKS2_get_token_jobj(hdr, token)))
+ r = token_open(cd, hdr, token, jobj_token, type, CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE,
+ pin, pin_size, &buffer, &buffer_size, usrptr, false);
+ } else if (token == CRYPT_ANY_TOKEN) {
+ json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens);
+
+ if (!type)
+ usrptr = NULL;
+
+ json_object_object_foreach(jobj_tokens, slot, val) {
+ token = atoi(slot);
+ r = token_open(cd, hdr, token, val, type, CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE,
+ pin, pin_size, &buffer, &buffer_size, usrptr, false);
+
+			/*
+			 * return priorities (ordered from least to most significant):
+			 * ENOENT - unusable for activation (no token handler, invalid token metadata, etc.)
+			 * EAGAIN - usable, but not ready (token HW is missing)
+			 * ENOANO - ready, but the token pin is wrong or missing
+			 *
+			 * success (>= 0) or any other negative errno short-circuits the token
+			 * activation loop immediately
+			 */
+ if (break_loop_retval(r))
+ goto out;
+
+ update_return_errno(r, &retval);
+ }
+ r = retval;
+ } else
+ r = -EINVAL;
+out:
+ if (!r) {
+ *passphrase = crypt_safe_alloc(buffer_size);
+ if (*passphrase) {
+ memcpy(*passphrase, buffer, buffer_size);
+ *passphrase_size = buffer_size;
+ } else
+ r = -ENOMEM;
+ LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
+ }
+
+ if (!r)
+ return token;
+
+ return r;
+}
diff --git a/lib/luks2/luks2_token_keyring.c b/lib/luks2/luks2_token_keyring.c
new file mode 100644
index 0000000..ad18798
--- /dev/null
+++ b/lib/luks2/luks2_token_keyring.c
@@ -0,0 +1,144 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, kernel keyring token
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+int keyring_open(struct crypt_device *cd,
+ int token,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr __attribute__((unused)))
+{
+ json_object *jobj_token, *jobj_key;
+ struct luks2_hdr *hdr;
+ int r;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_token, "key_description", &jobj_key);
+
+ r = keyring_get_passphrase(json_object_get_string(jobj_key), buffer, buffer_len);
+ if (r == -ENOTSUP) {
+ log_dbg(cd, "Kernel keyring features disabled.");
+ return -ENOENT;
+ } else if (r < 0) {
+ log_dbg(cd, "keyring_get_passphrase failed (error %d)", r);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int keyring_validate(struct crypt_device *cd,
+ const char *json)
+{
+ enum json_tokener_error jerr;
+ json_object *jobj_token, *jobj_key;
+ int r = 1;
+
+ log_dbg(cd, "Validating keyring token json");
+
+ jobj_token = json_tokener_parse_verbose(json, &jerr);
+ if (!jobj_token) {
+ log_dbg(cd, "Keyring token JSON parse failed.");
+ return r;
+ }
+
+ if (json_object_object_length(jobj_token) != 3) {
+ log_dbg(cd, "Keyring token is expected to have exactly 3 fields.");
+ goto out;
+ }
+
+ if (!json_object_object_get_ex(jobj_token, "key_description", &jobj_key)) {
+ log_dbg(cd, "missing key_description field.");
+ goto out;
+ }
+
+ if (!json_object_is_type(jobj_key, json_type_string)) {
+ log_dbg(cd, "key_description is not a string.");
+ goto out;
+ }
+
+ /* TODO: perhaps check that key description is in '%s:%s'
+ * format where both strings are not empty */
+ r = !strlen(json_object_get_string(jobj_key));
+out:
+ json_object_put(jobj_token);
+ return r;
+}
+
+void keyring_dump(struct crypt_device *cd, const char *json)
+{
+ enum json_tokener_error jerr;
+ json_object *jobj_token, *jobj_key;
+
+ jobj_token = json_tokener_parse_verbose(json, &jerr);
+ if (!jobj_token)
+ return;
+
+ if (!json_object_object_get_ex(jobj_token, "key_description", &jobj_key)) {
+ json_object_put(jobj_token);
+ return;
+ }
+
+ log_std(cd, "\tKey description: %s\n", json_object_get_string(jobj_key));
+
+ json_object_put(jobj_token);
+}
+
+int LUKS2_token_keyring_json(char *buffer, size_t buffer_size,
+ const struct crypt_token_params_luks2_keyring *keyring_params)
+{
+ int r;
+
+ r = snprintf(buffer, buffer_size, "{ \"type\": \"%s\", \"keyslots\":[],\"key_description\":\"%s\"}",
+ LUKS2_TOKEN_KEYRING, keyring_params->key_description);
+ if (r < 0 || (size_t)r >= buffer_size)
+ return -EINVAL;
+
+ return 0;
+}
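+
+/*
+ * For illustration, with key_description "cryptsetup:mykey" the buffer
+ * receives (LUKS2_TOKEN_KEYRING expands to the builtin keyring token
+ * name "luks2-keyring"):
+ *
+ *    { "type": "luks2-keyring", "keyslots":[],
+ *      "key_description":"cryptsetup:mykey" }
+ */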
+
+int LUKS2_token_keyring_get(struct luks2_hdr *hdr,
+ int token, struct crypt_token_params_luks2_keyring *keyring_params)
+{
+ json_object *jobj_token, *jobj;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ json_object_object_get_ex(jobj_token, "type", &jobj);
+ assert(!strcmp(json_object_get_string(jobj), LUKS2_TOKEN_KEYRING));
+
+ json_object_object_get_ex(jobj_token, "key_description", &jobj);
+
+ keyring_params->key_description = json_object_get_string(jobj);
+
+ return token;
+}
+
+void keyring_buffer_free(void *buffer, size_t buffer_len __attribute__((unused)))
+{
+ crypt_safe_free(buffer);
+}
diff --git a/lib/nls.h b/lib/nls.h
new file mode 100644
index 0000000..39760b1
--- /dev/null
+++ b/lib/nls.h
@@ -0,0 +1,34 @@
+#ifndef CRYPTSETUP_NLS_H
+#define CRYPTSETUP_NLS_H
+
+#ifndef LOCALEDIR
+#define LOCALEDIR "/usr/share/locale"
+#endif
+
+#ifdef HAVE_LOCALE_H
+# include <locale.h>
+#else
+# undef setlocale
+# define setlocale(Category, Locale) /* empty */
+#endif
+
+#ifdef ENABLE_NLS
+# include <libintl.h>
+# define _(Text) gettext (Text)
+# ifdef gettext_noop
+# define N_(String) gettext_noop (String)
+# else
+# define N_(String) (String)
+# endif
+#else
+# undef bindtextdomain
+# define bindtextdomain(Domain, Directory) /* empty */
+# undef textdomain
+# define textdomain(Domain) /* empty */
+# define _(Text) (Text)
+# define N_(Text) (Text)
+# define ngettext(Singular, Plural, Count) \
+ ( (Count) == 1 ? (Singular) : (Plural) )
+#endif
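+
+/*
+ * Usage sketch: _() marks a string for runtime translation, while N_()
+ * only marks it for extraction (useful in static initializers):
+ *
+ *    printf(_("Generating key.\n"));
+ *    static const char *hint = N_("System is out of entropy.");
+ */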
+
+#endif /* CRYPTSETUP_NLS_H */
diff --git a/lib/random.c b/lib/random.c
new file mode 100644
index 0000000..0dfcff9
--- /dev/null
+++ b/lib/random.c
@@ -0,0 +1,244 @@
+/*
+ * cryptsetup kernel RNG access functions
+ *
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/select.h>
+
+#include "libcryptsetup.h"
+#include "internal.h"
+
+static int random_initialised = 0;
+
+#define URANDOM_DEVICE "/dev/urandom"
+static int urandom_fd = -1;
+
+#define RANDOM_DEVICE "/dev/random"
+static int random_fd = -1;
+
+/* Read random chunk - gathered data usually appears with this granularity */
+#define RANDOM_DEVICE_CHUNK 8
+
+/* Timeout to print warning if no random data (entropy) */
+#define RANDOM_DEVICE_TIMEOUT 5
+
+/* URANDOM_DEVICE access */
+static int _get_urandom(char *buf, size_t len)
+{
+ int r;
+ size_t old_len = len;
+ char *old_buf = buf;
+
+ assert(urandom_fd != -1);
+
+ while (len) {
+ r = read(urandom_fd, buf, len);
+ if (r == -1 && errno != EINTR)
+ return -EINVAL;
+ if (r > 0) {
+ len -= r;
+ buf += r;
+ }
+ }
+
+ assert(len == 0);
+ assert((size_t)(buf - old_buf) == old_len);
+
+ return 0;
+}
+
+static void _get_random_progress(struct crypt_device *ctx, int warn,
+ size_t expected_len, size_t read_len)
+{
+ if (warn)
+ log_std(ctx,
+ _("System is out of entropy while generating volume key.\n"
+ "Please move mouse or type some text in another window "
+ "to gather some random events.\n"));
+
+ log_std(ctx, _("Generating key (%d%% done).\n"),
+ (int)((expected_len - read_len) * 100 / expected_len));
+}
+
+/* RANDOM_DEVICE access */
+static int _get_random(struct crypt_device *ctx, char *buf, size_t len)
+{
+ int r, warn_once = 1;
+ size_t n, old_len = len;
+ char *old_buf = buf;
+ fd_set fds;
+ struct timeval tv;
+
+ assert(random_fd != -1);
+
+ while (len) {
+ FD_ZERO(&fds);
+ FD_SET(random_fd, &fds);
+
+ tv.tv_sec = RANDOM_DEVICE_TIMEOUT;
+ tv.tv_usec = 0;
+
+ r = select(random_fd + 1, &fds, NULL, NULL, &tv);
+ if(r == -1)
+ return -EINVAL;
+
+ if(!r) {
+ _get_random_progress(ctx, warn_once, old_len, len);
+ warn_once = 0;
+ continue;
+ }
+
+ do {
+ n = RANDOM_DEVICE_CHUNK;
+ if (len < RANDOM_DEVICE_CHUNK)
+ n = len;
+
+ r = read(random_fd, buf, n);
+
+ if (r == -1 && errno == EINTR) {
+ r = 0;
+ continue;
+ }
+
+ /* bogus read? */
+ if(r > (int)n)
+ return -EINVAL;
+
+ /* random device is opened with O_NONBLOCK, EAGAIN is expected */
+ if (r == -1 && (errno != EAGAIN && errno != EWOULDBLOCK))
+ return -EINVAL;
+
+ if (r > 0) {
+ len -= r;
+ buf += r;
+ }
+ } while (len && r > 0);
+ }
+
+ assert(len == 0);
+ assert((size_t)(buf - old_buf) == old_len);
+
+ if (!warn_once)
+ _get_random_progress(ctx, 0, old_len, len);
+
+ return 0;
+}
+
+/* Initialisation of both RNG file descriptors is mandatory */
+int crypt_random_init(struct crypt_device *ctx)
+{
+ if (random_initialised)
+ return 0;
+
+ /* Used for CRYPT_RND_NORMAL */
+ if(urandom_fd == -1)
+ urandom_fd = open(URANDOM_DEVICE, O_RDONLY | O_CLOEXEC);
+ if(urandom_fd == -1)
+ goto err;
+
+ /* Used for CRYPT_RND_KEY */
+ if(random_fd == -1)
+ random_fd = open(RANDOM_DEVICE, O_RDONLY | O_NONBLOCK | O_CLOEXEC);
+ if(random_fd == -1)
+ goto err;
+
+ if (crypt_fips_mode())
+ log_verbose(ctx, _("Running in FIPS mode."));
+
+ random_initialised = 1;
+ return 0;
+err:
+ crypt_random_exit();
+ log_err(ctx, _("Fatal error during RNG initialisation."));
+ return -ENOSYS;
+}
+
+/* coverity[ -taint_source : arg-1 ] */
+int crypt_random_get(struct crypt_device *ctx, char *buf, size_t len, int quality)
+{
+ int status, rng_type;
+
+ switch(quality) {
+ case CRYPT_RND_NORMAL:
+ status = _get_urandom(buf, len);
+ break;
+ case CRYPT_RND_SALT:
+ if (crypt_fips_mode())
+ status = crypt_backend_rng(buf, len, quality, 1);
+ else
+ status = _get_urandom(buf, len);
+ break;
+ case CRYPT_RND_KEY:
+ if (crypt_fips_mode()) {
+ status = crypt_backend_rng(buf, len, quality, 1);
+ break;
+ }
+ rng_type = ctx ? crypt_get_rng_type(ctx) :
+ crypt_random_default_key_rng();
+ switch (rng_type) {
+ case CRYPT_RNG_URANDOM:
+ status = _get_urandom(buf, len);
+ break;
+ case CRYPT_RNG_RANDOM:
+ status = _get_random(ctx, buf, len);
+ break;
+ default:
+ abort();
+ }
+ break;
+ default:
+ log_err(ctx, _("Unknown RNG quality requested."));
+ return -EINVAL;
+ }
+
+ if (status)
+ log_err(ctx, _("Error reading from RNG."));
+
+ return status;
+}
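+
+/*
+ * Typical use (illustrative): generating a fresh 32-byte volume key with
+ * the strongest quality class; in FIPS mode the call is redirected to
+ * the crypto backend RNG:
+ *
+ *    char key[32];
+ *
+ *    if (crypt_random_get(cd, key, sizeof(key), CRYPT_RND_KEY) < 0)
+ *            return -EINVAL;
+ */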
+
+void crypt_random_exit(void)
+{
+ random_initialised = 0;
+
+ if(random_fd != -1) {
+ (void)close(random_fd);
+ random_fd = -1;
+ }
+
+ if(urandom_fd != -1) {
+ (void)close(urandom_fd);
+ urandom_fd = -1;
+ }
+}
+
+int crypt_random_default_key_rng(void)
+{
+ /* coverity[pointless_string_compare] */
+ if (!strcmp(DEFAULT_RNG, RANDOM_DEVICE))
+ return CRYPT_RNG_RANDOM;
+
+ /* coverity[pointless_string_compare] */
+ if (!strcmp(DEFAULT_RNG, URANDOM_DEVICE))
+ return CRYPT_RNG_URANDOM;
+
+ /* RNG misconfiguration is fatal */
+ abort();
+}
diff --git a/lib/setup.c b/lib/setup.c
new file mode 100644
index 0000000..1c9d47d
--- /dev/null
+++ b/lib/setup.c
@@ -0,0 +1,6564 @@
+/*
+ * libcryptsetup - cryptsetup library
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/utsname.h>
+#include <errno.h>
+
+#include "libcryptsetup.h"
+#include "luks1/luks.h"
+#include "luks2/luks2.h"
+#include "loopaes/loopaes.h"
+#include "verity/verity.h"
+#include "tcrypt/tcrypt.h"
+#include "integrity/integrity.h"
+#include "bitlk/bitlk.h"
+#include "fvault2/fvault2.h"
+#include "utils_device_locking.h"
+#include "internal.h"
+#include "keyslot_context.h"
+
+#define CRYPT_CD_UNRESTRICTED (1 << 0)
+#define CRYPT_CD_QUIET (1 << 1)
+
+struct crypt_device {
+ char *type;
+
+ struct device *device;
+ struct device *metadata_device;
+
+ struct volume_key *volume_key;
+ int rng_type;
+ uint32_t compatibility;
+ struct crypt_pbkdf_type pbkdf;
+
+ /* global context scope settings */
+ unsigned key_in_keyring:1;
+
+ uint64_t data_offset;
+ uint64_t metadata_size; /* Used in LUKS2 format */
+ uint64_t keyslots_size; /* Used in LUKS2 format */
+
+ /* Workaround for OOM during parallel activation (like in systemd) */
+ bool memory_hard_pbkdf_lock_enabled;
+ struct crypt_lock_handle *pbkdf_memory_hard_lock;
+
+ union {
+ struct { /* used in CRYPT_LUKS1 */
+ struct luks_phdr hdr;
+ char *cipher_spec;
+ } luks1;
+ struct { /* used in CRYPT_LUKS2 */
+ struct luks2_hdr hdr;
+ char cipher[MAX_CIPHER_LEN]; /* only for compatibility */
+ char cipher_mode[MAX_CIPHER_LEN]; /* only for compatibility */
+ char *keyslot_cipher;
+ unsigned int keyslot_key_size;
+ struct luks2_reencrypt *rh;
+ } luks2;
+ struct { /* used in CRYPT_PLAIN */
+ struct crypt_params_plain hdr;
+ char *cipher_spec;
+ char *cipher;
+ const char *cipher_mode;
+ unsigned int key_size;
+ } plain;
+ struct { /* used in CRYPT_LOOPAES */
+ struct crypt_params_loopaes hdr;
+ char *cipher_spec;
+ char *cipher;
+ const char *cipher_mode;
+ unsigned int key_size;
+ } loopaes;
+ struct { /* used in CRYPT_VERITY */
+ struct crypt_params_verity hdr;
+ const char *root_hash;
+ unsigned int root_hash_size;
+ char *uuid;
+ struct device *fec_device;
+ } verity;
+ struct { /* used in CRYPT_TCRYPT */
+ struct crypt_params_tcrypt params;
+ struct tcrypt_phdr hdr;
+ } tcrypt;
+ struct { /* used in CRYPT_INTEGRITY */
+ struct crypt_params_integrity params;
+ struct volume_key *journal_mac_key;
+ struct volume_key *journal_crypt_key;
+ uint32_t sb_flags;
+ } integrity;
+ struct { /* used in CRYPT_BITLK */
+ struct bitlk_metadata params;
+ char *cipher_spec;
+ } bitlk;
+ struct { /* used in CRYPT_FVAULT2 */
+ struct fvault2_params params;
+ } fvault2;
+ struct { /* used if initialized without header by name */
+ char *active_name;
+ /* buffers, must refresh from kernel on every query */
+ char cipher_spec[MAX_CIPHER_LEN*2+1];
+ char cipher[MAX_CIPHER_LEN];
+ const char *cipher_mode;
+ unsigned int key_size;
+ } none;
+ } u;
+
+ /* callbacks definitions */
+ void (*log)(int level, const char *msg, void *usrptr);
+ void *log_usrptr;
+ int (*confirm)(const char *msg, void *usrptr);
+ void *confirm_usrptr;
+};
+
+/* Just to suppress redundant messages about crypto backend */
+static int _crypto_logged = 0;
+
+/* Log helper */
+static void (*_default_log)(int level, const char *msg, void *usrptr) = NULL;
+static void *_default_log_usrptr = NULL;
+static int _debug_level = 0;
+
+/* Library can do metadata locking */
+static int _metadata_locking = 1;
+
+/* Library scope detection for kernel keyring support */
+static int _kernel_keyring_supported;
+
+/* Library allowed to use kernel keyring for loading VK in kernel crypto layer */
+static int _vk_via_keyring = 1;
+
+void crypt_set_debug_level(int level)
+{
+ _debug_level = level;
+}
+
+int crypt_get_debug_level(void)
+{
+ return _debug_level;
+}
+
+void crypt_log(struct crypt_device *cd, int level, const char *msg)
+{
+ if (!msg)
+ return;
+
+ if (level < _debug_level)
+ return;
+
+ if (cd && cd->log)
+ cd->log(level, msg, cd->log_usrptr);
+ else if (_default_log)
+ _default_log(level, msg, _default_log_usrptr);
+ /* Default to stdout/stderr if there is no callback. */
+ else
+ fprintf(level == CRYPT_LOG_ERROR ? stderr : stdout, "%s", msg);
+}
+
+__attribute__((format(printf, 3, 4)))
+void crypt_logf(struct crypt_device *cd, int level, const char *format, ...)
+{
+ va_list argp;
+ char target[LOG_MAX_LEN + 2];
+ int len;
+
+ va_start(argp, format);
+
+ len = vsnprintf(&target[0], LOG_MAX_LEN, format, argp);
+ if (len > 0 && len < LOG_MAX_LEN) {
+ /* All verbose and error messages in tools end with EOL. */
+ if (level == CRYPT_LOG_VERBOSE || level == CRYPT_LOG_ERROR ||
+ level == CRYPT_LOG_DEBUG || level == CRYPT_LOG_DEBUG_JSON)
+ strncat(target, "\n", LOG_MAX_LEN);
+
+ crypt_log(cd, level, target);
+ }
+
+ va_end(argp);
+}
+
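+/*
+ * Device path helpers: with a detached header the metadata device differs
+ * from the (ciphertext) data device; otherwise both resolve to the same
+ * underlying device.
+ */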
+static const char *mdata_device_path(struct crypt_device *cd)
+{
+ return device_path(cd->metadata_device ?: cd->device);
+}
+
+static const char *data_device_path(struct crypt_device *cd)
+{
+ return device_path(cd->device);
+}
+
+/* internal only */
+struct device *crypt_metadata_device(struct crypt_device *cd)
+{
+ return cd->metadata_device ?: cd->device;
+}
+
+struct device *crypt_data_device(struct crypt_device *cd)
+{
+ return cd->device;
+}
+
+int init_crypto(struct crypt_device *ctx)
+{
+ struct utsname uts;
+ int r;
+
+ r = crypt_random_init(ctx);
+ if (r < 0) {
+ log_err(ctx, _("Cannot initialize crypto RNG backend."));
+ return r;
+ }
+
+ r = crypt_backend_init(crypt_fips_mode());
+ if (r < 0)
+ log_err(ctx, _("Cannot initialize crypto backend."));
+
+ if (!r && !_crypto_logged) {
+ log_dbg(ctx, "Crypto backend (%s) initialized in cryptsetup library version %s.",
+ crypt_backend_version(), PACKAGE_VERSION);
+ if (!uname(&uts))
+ log_dbg(ctx, "Detected kernel %s %s %s.",
+ uts.sysname, uts.release, uts.machine);
+ _crypto_logged = 1;
+ }
+
+ return r;
+}
+
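+/*
+ * Derive a plain-mode volume key from a passphrase: when a hash is given,
+ * the passphrase is hashed into the key buffer; without a hash the
+ * passphrase is copied directly and truncated to key_size (a shorter
+ * passphrase fills only its own length).
+ */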
+static int process_key(struct crypt_device *cd, const char *hash_name,
+ size_t key_size, const char *pass, size_t passLen,
+ struct volume_key **vk)
+{
+ int r;
+
+ if (!key_size)
+ return -EINVAL;
+
+ *vk = crypt_alloc_volume_key(key_size, NULL);
+ if (!*vk)
+ return -ENOMEM;
+
+ if (hash_name) {
+ r = crypt_plain_hash(cd, hash_name, (*vk)->key, key_size, pass, passLen);
+ if (r < 0) {
+ if (r == -ENOENT)
+ log_err(cd, _("Hash algorithm %s not supported."),
+ hash_name);
+ else
+ log_err(cd, _("Key processing error (using hash %s)."),
+ hash_name);
+ crypt_free_volume_key(*vk);
+ *vk = NULL;
+ return -EINVAL;
+ }
+ } else if (passLen > key_size) {
+ memcpy((*vk)->key, pass, key_size);
+ } else {
+ memcpy((*vk)->key, pass, passLen);
+ }
+
+ return 0;
+}
+
+static int isPLAIN(const char *type)
+{
+ return (type && !strcmp(CRYPT_PLAIN, type));
+}
+
+static int isLUKS1(const char *type)
+{
+ return (type && !strcmp(CRYPT_LUKS1, type));
+}
+
+static int isLUKS2(const char *type)
+{
+ return (type && !strcmp(CRYPT_LUKS2, type));
+}
+
+static int isLUKS(const char *type)
+{
+ return (isLUKS2(type) || isLUKS1(type));
+}
+
+static int isLOOPAES(const char *type)
+{
+ return (type && !strcmp(CRYPT_LOOPAES, type));
+}
+
+static int isVERITY(const char *type)
+{
+ return (type && !strcmp(CRYPT_VERITY, type));
+}
+
+static int isTCRYPT(const char *type)
+{
+ return (type && !strcmp(CRYPT_TCRYPT, type));
+}
+
+static int isINTEGRITY(const char *type)
+{
+ return (type && !strcmp(CRYPT_INTEGRITY, type));
+}
+
+static int isBITLK(const char *type)
+{
+ return (type && !strcmp(CRYPT_BITLK, type));
+}
+
+static int isFVAULT2(const char *type)
+{
+ return (type && !strcmp(CRYPT_FVAULT2, type));
+}
+
+static int _onlyLUKS(struct crypt_device *cd, uint32_t cdflags)
+{
+ int r = 0;
+
+ if (cd && !cd->type) {
+ if (!(cdflags & CRYPT_CD_QUIET))
+ log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
+ r = -EINVAL;
+ }
+
+ if (!cd || !isLUKS(cd->type)) {
+ if (!(cdflags & CRYPT_CD_QUIET))
+ log_err(cd, _("This operation is supported only for LUKS device."));
+ r = -EINVAL;
+ }
+
+ if (r || (cdflags & CRYPT_CD_UNRESTRICTED) || isLUKS1(cd->type))
+ return r;
+
+ return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, cdflags & CRYPT_CD_QUIET);
+}
+
+static int onlyLUKS(struct crypt_device *cd)
+{
+ return _onlyLUKS(cd, 0);
+}
+
+static int _onlyLUKS2(struct crypt_device *cd, uint32_t cdflags, uint32_t mask)
+{
+ int r = 0;
+
+ if (cd && !cd->type) {
+ if (!(cdflags & CRYPT_CD_QUIET))
+ log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
+ r = -EINVAL;
+ }
+
+ if (!cd || !isLUKS2(cd->type)) {
+ if (!(cdflags & CRYPT_CD_QUIET))
+ log_err(cd, _("This operation is supported only for LUKS2 device."));
+ r = -EINVAL;
+ }
+
+ if (r || (cdflags & CRYPT_CD_UNRESTRICTED))
+ return r;
+
+ return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, mask, cdflags & CRYPT_CD_QUIET);
+}
+
+/* Internal only */
+int onlyLUKS2(struct crypt_device *cd)
+{
+ return _onlyLUKS2(cd, 0, 0);
+}
+
+/* Internal only */
+int onlyLUKS2mask(struct crypt_device *cd, uint32_t mask)
+{
+ return _onlyLUKS2(cd, 0, mask);
+}
+
+static void crypt_set_null_type(struct crypt_device *cd)
+{
+ free(cd->type);
+ cd->type = NULL;
+ cd->data_offset = 0;
+ cd->metadata_size = 0;
+ cd->keyslots_size = 0;
+ crypt_safe_memzero(&cd->u, sizeof(cd->u));
+}
+
+static void crypt_reset_null_type(struct crypt_device *cd)
+{
+ if (cd->type)
+ return;
+
+ free(cd->u.none.active_name);
+ cd->u.none.active_name = NULL;
+}
+
+/* keyslot helpers */
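+/*
+ * Resolve CRYPT_ANY_SLOT to the first empty keyslot, or verify that the
+ * explicitly requested slot is valid and inactive.
+ */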
+static int keyslot_verify_or_find_empty(struct crypt_device *cd, int *keyslot)
+{
+ crypt_keyslot_info ki;
+
+ if (*keyslot == CRYPT_ANY_SLOT) {
+ if (isLUKS1(cd->type))
+ *keyslot = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
+ else
+ *keyslot = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, 0);
+ if (*keyslot < 0) {
+ log_err(cd, _("All key slots full."));
+ return -EINVAL;
+ }
+ }
+
+ if (isLUKS1(cd->type))
+ ki = LUKS_keyslot_info(&cd->u.luks1.hdr, *keyslot);
+ else
+ ki = LUKS2_keyslot_info(&cd->u.luks2.hdr, *keyslot);
+ switch (ki) {
+ case CRYPT_SLOT_INVALID:
+ log_err(cd, _("Key slot %d is invalid, please select between 0 and %d."),
+ *keyslot, crypt_keyslot_max(cd->type) - 1);
+ return -EINVAL;
+ case CRYPT_SLOT_INACTIVE:
+ break;
+ default:
+ log_err(cd, _("Key slot %d is full, please select another one."),
+ *keyslot);
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Selected keyslot %d.", *keyslot);
+ return 0;
+}
+
+/*
+ * Compares a UUID returned by device-mapper (type prefix stripped by
+ * cryptsetup) with the UUID stored in the metadata header.
+ */
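+/*
+ * Illustrative example (hypothetical values): a DM UUID of
+ * "LUKS2-0123456789abcdef0123456789abcdef-home" matches the header UUID
+ * "01234567-89ab-cdef-0123-456789abcdef", because the comparison starts
+ * after the first '-' and dashes in the header UUID are skipped.
+ */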
+int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid)
+{
+ int i, j;
+ char *str;
+
+ if (!dm_uuid || !hdr_uuid)
+ return -EINVAL;
+
+ str = strchr(dm_uuid, '-');
+ if (!str)
+ return -EINVAL;
+
+ for (i = 0, j = 1; hdr_uuid[i]; i++) {
+ if (hdr_uuid[i] == '-')
+ continue;
+
+ if (!str[j] || str[j] == '-')
+ return -EINVAL;
+
+ if (str[j] != hdr_uuid[i])
+ return -EINVAL;
+ j++;
+ }
+
+ return 0;
+}
+
+/*
+ * compares type of active device to provided string (only if there is no explicit type)
+ */
+static int crypt_uuid_type_cmp(struct crypt_device *cd, const char *type)
+{
+ struct crypt_dm_active_device dmd;
+ size_t len;
+ int r;
+
+ /* Must use header-on-disk if we know the type here */
+ if (cd->type || !cd->u.none.active_name)
+ return -EINVAL;
+
+ log_dbg(cd, "Checking if active device %s without header has UUID type %s.",
+ cd->u.none.active_name, type);
+
+ r = dm_query_device(cd, cd->u.none.active_name, DM_ACTIVE_UUID, &dmd);
+ if (r < 0)
+ return r;
+
+ r = -ENODEV;
+ len = strlen(type);
+ if (dmd.uuid && strlen(dmd.uuid) > len &&
+ !strncmp(dmd.uuid, type, len) && dmd.uuid[len] == '-')
+ r = 0;
+
+ free(CONST_CAST(void*)dmd.uuid);
+ return r;
+}
+
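+/*
+ * A minimal public-API sketch that ends up here (plain device activation);
+ * device path, key size and passphrase values are illustrative:
+ *
+ *	struct crypt_device *cd;
+ *	struct crypt_params_plain params = { .hash = "sha256" };
+ *
+ *	crypt_init(&cd, "/dev/sdb1");
+ *	crypt_format(cd, CRYPT_PLAIN, "aes", "xts-plain64", NULL, NULL, 64, &params);
+ *	crypt_activate_by_passphrase(cd, "plain_dev", CRYPT_ANY_SLOT,
+ *				     passphrase, passphrase_size, 0);
+ *	crypt_free(cd);
+ */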
+int PLAIN_activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vk,
+ uint64_t size,
+ uint32_t flags)
+{
+ int r;
+ struct crypt_dm_active_device dmd = {
+ .flags = flags,
+ .size = size,
+ };
+
+ log_dbg(cd, "Trying to activate PLAIN device %s using cipher %s.",
+ name, crypt_get_cipher_spec(cd));
+
+ if (MISALIGNED(size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
+ log_err(cd, _("Device size is not aligned to device logical block size."));
+ return -EINVAL;
+ }
+
+ r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
+ vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
+ crypt_get_data_offset(cd), crypt_get_integrity(cd),
+ crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
+ if (r < 0)
+ return r;
+
+ r = create_or_reload_device(cd, name, CRYPT_PLAIN, &dmd);
+
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+int crypt_confirm(struct crypt_device *cd, const char *msg)
+{
+ if (!cd || !cd->confirm)
+ return 1;
+ else
+ return cd->confirm(msg, cd->confirm_usrptr);
+}
+
+void crypt_set_log_callback(struct crypt_device *cd,
+ void (*log)(int level, const char *msg, void *usrptr),
+ void *usrptr)
+{
+ if (!cd) {
+ _default_log = log;
+ _default_log_usrptr = usrptr;
+ } else {
+ cd->log = log;
+ cd->log_usrptr = usrptr;
+ }
+}
+
+void crypt_set_confirm_callback(struct crypt_device *cd,
+ int (*confirm)(const char *msg, void *usrptr),
+ void *usrptr)
+{
+ if (cd) {
+ cd->confirm = confirm;
+ cd->confirm_usrptr = usrptr;
+ }
+}
+
+const char *crypt_get_dir(void)
+{
+ return dm_get_dir();
+}
+
+int crypt_init(struct crypt_device **cd, const char *device)
+{
+ struct crypt_device *h = NULL;
+ int r;
+
+ if (!cd)
+ return -EINVAL;
+
+ log_dbg(NULL, "Allocating context for crypt device %s.", device ?: "(none)");
+#if !HAVE_DECL_O_CLOEXEC
+ log_dbg(NULL, "Running without O_CLOEXEC.");
+#endif
+
+ if (!(h = malloc(sizeof(struct crypt_device))))
+ return -ENOMEM;
+
+ memset(h, 0, sizeof(*h));
+
+ r = device_alloc(NULL, &h->device, device);
+ if (r < 0) {
+ free(h);
+ return r;
+ }
+
+ dm_backend_init(NULL);
+
+ h->rng_type = crypt_random_default_key_rng();
+
+ *cd = h;
+ return 0;
+}
+
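+/*
+ * Typical context lifecycle (sketch, error handling omitted; CRYPT_LUKS
+ * probes both LUKS versions):
+ *
+ *	struct crypt_device *cd;
+ *
+ *	crypt_init(&cd, "/dev/sdb1");
+ *	crypt_load(cd, CRYPT_LUKS, NULL);
+ *	... query or activate ...
+ *	crypt_free(cd);
+ */
+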
+static int crypt_check_data_device_size(struct crypt_device *cd)
+{
+ int r;
+ uint64_t size, size_min;
+
+ /* Check data device size, require at least header or one sector */
+ size_min = crypt_get_data_offset(cd) << SECTOR_SHIFT ?: SECTOR_SIZE;
+
+ r = device_size(cd->device, &size);
+ if (r < 0)
+ return r;
+
+ if (size < size_min) {
+ log_err(cd, _("Header detected but device %s is too small."),
+ device_path(cd->device));
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int _crypt_set_data_device(struct crypt_device *cd, const char *device)
+{
+ struct device *dev = NULL;
+ int r;
+
+ r = device_alloc(cd, &dev, device);
+ if (r < 0)
+ return r;
+
+ if (!cd->metadata_device) {
+ cd->metadata_device = cd->device;
+ } else
+ device_free(cd, cd->device);
+
+ cd->device = dev;
+
+ r = crypt_check_data_device_size(cd);
+ if (!r && isLUKS2(cd->type))
+ device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr));
+
+ return r;
+}
+
+int crypt_set_data_device(struct crypt_device *cd, const char *device)
+{
+ /* metadata device must be set */
+ if (!cd || !cd->device || !device)
+ return -EINVAL;
+
+ log_dbg(cd, "Setting ciphertext data device to %s.", device ?: "(none)");
+
+ if (!isLUKS1(cd->type) && !isLUKS2(cd->type) && !isVERITY(cd->type) &&
+ !isINTEGRITY(cd->type) && !isTCRYPT(cd->type)) {
+ log_err(cd, _("This operation is not supported for this device type."));
+ return -EINVAL;
+ }
+
+ if (isLUKS2(cd->type) && crypt_get_luks2_reencrypt(cd)) {
+ log_err(cd, _("Illegal operation with reencryption in-progress."));
+ return -EINVAL;
+ }
+
+ return _crypt_set_data_device(cd, device);
+}
+
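+/*
+ * Detached-header sketch (paths are illustrative): initialize the context
+ * on the header device, load metadata, then point data at the ciphertext
+ * device; crypt_init_data_device() below combines the first and last step.
+ *
+ *	crypt_init(&cd, "/path/to/detached_header");
+ *	crypt_load(cd, CRYPT_LUKS2, NULL);
+ *	crypt_set_data_device(cd, "/dev/sdb1");
+ */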
+int crypt_init_data_device(struct crypt_device **cd, const char *device, const char *data_device)
+{
+ int r;
+
+ if (!cd)
+ return -EINVAL;
+
+ r = crypt_init(cd, device);
+ if (r || !data_device || !strcmp(device, data_device))
+ return r;
+
+ log_dbg(NULL, "Setting ciphertext data device to %s.", data_device);
+ r = _crypt_set_data_device(*cd, data_device);
+ if (r) {
+ crypt_free(*cd);
+ *cd = NULL;
+ }
+
+ return r;
+}
+
+static void crypt_free_type(struct crypt_device *cd, const char *force_type)
+{
+ const char *type = force_type ?: cd->type;
+
+ if (isPLAIN(type)) {
+ free(CONST_CAST(void*)cd->u.plain.hdr.hash);
+ free(cd->u.plain.cipher);
+ free(cd->u.plain.cipher_spec);
+ } else if (isLUKS2(type)) {
+ LUKS2_reencrypt_free(cd, cd->u.luks2.rh);
+ LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
+ free(cd->u.luks2.keyslot_cipher);
+ } else if (isLUKS1(type)) {
+ free(cd->u.luks1.cipher_spec);
+ } else if (isLOOPAES(type)) {
+ free(CONST_CAST(void*)cd->u.loopaes.hdr.hash);
+ free(cd->u.loopaes.cipher);
+ free(cd->u.loopaes.cipher_spec);
+ } else if (isVERITY(type)) {
+ free(CONST_CAST(void*)cd->u.verity.hdr.hash_name);
+ free(CONST_CAST(void*)cd->u.verity.hdr.data_device);
+ free(CONST_CAST(void*)cd->u.verity.hdr.hash_device);
+ free(CONST_CAST(void*)cd->u.verity.hdr.fec_device);
+ free(CONST_CAST(void*)cd->u.verity.hdr.salt);
+ free(CONST_CAST(void*)cd->u.verity.root_hash);
+ free(cd->u.verity.uuid);
+ device_free(cd, cd->u.verity.fec_device);
+ } else if (isINTEGRITY(type)) {
+ free(CONST_CAST(void*)cd->u.integrity.params.integrity);
+ free(CONST_CAST(void*)cd->u.integrity.params.journal_integrity);
+ free(CONST_CAST(void*)cd->u.integrity.params.journal_crypt);
+ crypt_free_volume_key(cd->u.integrity.journal_crypt_key);
+ crypt_free_volume_key(cd->u.integrity.journal_mac_key);
+ } else if (isBITLK(type)) {
+ free(cd->u.bitlk.cipher_spec);
+ BITLK_bitlk_metadata_free(&cd->u.bitlk.params);
+ } else if (!type) {
+ free(cd->u.none.active_name);
+ cd->u.none.active_name = NULL;
+ }
+
+ crypt_set_null_type(cd);
+}
+
+/* internal only */
+struct crypt_pbkdf_type *crypt_get_pbkdf(struct crypt_device *cd)
+{
+ return &cd->pbkdf;
+}
+
+/*
+ * crypt_load() helpers
+ */
+static int _crypt_load_luks2(struct crypt_device *cd, int reload, int repair)
+{
+ int r;
+ char *type = NULL;
+ struct luks2_hdr hdr2 = {};
+
+ log_dbg(cd, "%soading LUKS2 header (repair %sabled).", reload ? "Rel" : "L", repair ? "en" : "dis");
+
+ r = LUKS2_hdr_read(cd, &hdr2, repair);
+ if (r)
+ return r;
+
+ if (!reload && !(type = strdup(CRYPT_LUKS2))) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (verify_pbkdf_params(cd, &cd->pbkdf)) {
+ r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);
+ if (r)
+ goto out;
+ }
+
+ if (reload) {
+ LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
+ free(cd->u.luks2.keyslot_cipher);
+ } else
+ cd->type = type;
+
+ r = 0;
+ memcpy(&cd->u.luks2.hdr, &hdr2, sizeof(hdr2));
+ cd->u.luks2.keyslot_cipher = NULL;
+ cd->u.luks2.rh = NULL;
+
+out:
+ if (r) {
+ free(type);
+ LUKS2_hdr_free(cd, &hdr2);
+ }
+ return r;
+}
+
+static void _luks2_rollback(struct crypt_device *cd)
+{
+ if (!cd || !isLUKS2(cd->type))
+ return;
+
+ if (LUKS2_hdr_rollback(cd, &cd->u.luks2.hdr)) {
+ log_err(cd, _("Failed to rollback LUKS2 metadata in memory."));
+ return;
+ }
+
+ free(cd->u.luks2.keyslot_cipher);
+ cd->u.luks2.keyslot_cipher = NULL;
+}
+
+static int _crypt_load_luks(struct crypt_device *cd, const char *requested_type,
+ bool quiet, bool repair)
+{
+ char *cipher_spec;
+ struct luks_phdr hdr = {};
+ int r, version;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ /* This will return 0 if primary LUKS2 header is damaged */
+ version = LUKS2_hdr_version_unlocked(cd, NULL);
+
+ if ((isLUKS1(requested_type) && version == 2) ||
+ (isLUKS2(requested_type) && version == 1))
+ return -EINVAL;
+
+ if (requested_type)
+ version = 0;
+
+ if (isLUKS1(requested_type) || version == 1) {
+ if (isLUKS2(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+
+ if (verify_pbkdf_params(cd, &cd->pbkdf)) {
+ r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);
+ if (r)
+ return r;
+ }
+
+ r = LUKS_read_phdr(&hdr, !quiet, repair, cd);
+ if (r)
+ goto out;
+
+ if (!cd->type && !(cd->type = strdup(CRYPT_LUKS1))) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /* Set hash to the same as in the loaded header */
+ if (!cd->pbkdf.hash || strcmp(cd->pbkdf.hash, hdr.hashSpec)) {
+ free(CONST_CAST(void*)cd->pbkdf.hash);
+ cd->pbkdf.hash = strdup(hdr.hashSpec);
+ if (!cd->pbkdf.hash) {
+ r = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (asprintf(&cipher_spec, "%s-%s", hdr.cipherName, hdr.cipherMode) < 0) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ free(cd->u.luks1.cipher_spec);
+ cd->u.luks1.cipher_spec = cipher_spec;
+
+ memcpy(&cd->u.luks1.hdr, &hdr, sizeof(hdr));
+ } else if (isLUKS2(requested_type) || version == 2 || version == 0) {
+ if (isLUKS1(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+
+ /*
+ * Current LUKS2 repair just overrides blkid probes
+ * and performs auto-recovery if possible. This is safe
+ * unless future LUKS2 repair code does something more
+ * sophisticated. In that case we would need to check
+ * the LUKS2 requirements and decide whether it is safe
+ * to perform the repair.
+ */
+ r = _crypt_load_luks2(cd, cd->type != NULL, repair);
+ if (!r)
+ device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr));
+ else if (!quiet)
+ log_err(cd, _("Device %s is not a valid LUKS device."), mdata_device_path(cd));
+ } else {
+ if (version > 2)
+ log_err(cd, _("Unsupported LUKS version %d."), version);
+ r = -EINVAL;
+ }
+out:
+ crypt_safe_memzero(&hdr, sizeof(hdr));
+
+ return r;
+}
+
+static int _crypt_load_tcrypt(struct crypt_device *cd, struct crypt_params_tcrypt *params)
+{
+ int r;
+
+ if (!params)
+ return -EINVAL;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ memcpy(&cd->u.tcrypt.params, params, sizeof(*params));
+
+ r = TCRYPT_read_phdr(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
+
+ cd->u.tcrypt.params.passphrase = NULL;
+ cd->u.tcrypt.params.passphrase_size = 0;
+ cd->u.tcrypt.params.keyfiles = NULL;
+ cd->u.tcrypt.params.keyfiles_count = 0;
+ cd->u.tcrypt.params.veracrypt_pim = 0;
+
+ if (r < 0)
+ goto out;
+
+ if (!cd->type && !(cd->type = strdup(CRYPT_TCRYPT)))
+ r = -ENOMEM;
+out:
+ if (r < 0)
+ crypt_free_type(cd, CRYPT_TCRYPT);
+ return r;
+}
+
+static int _crypt_load_verity(struct crypt_device *cd, struct crypt_params_verity *params)
+{
+ int r;
+ uint64_t sb_offset = 0;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ if (params && params->flags & CRYPT_VERITY_NO_HEADER)
+ return -EINVAL;
+
+ if (params)
+ sb_offset = params->hash_area_offset;
+
+ r = VERITY_read_sb(cd, sb_offset, &cd->u.verity.uuid, &cd->u.verity.hdr);
+ if (r < 0)
+ goto out;
+
+ if (!cd->type && !(cd->type = strdup(CRYPT_VERITY))) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (params)
+ cd->u.verity.hdr.flags = params->flags;
+
+ /* Hash availability checked in sb load */
+ cd->u.verity.root_hash_size = crypt_hash_size(cd->u.verity.hdr.hash_name);
+ if (cd->u.verity.root_hash_size > 4096) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (params && params->data_device &&
+ (r = crypt_set_data_device(cd, params->data_device)) < 0)
+ goto out;
+
+ if (params && params->fec_device) {
+ r = device_alloc(cd, &cd->u.verity.fec_device, params->fec_device);
+ if (r < 0)
+ goto out;
+ cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
+ cd->u.verity.hdr.fec_roots = params->fec_roots;
+ }
+out:
+ if (r < 0)
+ crypt_free_type(cd, CRYPT_VERITY);
+ return r;
+}
+
+static int _crypt_load_integrity(struct crypt_device *cd,
+ struct crypt_params_integrity *params)
+{
+ int r;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ r = INTEGRITY_read_sb(cd, &cd->u.integrity.params, &cd->u.integrity.sb_flags);
+ if (r < 0)
+ goto out;
+
+ // FIXME: add checks for fields in integrity sb vs params
+
+ r = -ENOMEM;
+ if (params) {
+ cd->u.integrity.params.journal_watermark = params->journal_watermark;
+ cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
+ cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
+ if (params->integrity &&
+ !(cd->u.integrity.params.integrity = strdup(params->integrity)))
+ goto out;
+ cd->u.integrity.params.integrity_key_size = params->integrity_key_size;
+ if (params->journal_integrity &&
+ !(cd->u.integrity.params.journal_integrity = strdup(params->journal_integrity)))
+ goto out;
+ if (params->journal_crypt &&
+ !(cd->u.integrity.params.journal_crypt = strdup(params->journal_crypt)))
+ goto out;
+
+ if (params->journal_crypt_key) {
+ cd->u.integrity.journal_crypt_key =
+ crypt_alloc_volume_key(params->journal_crypt_key_size,
+ params->journal_crypt_key);
+ if (!cd->u.integrity.journal_crypt_key)
+ goto out;
+ }
+ if (params->journal_integrity_key) {
+ cd->u.integrity.journal_mac_key =
+ crypt_alloc_volume_key(params->journal_integrity_key_size,
+ params->journal_integrity_key);
+ if (!cd->u.integrity.journal_mac_key)
+ goto out;
+ }
+ }
+
+ if (!cd->type && !(cd->type = strdup(CRYPT_INTEGRITY)))
+ goto out;
+ r = 0;
+out:
+ if (r < 0)
+ crypt_free_type(cd, CRYPT_INTEGRITY);
+ return r;
+}
+
+static int _crypt_load_bitlk(struct crypt_device *cd)
+{
+ int r;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ r = BITLK_read_sb(cd, &cd->u.bitlk.params);
+ if (r < 0)
+ goto out;
+
+ if (asprintf(&cd->u.bitlk.cipher_spec, "%s-%s",
+ cd->u.bitlk.params.cipher, cd->u.bitlk.params.cipher_mode) < 0) {
+ cd->u.bitlk.cipher_spec = NULL;
+ r = -ENOMEM;
+ goto out;
+ }
+
+ if (!cd->type && !(cd->type = strdup(CRYPT_BITLK))) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ device_set_block_size(crypt_data_device(cd), cd->u.bitlk.params.sector_size);
+out:
+ if (r < 0)
+ crypt_free_type(cd, CRYPT_BITLK);
+ return r;
+}
+
+static int _crypt_load_fvault2(struct crypt_device *cd)
+{
+ int r;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ r = FVAULT2_read_metadata(cd, &cd->u.fvault2.params);
+ if (r < 0)
+ goto out;
+
+ if (!cd->type && !(cd->type = strdup(CRYPT_FVAULT2)))
+ r = -ENOMEM;
+out:
+ if (r < 0)
+ crypt_free_type(cd, CRYPT_FVAULT2);
+ return r;
+}
+
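+/*
+ * Load and verify on-disk metadata into the context. With requested_type
+ * NULL only LUKS (either version) is probed; other types (VERITY, TCRYPT,
+ * INTEGRITY, BITLK, FVAULT2) must be requested explicitly, some with their
+ * params structure.
+ */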
+int crypt_load(struct crypt_device *cd,
+ const char *requested_type,
+ void *params)
+{
+ int r;
+
+ if (!cd)
+ return -EINVAL;
+
+ log_dbg(cd, "Trying to load %s crypt type from device %s.",
+ requested_type ?: "any", mdata_device_path(cd) ?: "(none)");
+
+ if (!crypt_metadata_device(cd))
+ return -EINVAL;
+
+ crypt_reset_null_type(cd);
+ cd->data_offset = 0;
+ cd->metadata_size = 0;
+ cd->keyslots_size = 0;
+
+ if (!requested_type || isLUKS1(requested_type) || isLUKS2(requested_type)) {
+ if (cd->type && !isLUKS1(cd->type) && !isLUKS2(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+
+ r = _crypt_load_luks(cd, requested_type, true, false);
+ } else if (isVERITY(requested_type)) {
+ if (cd->type && !isVERITY(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+ r = _crypt_load_verity(cd, params);
+ } else if (isTCRYPT(requested_type)) {
+ if (cd->type && !isTCRYPT(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+ r = _crypt_load_tcrypt(cd, params);
+ } else if (isINTEGRITY(requested_type)) {
+ if (cd->type && !isINTEGRITY(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+ r = _crypt_load_integrity(cd, params);
+ } else if (isBITLK(requested_type)) {
+ if (cd->type && !isBITLK(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+ r = _crypt_load_bitlk(cd);
+ } else if (isFVAULT2(requested_type)) {
+ if (cd->type && !isFVAULT2(cd->type)) {
+ log_dbg(cd, "Context is already initialized to type %s", cd->type);
+ return -EINVAL;
+ }
+ r = _crypt_load_fvault2(cd);
+ } else
+ return -EINVAL;
+
+ return r;
+}
+
+/*
+ * crypt_init_by_name() helpers
+ */
+static int _init_by_name_crypt_none(struct crypt_device *cd)
+{
+ int r;
+ char _mode[MAX_CIPHER_LEN];
+ struct crypt_dm_active_device dmd;
+ struct dm_target *tgt = &dmd.segment;
+
+ if (cd->type || !cd->u.none.active_name)
+ return -EINVAL;
+
+ r = dm_query_device(cd, cd->u.none.active_name,
+ DM_ACTIVE_CRYPT_CIPHER |
+ DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
+ if (r < 0)
+ return r;
+ if (!single_segment(&dmd) || tgt->type != DM_CRYPT)
+ r = -EINVAL;
+ if (r >= 0)
+ r = crypt_parse_name_and_mode(tgt->u.crypt.cipher,
+ cd->u.none.cipher, NULL,
+ _mode);
+
+ if (!r) {
+ r = snprintf(cd->u.none.cipher_spec, sizeof(cd->u.none.cipher_spec),
+ "%s-%s", cd->u.none.cipher, _mode);
+ if (r < 0 || (size_t)r >= sizeof(cd->u.none.cipher_spec))
+ r = -EINVAL;
+ else {
+ cd->u.none.cipher_mode = cd->u.none.cipher_spec + strlen(cd->u.none.cipher) + 1;
+ cd->u.none.key_size = tgt->u.crypt.vk->keylength;
+ r = 0;
+ }
+ }
+
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+static const char *LUKS_UUID(struct crypt_device *cd)
+{
+ if (!cd)
+ return NULL;
+ else if (isLUKS1(cd->type))
+ return cd->u.luks1.hdr.uuid;
+ else if (isLUKS2(cd->type))
+ return cd->u.luks2.hdr.uuid;
+
+ return NULL;
+}
+
+static int _init_by_name_crypt(struct crypt_device *cd, const char *name)
+{
+ bool found = false;
+ char **dep, *cipher_spec = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
+ char deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = {};
+ const char *dev, *namei;
+ int key_nums, r;
+ struct crypt_dm_active_device dmd, dmdi = {}, dmdep = {};
+ struct dm_target *tgt = &dmd.segment, *tgti = &dmdi.segment;
+
+ r = dm_query_device(cd, name,
+ DM_ACTIVE_DEVICE |
+ DM_ACTIVE_UUID |
+ DM_ACTIVE_CRYPT_CIPHER |
+ DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
+ if (r < 0)
+ return r;
+
+ if (tgt->type != DM_CRYPT && tgt->type != DM_LINEAR) {
+ log_dbg(cd, "Unsupported device table detected in %s.", name);
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = -EINVAL;
+
+ if (dmd.uuid) {
+ r = snprintf(deps_uuid_prefix, sizeof(deps_uuid_prefix), CRYPT_SUBDEV "-%.32s", dmd.uuid + 6);
+ if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1))
+ r = -EINVAL;
+ }
+
+ if (r >= 0) {
+ r = dm_device_deps(cd, name, deps_uuid_prefix, deps, ARRAY_SIZE(deps));
+ if (r)
+ goto out;
+ }
+
+ r = crypt_parse_name_and_mode(tgt->type == DM_LINEAR ? "null" : tgt->u.crypt.cipher, cipher,
+ &key_nums, cipher_mode);
+ if (r < 0) {
+ log_dbg(cd, "Cannot parse cipher and mode from active device.");
+ goto out;
+ }
+
+ dep = deps;
+
+ if (tgt->type == DM_CRYPT && tgt->u.crypt.integrity && (namei = device_dm_name(tgt->data_device))) {
+ r = dm_query_device(cd, namei, DM_ACTIVE_DEVICE, &dmdi);
+ if (r < 0)
+ goto out;
+ if (!single_segment(&dmdi) || tgti->type != DM_INTEGRITY) {
+ log_dbg(cd, "Unsupported device table detected in %s.", namei);
+ r = -EINVAL;
+ goto out;
+ }
+ if (!cd->metadata_device) {
+ device_free(cd, cd->device);
+ MOVE_REF(cd->device, tgti->data_device);
+ }
+ }
+
+ /* Do not try to look up the LUKS2 header in detached-header mode */
+ if (dmd.uuid && !cd->metadata_device && !found) {
+ while (*dep && !found) {
+ r = dm_query_device(cd, *dep, DM_ACTIVE_DEVICE, &dmdep);
+ if (r < 0)
+ goto out;
+
+ tgt = &dmdep.segment;
+
+ while (tgt && !found) {
+ dev = device_path(tgt->data_device);
+ if (!dev) {
+ tgt = tgt->next;
+ continue;
+ }
+ if (!strstr(dev, dm_get_dir()) ||
+ !crypt_string_in(dev + strlen(dm_get_dir()) + 1, deps, ARRAY_SIZE(deps))) {
+ device_free(cd, cd->device);
+ MOVE_REF(cd->device, tgt->data_device);
+ found = true;
+ }
+ tgt = tgt->next;
+ }
+ dep++;
+ dm_targets_free(cd, &dmdep);
+ }
+ }
+
+ if (asprintf(&cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
+ cipher_spec = NULL;
+ r = -ENOMEM;
+ goto out;
+ }
+
+ tgt = &dmd.segment;
+ r = 0;
+
+ if (isPLAIN(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
+ cd->u.plain.hdr.hash = NULL; /* no way to get this */
+ cd->u.plain.hdr.offset = tgt->u.crypt.offset;
+ cd->u.plain.hdr.skip = tgt->u.crypt.iv_offset;
+ cd->u.plain.hdr.sector_size = tgt->u.crypt.sector_size;
+ cd->u.plain.key_size = tgt->u.crypt.vk->keylength;
+ cd->u.plain.cipher = strdup(cipher);
+ MOVE_REF(cd->u.plain.cipher_spec, cipher_spec);
+ cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;
+ } else if (isLOOPAES(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
+ cd->u.loopaes.hdr.offset = tgt->u.crypt.offset;
+ cd->u.loopaes.cipher = strdup(cipher);
+ MOVE_REF(cd->u.loopaes.cipher_spec, cipher_spec);
+ cd->u.loopaes.cipher_mode = cd->u.loopaes.cipher_spec + strlen(cipher) + 1;
+ /* version 3 uses last key for IV */
+ if (tgt->u.crypt.vk->keylength % key_nums)
+ key_nums++;
+ cd->u.loopaes.key_size = tgt->u.crypt.vk->keylength / key_nums;
+ } else if (isLUKS1(cd->type) || isLUKS2(cd->type)) {
+ if (crypt_metadata_device(cd)) {
+ r = _crypt_load_luks(cd, cd->type, true, false);
+ if (r < 0) {
+ log_dbg(cd, "LUKS device header does not match active device.");
+ crypt_set_null_type(cd);
+ device_close(cd, cd->metadata_device);
+ device_close(cd, cd->device);
+ r = 0;
+ goto out;
+ }
+ /* check whether UUIDs match each other */
+ r = crypt_uuid_cmp(dmd.uuid, LUKS_UUID(cd));
+ if (r < 0) {
+ log_dbg(cd, "LUKS device header uuid: %s mismatches DM returned uuid %s",
+ LUKS_UUID(cd), dmd.uuid);
+ crypt_free_type(cd, NULL);
+ r = 0;
+ goto out;
+ }
+ } else {
+ log_dbg(cd, "LUKS device header not available.");
+ crypt_set_null_type(cd);
+ r = 0;
+ }
+ } else if (isTCRYPT(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
+ r = TCRYPT_init_by_name(cd, name, dmd.uuid, tgt, &cd->device,
+ &cd->u.tcrypt.params, &cd->u.tcrypt.hdr);
+ } else if (isBITLK(cd->type)) {
+ r = _crypt_load_bitlk(cd);
+ if (r < 0) {
+ log_dbg(cd, "BITLK device header not available.");
+ crypt_set_null_type(cd);
+ r = 0;
+ }
+ } else if (isFVAULT2(cd->type)) {
+ r = _crypt_load_fvault2(cd);
+ if (r < 0) {
+ log_dbg(cd, "FVAULT2 device header not available.");
+ crypt_set_null_type(cd);
+ r = 0;
+ }
+ }
+out:
+ dm_targets_free(cd, &dmd);
+ dm_targets_free(cd, &dmdi);
+ dm_targets_free(cd, &dmdep);
+ free(CONST_CAST(void*)dmd.uuid);
+ free(cipher_spec);
+ dep = deps;
+ while (*dep)
+ free(*dep++);
+ return r;
+}
+
+static int _init_by_name_verity(struct crypt_device *cd, const char *name)
+{
+ struct crypt_dm_active_device dmd;
+ struct dm_target *tgt = &dmd.segment;
+ int r;
+
+ r = dm_query_device(cd, name,
+ DM_ACTIVE_DEVICE |
+ DM_ACTIVE_VERITY_HASH_DEVICE |
+ DM_ACTIVE_VERITY_ROOT_HASH |
+ DM_ACTIVE_VERITY_PARAMS, &dmd);
+ if (r < 0)
+ return r;
+ if (!single_segment(&dmd) || tgt->type != DM_VERITY) {
+ log_dbg(cd, "Unsupported device table detected in %s.", name);
+ r = -EINVAL;
+ goto out;
+ }
+ if (r > 0)
+ r = 0;
+
+ if (isVERITY(cd->type)) {
+ cd->u.verity.uuid = NULL; // FIXME
+ cd->u.verity.hdr.flags = CRYPT_VERITY_NO_HEADER; // FIXME
+ cd->u.verity.hdr.data_size = tgt->u.verity.vp->data_size;
+ cd->u.verity.root_hash_size = tgt->u.verity.root_hash_size;
+ MOVE_REF(cd->u.verity.hdr.hash_name, tgt->u.verity.vp->hash_name);
+ cd->u.verity.hdr.data_device = NULL;
+ cd->u.verity.hdr.hash_device = NULL;
+ cd->u.verity.hdr.data_block_size = tgt->u.verity.vp->data_block_size;
+ cd->u.verity.hdr.hash_block_size = tgt->u.verity.vp->hash_block_size;
+ cd->u.verity.hdr.hash_area_offset = tgt->u.verity.hash_offset;
+ cd->u.verity.hdr.fec_area_offset = tgt->u.verity.fec_offset;
+ cd->u.verity.hdr.hash_type = tgt->u.verity.vp->hash_type;
+ cd->u.verity.hdr.flags = tgt->u.verity.vp->flags;
+ cd->u.verity.hdr.salt_size = tgt->u.verity.vp->salt_size;
+ MOVE_REF(cd->u.verity.hdr.salt, tgt->u.verity.vp->salt);
+ MOVE_REF(cd->u.verity.hdr.fec_device, tgt->u.verity.vp->fec_device);
+ cd->u.verity.hdr.fec_roots = tgt->u.verity.vp->fec_roots;
+ MOVE_REF(cd->u.verity.fec_device, tgt->u.verity.fec_device);
+ MOVE_REF(cd->metadata_device, tgt->u.verity.hash_device);
+ MOVE_REF(cd->u.verity.root_hash, tgt->u.verity.root_hash);
+ }
+out:
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+static int _init_by_name_integrity(struct crypt_device *cd, const char *name)
+{
+ struct crypt_dm_active_device dmd;
+ struct dm_target *tgt = &dmd.segment;
+ int r;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_DEVICE |
+ DM_ACTIVE_CRYPT_KEY |
+ DM_ACTIVE_CRYPT_KEYSIZE |
+ DM_ACTIVE_INTEGRITY_PARAMS, &dmd);
+ if (r < 0)
+ return r;
+ if (!single_segment(&dmd) || tgt->type != DM_INTEGRITY) {
+ log_dbg(cd, "Unsupported device table detected in %s.", name);
+ r = -EINVAL;
+ goto out;
+ }
+ if (r > 0)
+ r = 0;
+
+ if (isINTEGRITY(cd->type)) {
+ cd->u.integrity.params.tag_size = tgt->u.integrity.tag_size;
+ cd->u.integrity.params.sector_size = tgt->u.integrity.sector_size;
+ cd->u.integrity.params.journal_size = tgt->u.integrity.journal_size;
+ cd->u.integrity.params.journal_watermark = tgt->u.integrity.journal_watermark;
+ cd->u.integrity.params.journal_commit_time = tgt->u.integrity.journal_commit_time;
+ cd->u.integrity.params.interleave_sectors = tgt->u.integrity.interleave_sectors;
+ cd->u.integrity.params.buffer_sectors = tgt->u.integrity.buffer_sectors;
+ MOVE_REF(cd->u.integrity.params.integrity, tgt->u.integrity.integrity);
+ MOVE_REF(cd->u.integrity.params.journal_integrity, tgt->u.integrity.journal_integrity);
+ MOVE_REF(cd->u.integrity.params.journal_crypt, tgt->u.integrity.journal_crypt);
+
+ if (tgt->u.integrity.vk)
+ cd->u.integrity.params.integrity_key_size = tgt->u.integrity.vk->keylength;
+ if (tgt->u.integrity.journal_integrity_key)
+ cd->u.integrity.params.journal_integrity_key_size = tgt->u.integrity.journal_integrity_key->keylength;
+ if (tgt->u.integrity.journal_crypt_key)
+ cd->u.integrity.params.integrity_key_size = tgt->u.integrity.journal_crypt_key->keylength;
+ MOVE_REF(cd->metadata_device, tgt->u.integrity.meta_device);
+ }
+out:
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+int crypt_init_by_name_and_header(struct crypt_device **cd,
+ const char *name,
+ const char *header_device)
+{
+ crypt_status_info ci;
+ struct crypt_dm_active_device dmd;
+ struct dm_target *tgt = &dmd.segment;
+ int r;
+
+ if (!cd || !name)
+ return -EINVAL;
+
+ log_dbg(NULL, "Allocating crypt device context by device %s.", name);
+
+ ci = crypt_status(NULL, name);
+ if (ci == CRYPT_INVALID)
+ return -ENODEV;
+
+ if (ci < CRYPT_ACTIVE) {
+ log_err(NULL, _("Device %s is not active."), name);
+ return -ENODEV;
+ }
+
+ r = dm_query_device(NULL, name, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &dmd);
+ if (r < 0)
+ return r;
+
+ *cd = NULL;
+
+ if (header_device) {
+ r = crypt_init(cd, header_device);
+ } else {
+ r = crypt_init(cd, device_path(tgt->data_device));
+
+ /* Underlying device disappeared but mapping still active */
+ if (!tgt->data_device || r == -ENOTBLK)
+ log_verbose(NULL, _("Underlying device for crypt device %s disappeared."),
+ name);
+
+ /* Underlying device is not readable but crypt mapping exists */
+ if (r == -ENOTBLK)
+ r = crypt_init(cd, NULL);
+ }
+
+ if (r < 0)
+ goto out;
+
+ if (dmd.uuid) {
+ if (!strncmp(CRYPT_PLAIN, dmd.uuid, sizeof(CRYPT_PLAIN)-1))
+ (*cd)->type = strdup(CRYPT_PLAIN);
+ else if (!strncmp(CRYPT_LOOPAES, dmd.uuid, sizeof(CRYPT_LOOPAES)-1))
+ (*cd)->type = strdup(CRYPT_LOOPAES);
+ else if (!strncmp(CRYPT_LUKS1, dmd.uuid, sizeof(CRYPT_LUKS1)-1))
+ (*cd)->type = strdup(CRYPT_LUKS1);
+ else if (!strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1))
+ (*cd)->type = strdup(CRYPT_LUKS2);
+ else if (!strncmp(CRYPT_VERITY, dmd.uuid, sizeof(CRYPT_VERITY)-1))
+ (*cd)->type = strdup(CRYPT_VERITY);
+ else if (!strncmp(CRYPT_TCRYPT, dmd.uuid, sizeof(CRYPT_TCRYPT)-1))
+ (*cd)->type = strdup(CRYPT_TCRYPT);
+ else if (!strncmp(CRYPT_INTEGRITY, dmd.uuid, sizeof(CRYPT_INTEGRITY)-1))
+ (*cd)->type = strdup(CRYPT_INTEGRITY);
+ else if (!strncmp(CRYPT_BITLK, dmd.uuid, sizeof(CRYPT_BITLK)-1))
+ (*cd)->type = strdup(CRYPT_BITLK);
+ else if (!strncmp(CRYPT_FVAULT2, dmd.uuid, sizeof(CRYPT_FVAULT2)-1))
+ (*cd)->type = strdup(CRYPT_FVAULT2);
+ else
+ log_dbg(NULL, "Unknown UUID set, some parameters are not set.");
+ } else
+ log_dbg(NULL, "Active device has no UUID set, some parameters are not set.");
+
+ if (header_device) {
+ r = crypt_set_data_device(*cd, device_path(tgt->data_device));
+ if (r < 0)
+ goto out;
+ }
+
+ /* Try to initialize basic parameters from active device */
+
+ if (tgt->type == DM_CRYPT || tgt->type == DM_LINEAR)
+ r = _init_by_name_crypt(*cd, name);
+ else if (tgt->type == DM_VERITY)
+ r = _init_by_name_verity(*cd, name);
+ else if (tgt->type == DM_INTEGRITY)
+ r = _init_by_name_integrity(*cd, name);
+out:
+ if (r < 0) {
+ crypt_free(*cd);
+ *cd = NULL;
+ } else if (!(*cd)->type) {
+ /* For anonymous device (no header found) remember initialized name */
+ (*cd)->u.none.active_name = strdup(name);
+ }
+
+ free(CONST_CAST(void*)dmd.uuid);
+ dm_targets_free(NULL, &dmd);
+ return r;
+}
+
+int crypt_init_by_name(struct crypt_device **cd, const char *name)
+{
+ return crypt_init_by_name_and_header(cd, name, NULL);
+}
+
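+/*
+ * Sketch: attach a context to an already active mapping and query it
+ * (the mapping name is illustrative):
+ *
+ *	struct crypt_device *cd;
+ *
+ *	crypt_init_by_name(&cd, "luks_volume");
+ *	printf("cipher: %s\n", crypt_get_cipher(cd));
+ *	crypt_free(cd);
+ */
+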
+/*
+ * crypt_format() helpers
+ */
+static int _crypt_format_plain(struct crypt_device *cd,
+ const char *cipher,
+ const char *cipher_mode,
+ const char *uuid,
+ size_t volume_key_size,
+ struct crypt_params_plain *params)
+{
+ unsigned int sector_size = params ? params->sector_size : SECTOR_SIZE;
+ uint64_t dev_size;
+
+ if (!cipher || !cipher_mode) {
+ log_err(cd, _("Invalid plain crypt parameters."));
+ return -EINVAL;
+ }
+
+ if (volume_key_size > 1024) {
+ log_err(cd, _("Invalid key size."));
+ return -EINVAL;
+ }
+
+ if (uuid) {
+ log_err(cd, _("UUID is not supported for this crypt type."));
+ return -EINVAL;
+ }
+
+ if (cd->metadata_device) {
+ log_err(cd, _("Detached metadata device is not supported for this crypt type."));
+ return -EINVAL;
+ }
+
+ /* For compatibility with old params structure */
+ if (!sector_size)
+ sector_size = SECTOR_SIZE;
+
+ if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
+ NOTPOW2(sector_size)) {
+ log_err(cd, _("Unsupported encryption sector size."));
+ return -EINVAL;
+ }
+
+ if (sector_size > SECTOR_SIZE && !device_size(cd->device, &dev_size)) {
+ if (params && params->offset)
+ dev_size -= (params->offset * SECTOR_SIZE);
+ if (dev_size % sector_size) {
+ log_err(cd, _("Device size is not aligned to requested sector size."));
+ return -EINVAL;
+ }
+ device_set_block_size(crypt_data_device(cd), sector_size);
+ }
+
+ if (!(cd->type = strdup(CRYPT_PLAIN)))
+ return -ENOMEM;
+
+ cd->u.plain.key_size = volume_key_size;
+ cd->volume_key = crypt_alloc_volume_key(volume_key_size, NULL);
+ if (!cd->volume_key)
+ return -ENOMEM;
+
+ if (asprintf(&cd->u.plain.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
+ cd->u.plain.cipher_spec = NULL;
+ return -ENOMEM;
+ }
+ cd->u.plain.cipher = strdup(cipher);
+ cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;
+
+ if (params && params->hash)
+ cd->u.plain.hdr.hash = strdup(params->hash);
+
+ cd->u.plain.hdr.offset = params ? params->offset : 0;
+ cd->u.plain.hdr.skip = params ? params->skip : 0;
+ cd->u.plain.hdr.size = params ? params->size : 0;
+ cd->u.plain.hdr.sector_size = sector_size;
+
+ if (!cd->u.plain.cipher)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int _crypt_format_luks1(struct crypt_device *cd,
+ const char *cipher,
+ const char *cipher_mode,
+ const char *uuid,
+ const char *volume_key,
+ size_t volume_key_size,
+ struct crypt_params_luks1 *params)
+{
+ int r;
+ unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
+ unsigned long alignment_offset = 0;
+ uint64_t dev_size;
+
+ if (!cipher || !cipher_mode)
+ return -EINVAL;
+
+ if (!crypt_metadata_device(cd)) {
+ log_err(cd, _("Can't format LUKS without device."));
+ return -EINVAL;
+ }
+
+ if (params && cd->data_offset && params->data_alignment &&
+ (cd->data_offset % params->data_alignment)) {
+ log_err(cd, _("Requested data alignment is not compatible with data offset."));
+ return -EINVAL;
+ }
+
+ if (!(cd->type = strdup(CRYPT_LUKS1)))
+ return -ENOMEM;
+
+ if (volume_key)
+ cd->volume_key = crypt_alloc_volume_key(volume_key_size,
+ volume_key);
+ else
+ cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);
+
+ if (!cd->volume_key)
+ return -ENOMEM;
+
+ if (verify_pbkdf_params(cd, &cd->pbkdf)) {
+ r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);
+ if (r)
+ return r;
+ }
+
+ if (params && params->hash && strcmp(params->hash, cd->pbkdf.hash)) {
+ free(CONST_CAST(void*)cd->pbkdf.hash);
+ cd->pbkdf.hash = strdup(params->hash);
+ if (!cd->pbkdf.hash)
+ return -ENOMEM;
+ }
+
+ if (params && params->data_device) {
+ if (!cd->metadata_device)
+ cd->metadata_device = cd->device;
+ else
+ device_free(cd, cd->device);
+ cd->device = NULL;
+ if (device_alloc(cd, &cd->device, params->data_device) < 0)
+ return -ENOMEM;
+ }
+
+ if (params && cd->metadata_device) {
+ /* For detached header the alignment is used directly as data offset */
+ if (!cd->data_offset)
+ cd->data_offset = params->data_alignment;
+ required_alignment = params->data_alignment * SECTOR_SIZE;
+ } else if (params && params->data_alignment) {
+ required_alignment = params->data_alignment * SECTOR_SIZE;
+ } else
+ device_topology_alignment(cd, cd->device,
+ &required_alignment,
+ &alignment_offset, DEFAULT_DISK_ALIGNMENT);
+
+ r = LUKS_check_cipher(cd, volume_key_size, cipher, cipher_mode);
+ if (r < 0)
+ return r;
+
+ r = LUKS_generate_phdr(&cd->u.luks1.hdr, cd->volume_key, cipher, cipher_mode,
+ cd->pbkdf.hash, uuid,
+ cd->data_offset * SECTOR_SIZE,
+ alignment_offset, required_alignment, cd);
+ if (r < 0)
+ return r;
+
+ r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
+ if (r < 0)
+ return r;
+
+ if (asprintf(&cd->u.luks1.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
+ cd->u.luks1.cipher_spec = NULL;
+ return -ENOMEM;
+ }
+
+ r = LUKS_wipe_header_areas(&cd->u.luks1.hdr, cd);
+ if (r < 0) {
+ free(cd->u.luks1.cipher_spec);
+ log_err(cd, _("Cannot wipe header on device %s."),
+ mdata_device_path(cd));
+ return r;
+ }
+
+ r = LUKS_write_phdr(&cd->u.luks1.hdr, cd);
+ if (r) {
+ free(cd->u.luks1.cipher_spec);
+ return r;
+ }
+
+ if (!device_size(crypt_data_device(cd), &dev_size) &&
+ dev_size <= (crypt_get_data_offset(cd) * SECTOR_SIZE))
+ log_std(cd, _("Device %s is too small for activation, there is no remaining space for data.\n"),
+ device_path(crypt_data_device(cd)));
+
+ return 0;
+}
+
+static int _crypt_format_luks2(struct crypt_device *cd,
+ const char *cipher,
+ const char *cipher_mode,
+ const char *uuid,
+ const char *volume_key,
+ size_t volume_key_size,
+ struct crypt_params_luks2 *params,
+ bool sector_size_autodetect)
+{
+ int r, integrity_key_size = 0;
+ unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
+ unsigned long alignment_offset = 0;
+ unsigned int sector_size;
+ const char *integrity = params ? params->integrity : NULL;
+ uint64_t dev_size;
+ uint32_t dmc_flags;
+
+ cd->u.luks2.hdr.jobj = NULL;
+ cd->u.luks2.keyslot_cipher = NULL;
+
+ if (!cipher || !cipher_mode)
+ return -EINVAL;
+
+ if (!crypt_metadata_device(cd)) {
+ log_err(cd, _("Can't format LUKS without device."));
+ return -EINVAL;
+ }
+
+ if (params && cd->data_offset && params->data_alignment &&
+ (cd->data_offset % params->data_alignment)) {
+ log_err(cd, _("Requested data alignment is not compatible with data offset."));
+ return -EINVAL;
+ }
+
+ if (params && params->sector_size)
+ sector_size_autodetect = false;
+
+ if (params && params->data_device) {
+ if (!cd->metadata_device)
+ cd->metadata_device = cd->device;
+ else
+ device_free(cd, cd->device);
+ cd->device = NULL;
+ if (device_alloc(cd, &cd->device, params->data_device) < 0)
+ return -ENOMEM;
+ }
+
+ if (sector_size_autodetect) {
+ sector_size = device_optimal_encryption_sector_size(cd, crypt_data_device(cd));
+ log_dbg(cd, "Auto-detected optimal encryption sector size for device %s is %d bytes.",
+ device_path(crypt_data_device(cd)), sector_size);
+ } else
+ sector_size = params ? params->sector_size : SECTOR_SIZE;
+
+ if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
+ NOTPOW2(sector_size)) {
+ log_err(cd, _("Unsupported encryption sector size."));
+ return -EINVAL;
+ }
+ if (sector_size != SECTOR_SIZE && !dm_flags(cd, DM_CRYPT, &dmc_flags) &&
+ !(dmc_flags & DM_SECTOR_SIZE_SUPPORTED)) {
+ if (sector_size_autodetect) {
+ log_dbg(cd, "dm-crypt does not support encryption sector size option. Reverting to 512 bytes.");
+ sector_size = SECTOR_SIZE;
+ } else
+ log_std(cd, _("WARNING: The device activation will fail, dm-crypt is missing "
+ "support for requested encryption sector size.\n"));
+ }
+
+ if (integrity) {
+ if (params->integrity_params) {
+ /* Standalone dm-integrity must not be used */
+ if (params->integrity_params->integrity ||
+ params->integrity_params->integrity_key_size)
+ return -EINVAL;
+ /* FIXME: journal encryption and MAC is here not yet supported */
+ if (params->integrity_params->journal_crypt ||
+ params->integrity_params->journal_integrity)
+ return -ENOTSUP;
+ }
+ if (!INTEGRITY_tag_size(integrity, cipher, cipher_mode)) {
+ if (!strcmp(integrity, "none"))
+ integrity = NULL;
+ else
+ return -EINVAL;
+ }
+ integrity_key_size = INTEGRITY_key_size(integrity);
+ if ((integrity_key_size < 0) || (integrity_key_size >= (int)volume_key_size)) {
+ log_err(cd, _("Volume key is too small for encryption with integrity extensions."));
+ return -EINVAL;
+ }
+ }
+
+ r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
+ if (r < 0)
+ return r;
+
+ if (!(cd->type = strdup(CRYPT_LUKS2)))
+ return -ENOMEM;
+
+ if (volume_key)
+ cd->volume_key = crypt_alloc_volume_key(volume_key_size,
+ volume_key);
+ else
+ cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);
+
+ if (!cd->volume_key)
+ return -ENOMEM;
+
+ if (params && params->pbkdf)
+ r = crypt_set_pbkdf_type(cd, params->pbkdf);
+ else if (verify_pbkdf_params(cd, &cd->pbkdf))
+ r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);
+
+ if (r < 0)
+ return r;
+
+ if (params && cd->metadata_device) {
+ /* For detached header the alignment is used directly as data offset */
+ if (!cd->data_offset)
+ cd->data_offset = params->data_alignment;
+ required_alignment = params->data_alignment * SECTOR_SIZE;
+ } else if (params && params->data_alignment) {
+ required_alignment = params->data_alignment * SECTOR_SIZE;
+ } else
+ device_topology_alignment(cd, cd->device,
+ &required_alignment,
+ &alignment_offset, DEFAULT_DISK_ALIGNMENT);
+
+ r = device_size(crypt_data_device(cd), &dev_size);
+ if (r < 0)
+ goto out;
+
+ if (sector_size_autodetect) {
+ if (cd->data_offset && MISALIGNED(cd->data_offset, sector_size)) {
+ log_dbg(cd, "Data offset not aligned to sector size. Reverting to 512 bytes.");
+ sector_size = SECTOR_SIZE;
+ } else if (MISALIGNED(dev_size - (uint64_t)required_alignment - (uint64_t)alignment_offset, sector_size)) {
+ /* underflow does not affect misalignment checks */
+ log_dbg(cd, "Device size is not aligned to sector size. Reverting to 512 bytes.");
+ sector_size = SECTOR_SIZE;
+ }
+ }
+
+ /* FIXME: allow this later also for normal ciphers (check AF_ALG availability). */
+ if (integrity && !integrity_key_size) {
+ r = crypt_cipher_check_kernel(cipher, cipher_mode, integrity, volume_key_size);
+ if (r < 0) {
+ log_err(cd, _("Cipher %s-%s (key size %zd bits) is not available."),
+ cipher, cipher_mode, volume_key_size * 8);
+ goto out;
+ }
+ }
+
+ if ((!integrity || integrity_key_size) && !crypt_cipher_wrapped_key(cipher, cipher_mode) &&
+ !INTEGRITY_tag_size(NULL, cipher, cipher_mode)) {
+ r = LUKS_check_cipher(cd, volume_key_size - integrity_key_size,
+ cipher, cipher_mode);
+ if (r < 0)
+ goto out;
+ }
+
+ r = LUKS2_generate_hdr(cd, &cd->u.luks2.hdr, cd->volume_key,
+ cipher, cipher_mode,
+ integrity, uuid,
+ sector_size,
+ cd->data_offset * SECTOR_SIZE,
+ alignment_offset,
+ required_alignment,
+ cd->metadata_size, cd->keyslots_size);
+ if (r < 0)
+ goto out;
+
+ if (cd->metadata_size && (cd->metadata_size != LUKS2_metadata_size(&cd->u.luks2.hdr)))
+ log_std(cd, _("WARNING: LUKS2 metadata size changed to %" PRIu64 " bytes.\n"),
+ LUKS2_metadata_size(&cd->u.luks2.hdr));
+
+ if (cd->keyslots_size && (cd->keyslots_size != LUKS2_keyslots_size(&cd->u.luks2.hdr)))
+ log_std(cd, _("WARNING: LUKS2 keyslots area size changed to %" PRIu64 " bytes.\n"),
+ LUKS2_keyslots_size(&cd->u.luks2.hdr));
+
+ if (!integrity && sector_size > SECTOR_SIZE) {
+ dev_size -= (crypt_get_data_offset(cd) * SECTOR_SIZE);
+ if (dev_size % sector_size) {
+ log_err(cd, _("Device size is not aligned to requested sector size."));
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (params && (params->label || params->subsystem)) {
+ r = LUKS2_hdr_labels(cd, &cd->u.luks2.hdr,
+ params->label, params->subsystem, 0);
+ if (r < 0)
+ goto out;
+ }
+
+ device_set_block_size(crypt_data_device(cd), sector_size);
+
+ r = LUKS2_wipe_header_areas(cd, &cd->u.luks2.hdr, cd->metadata_device != NULL);
+ if (r < 0) {
+ log_err(cd, _("Cannot wipe header on device %s."),
+ mdata_device_path(cd));
+ if (dev_size < LUKS2_hdr_and_areas_size(&cd->u.luks2.hdr))
+ log_err(cd, _("Device %s is too small."), device_path(crypt_metadata_device(cd)));
+ goto out;
+ }
+
+ /* Wipe the integrity superblock area, then format a new integrity superblock */
+ if (crypt_get_integrity_tag_size(cd)) {
+ r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_ZERO,
+ crypt_get_data_offset(cd) * SECTOR_SIZE,
+ 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
+ if (r < 0) {
+ if (r == -EBUSY)
+ log_err(cd, _("Cannot format device %s in use."),
+ data_device_path(cd));
+ else if (r == -EACCES) {
+ log_err(cd, _("Cannot format device %s, permission denied."),
+ data_device_path(cd));
+ r = -EINVAL;
+ } else
+ log_err(cd, _("Cannot wipe header on device %s."),
+ data_device_path(cd));
+
+ goto out;
+ }
+
+ r = INTEGRITY_format(cd, params ? params->integrity_params : NULL, NULL, NULL);
+ if (r)
+ log_err(cd, _("Cannot format integrity for device %s."),
+ data_device_path(cd));
+ }
+
+ if (r < 0)
+ goto out;
+
+ /* override sequence id check with format */
+ r = LUKS2_hdr_write_force(cd, &cd->u.luks2.hdr);
+ if (r < 0) {
+ if (r == -EBUSY)
+ log_err(cd, _("Cannot format device %s in use."),
+ mdata_device_path(cd));
+ else if (r == -EACCES) {
+ log_err(cd, _("Cannot format device %s, permission denied."),
+ mdata_device_path(cd));
+ r = -EINVAL;
+ } else
+ log_err(cd, _("Cannot format device %s."),
+ mdata_device_path(cd));
+ }
+
+out:
+ if (r) {
+ LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
+ return r;
+ }
+
+ /* Device size can be larger now if it is a file container */
+ if (!device_size(crypt_data_device(cd), &dev_size) &&
+ dev_size <= (crypt_get_data_offset(cd) * SECTOR_SIZE))
+ log_std(cd, _("Device %s is too small for activation, there is no remaining space for data.\n"),
+ device_path(crypt_data_device(cd)));
+
+ return 0;
+}
+
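+/*
+ * Public-API sketch reaching _crypt_format_luks2() (values illustrative):
+ *
+ *	struct crypt_params_luks2 params = {
+ *		.sector_size = 4096,
+ *		.label = "mydata",
+ *	};
+ *
+ *	crypt_init(&cd, "/dev/sdb1");
+ *	crypt_format(cd, CRYPT_LUKS2, "aes", "xts-plain64",
+ *		     NULL, NULL, 64, &params);
+ */
+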
+static int _crypt_format_loopaes(struct crypt_device *cd,
+ const char *cipher,
+ const char *uuid,
+ size_t volume_key_size,
+ struct crypt_params_loopaes *params)
+{
+ if (!crypt_metadata_device(cd)) {
+ log_err(cd, _("Can't format LOOPAES without device."));
+ return -EINVAL;
+ }
+
+ if (volume_key_size > 1024) {
+ log_err(cd, _("Invalid key size."));
+ return -EINVAL;
+ }
+
+ if (uuid) {
+ log_err(cd, _("UUID is not supported for this crypt type."));
+ return -EINVAL;
+ }
+
+ if (cd->metadata_device) {
+ log_err(cd, _("Detached metadata device is not supported for this crypt type."));
+ return -EINVAL;
+ }
+
+ if (!(cd->type = strdup(CRYPT_LOOPAES)))
+ return -ENOMEM;
+
+ cd->u.loopaes.key_size = volume_key_size;
+
+ cd->u.loopaes.cipher = strdup(cipher ?: DEFAULT_LOOPAES_CIPHER);
+
+ if (params && params->hash)
+ cd->u.loopaes.hdr.hash = strdup(params->hash);
+
+ cd->u.loopaes.hdr.offset = params ? params->offset : 0;
+ cd->u.loopaes.hdr.skip = params ? params->skip : 0;
+
+ return 0;
+}
+
+static int _crypt_format_verity(struct crypt_device *cd,
+ const char *uuid,
+ struct crypt_params_verity *params)
+{
+ int r = 0, hash_size;
+ uint64_t data_device_size, hash_blocks_size;
+ struct device *fec_device = NULL;
+ char *fec_device_path = NULL, *hash_name = NULL, *root_hash = NULL, *salt = NULL;
+
+ if (!crypt_metadata_device(cd)) {
+ log_err(cd, _("Can't format VERITY without device."));
+ return -EINVAL;
+ }
+
+ if (!params)
+ return -EINVAL;
+
+ if (!params->data_device && !cd->metadata_device)
+ return -EINVAL;
+
+ if (params->hash_type > VERITY_MAX_HASH_TYPE) {
+ log_err(cd, _("Unsupported VERITY hash type %d."), params->hash_type);
+ return -EINVAL;
+ }
+
+ if (VERITY_BLOCK_SIZE_OK(params->data_block_size) ||
+ VERITY_BLOCK_SIZE_OK(params->hash_block_size)) {
+ log_err(cd, _("Unsupported VERITY block size."));
+ return -EINVAL;
+ }
+
+ if (MISALIGNED_512(params->hash_area_offset)) {
+ log_err(cd, _("Unsupported VERITY hash offset."));
+ return -EINVAL;
+ }
+
+ if (MISALIGNED_512(params->fec_area_offset)) {
+ log_err(cd, _("Unsupported VERITY FEC offset."));
+ return -EINVAL;
+ }
+
+ if (!(cd->type = strdup(CRYPT_VERITY)))
+ return -ENOMEM;
+
+ if (params->data_device) {
+ r = crypt_set_data_device(cd, params->data_device);
+ if (r)
+ return r;
+ }
+
+ if (!params->data_size) {
+ r = device_size(cd->device, &data_device_size);
+ if (r < 0)
+ return r;
+
+ cd->u.verity.hdr.data_size = data_device_size / params->data_block_size;
+ } else
+ cd->u.verity.hdr.data_size = params->data_size;
+
+ if (device_is_identical(crypt_metadata_device(cd), crypt_data_device(cd)) > 0 &&
+ (cd->u.verity.hdr.data_size * params->data_block_size) > params->hash_area_offset) {
+ log_err(cd, _("Data area overlaps with hash area."));
+ return -EINVAL;
+ }
+
+ hash_size = crypt_hash_size(params->hash_name);
+ if (hash_size <= 0) {
+ log_err(cd, _("Hash algorithm %s not supported."),
+ params->hash_name);
+ return -EINVAL;
+ }
+ cd->u.verity.root_hash_size = hash_size;
+
+ if (params->fec_device) {
+ fec_device_path = strdup(params->fec_device);
+ if (!fec_device_path)
+ return -ENOMEM;
+ r = device_alloc(cd, &fec_device, params->fec_device);
+ if (r < 0) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ hash_blocks_size = VERITY_hash_blocks(cd, params) * params->hash_block_size;
+ if (device_is_identical(crypt_metadata_device(cd), fec_device) > 0 &&
+ (params->hash_area_offset + hash_blocks_size) > params->fec_area_offset) {
+ log_err(cd, _("Hash area overlaps with FEC area."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (device_is_identical(crypt_data_device(cd), fec_device) > 0 &&
+ (cd->u.verity.hdr.data_size * params->data_block_size) > params->fec_area_offset) {
+ log_err(cd, _("Data area overlaps with FEC area."));
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ root_hash = malloc(cd->u.verity.root_hash_size);
+ hash_name = strdup(params->hash_name);
+ salt = malloc(params->salt_size);
+
+ if (!root_hash || !hash_name || !salt) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ cd->u.verity.root_hash = root_hash;
+ cd->u.verity.hdr.hash_name = hash_name;
+ cd->u.verity.hdr.data_device = NULL;
+ cd->u.verity.fec_device = fec_device;
+ cd->u.verity.hdr.fec_device = fec_device_path;
+ cd->u.verity.hdr.fec_roots = params->fec_roots;
+ cd->u.verity.hdr.data_block_size = params->data_block_size;
+ cd->u.verity.hdr.hash_block_size = params->hash_block_size;
+ cd->u.verity.hdr.hash_area_offset = params->hash_area_offset;
+ cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
+ cd->u.verity.hdr.hash_type = params->hash_type;
+ cd->u.verity.hdr.flags = params->flags;
+ cd->u.verity.hdr.salt_size = params->salt_size;
+ cd->u.verity.hdr.salt = salt;
+
+ if (params->salt)
+ memcpy(salt, params->salt, params->salt_size);
+ else
+ r = crypt_random_get(cd, salt, params->salt_size, CRYPT_RND_SALT);
+ if (r)
+ goto out;
+
+ if (params->flags & CRYPT_VERITY_CREATE_HASH) {
+ r = VERITY_create(cd, &cd->u.verity.hdr,
+ cd->u.verity.root_hash, cd->u.verity.root_hash_size);
+ if (!r && params->fec_device)
+ r = VERITY_FEC_process(cd, &cd->u.verity.hdr, cd->u.verity.fec_device, 0, NULL);
+ if (r)
+ goto out;
+ }
+
+ if (!(params->flags & CRYPT_VERITY_NO_HEADER)) {
+ if (uuid) {
+ if (!(cd->u.verity.uuid = strdup(uuid)))
+ r = -ENOMEM;
+ } else
+ r = VERITY_UUID_generate(&cd->u.verity.uuid);
+
+ if (!r)
+ r = VERITY_write_sb(cd, cd->u.verity.hdr.hash_area_offset,
+ cd->u.verity.uuid,
+ &cd->u.verity.hdr);
+ }
+
+out:
+ if (r) {
+ device_free(cd, fec_device);
+ free(root_hash);
+ free(hash_name);
+ free(fec_device_path);
+ free(salt);
+ }
+
+ return r;
+}
+
+static int _crypt_format_integrity(struct crypt_device *cd,
+ const char *uuid,
+ struct crypt_params_integrity *params)
+{
+ int r;
+ uint32_t integrity_tag_size;
+ char *integrity = NULL, *journal_integrity = NULL, *journal_crypt = NULL;
+ struct volume_key *journal_crypt_key = NULL, *journal_mac_key = NULL;
+
+ if (!params)
+ return -EINVAL;
+
+ if (uuid) {
+ log_err(cd, _("UUID is not supported for this crypt type."));
+ return -EINVAL;
+ }
+
+ r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
+ if (r < 0)
+ return r;
+
+ /* Wipe first 8 sectors - fs magic numbers etc. */
+ r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0,
+ 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
+ if (r < 0) {
+ log_err(cd, _("Cannot wipe header on device %s."),
+ mdata_device_path(cd));
+ return r;
+ }
+
+ if (!(cd->type = strdup(CRYPT_INTEGRITY)))
+ return -ENOMEM;
+
+ if (params->journal_crypt_key) {
+ journal_crypt_key = crypt_alloc_volume_key(params->journal_crypt_key_size,
+ params->journal_crypt_key);
+ if (!journal_crypt_key)
+ return -ENOMEM;
+ }
+
+ if (params->journal_integrity_key) {
+ journal_mac_key = crypt_alloc_volume_key(params->journal_integrity_key_size,
+ params->journal_integrity_key);
+ if (!journal_mac_key) {
+ r = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (params->integrity && !(integrity = strdup(params->integrity))) {
+ r = -ENOMEM;
+ goto out;
+ }
+ if (params->journal_integrity && !(journal_integrity = strdup(params->journal_integrity))) {
+ r = -ENOMEM;
+ goto out;
+ }
+ if (params->journal_crypt && !(journal_crypt = strdup(params->journal_crypt))) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ integrity_tag_size = INTEGRITY_hash_tag_size(integrity);
+ if (integrity_tag_size > 0 && params->tag_size && integrity_tag_size != params->tag_size)
+		log_std(cd, _("WARNING: Requested tag size %d bytes differs from %s output size (%d bytes).\n"),
+			params->tag_size, integrity, integrity_tag_size);
+
+ if (params->tag_size)
+ integrity_tag_size = params->tag_size;
+
+ cd->u.integrity.journal_crypt_key = journal_crypt_key;
+ cd->u.integrity.journal_mac_key = journal_mac_key;
+ cd->u.integrity.params.journal_size = params->journal_size;
+ cd->u.integrity.params.journal_watermark = params->journal_watermark;
+ cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
+ cd->u.integrity.params.interleave_sectors = params->interleave_sectors;
+ cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
+ cd->u.integrity.params.sector_size = params->sector_size;
+ cd->u.integrity.params.tag_size = integrity_tag_size;
+ cd->u.integrity.params.integrity = integrity;
+ cd->u.integrity.params.journal_integrity = journal_integrity;
+ cd->u.integrity.params.journal_crypt = journal_crypt;
+
+ r = INTEGRITY_format(cd, params, cd->u.integrity.journal_crypt_key, cd->u.integrity.journal_mac_key);
+ if (r)
+ log_err(cd, _("Cannot format integrity for device %s."),
+ mdata_device_path(cd));
+out:
+ if (r) {
+ crypt_free_volume_key(journal_crypt_key);
+ crypt_free_volume_key(journal_mac_key);
+ free(integrity);
+ free(journal_integrity);
+ free(journal_crypt);
+ }
+
+ return r;
+}
+
+static int _crypt_format(struct crypt_device *cd,
+ const char *type,
+ const char *cipher,
+ const char *cipher_mode,
+ const char *uuid,
+ const char *volume_key,
+ size_t volume_key_size,
+ void *params,
+ bool sector_size_autodetect)
+{
+ int r;
+
+ if (!cd || !type)
+ return -EINVAL;
+
+ if (cd->type) {
+ log_dbg(cd, "Context already formatted as %s.", cd->type);
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Formatting device %s as type %s.", mdata_device_path(cd) ?: "(none)", type);
+
+ crypt_reset_null_type(cd);
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ if (isPLAIN(type))
+ r = _crypt_format_plain(cd, cipher, cipher_mode,
+ uuid, volume_key_size, params);
+ else if (isLUKS1(type))
+ r = _crypt_format_luks1(cd, cipher, cipher_mode,
+ uuid, volume_key, volume_key_size, params);
+ else if (isLUKS2(type))
+ r = _crypt_format_luks2(cd, cipher, cipher_mode,
+ uuid, volume_key, volume_key_size, params, sector_size_autodetect);
+ else if (isLOOPAES(type))
+ r = _crypt_format_loopaes(cd, cipher, uuid, volume_key_size, params);
+ else if (isVERITY(type))
+ r = _crypt_format_verity(cd, uuid, params);
+ else if (isINTEGRITY(type))
+ r = _crypt_format_integrity(cd, uuid, params);
+ else {
+ log_err(cd, _("Unknown crypt device type %s requested."), type);
+ r = -EINVAL;
+ }
+
+ if (r < 0) {
+ crypt_set_null_type(cd);
+ crypt_free_volume_key(cd->volume_key);
+ cd->volume_key = NULL;
+ }
+
+ return r;
+}
+
+CRYPT_SYMBOL_EXPORT_NEW(int, crypt_format, 2, 4,
+	/* crypt_format parameters follow */
+ struct crypt_device *cd,
+ const char *type,
+ const char *cipher,
+ const char *cipher_mode,
+ const char *uuid,
+ const char *volume_key,
+ size_t volume_key_size,
+ void *params)
+{
+ return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, true);
+}
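+
+/*
+ * The versioned symbols differ only in sector-size autodetection:
+ * the 2.4 variant above enables it, the historical 2.0 variant below
+ * keeps it disabled.
+ */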
+
+CRYPT_SYMBOL_EXPORT_OLD(int, crypt_format, 2, 0,
+	/* crypt_format parameters follow */
+ struct crypt_device *cd,
+ const char *type,
+ const char *cipher,
+ const char *cipher_mode,
+ const char *uuid,
+ const char *volume_key,
+ size_t volume_key_size,
+ void *params)
+{
+ return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, false);
+}
+
+int crypt_repair(struct crypt_device *cd,
+ const char *requested_type,
+ void *params __attribute__((unused)))
+{
+ int r;
+
+ if (!cd)
+ return -EINVAL;
+
+ log_dbg(cd, "Trying to repair %s crypt type from device %s.",
+ requested_type ?: "any", mdata_device_path(cd) ?: "(none)");
+
+ if (!crypt_metadata_device(cd))
+ return -EINVAL;
+
+ if (requested_type && !isLUKS(requested_type))
+ return -EINVAL;
+
+ /* Load with repair */
+ r = _crypt_load_luks(cd, requested_type, false, true);
+ if (r < 0)
+ return r;
+
+ /* cd->type and header must be set in context */
+ r = crypt_check_data_device_size(cd);
+ if (r < 0)
+ crypt_set_null_type(cd);
+
+ return r;
+}
+
+/* compare volume keys */
+static int _compare_volume_keys(struct volume_key *svk, unsigned skeyring_only,
+ struct volume_key *tvk, unsigned tkeyring_only)
+{
+ if (!svk && !tvk)
+ return 0;
+ else if (!svk || !tvk)
+ return 1;
+
+ if (svk->keylength != tvk->keylength)
+ return 1;
+
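+	/*
+	 * Keys held only in the kernel keyring cannot be read back;
+	 * fall back to comparing their key descriptions.
+	 */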
+ if (!skeyring_only && !tkeyring_only)
+ return crypt_backend_memeq(svk->key, tvk->key, svk->keylength);
+
+ if (svk->key_description && tvk->key_description)
+ return strcmp(svk->key_description, tvk->key_description);
+
+ return 0;
+}
+
+static int _compare_device_types(struct crypt_device *cd,
+ const struct crypt_dm_active_device *src,
+ const struct crypt_dm_active_device *tgt)
+{
+ if (!tgt->uuid) {
+ log_dbg(cd, "Missing device uuid in target device.");
+ return -EINVAL;
+ }
+
+ if (isLUKS2(cd->type) && !strncmp("INTEGRITY-", tgt->uuid, strlen("INTEGRITY-"))) {
+ if (crypt_uuid_cmp(tgt->uuid, src->uuid)) {
+ log_dbg(cd, "LUKS UUID mismatch.");
+ return -EINVAL;
+ }
+ } else if (isLUKS(cd->type)) {
+ if (!src->uuid || strncmp(cd->type, tgt->uuid, strlen(cd->type)) ||
+ crypt_uuid_cmp(tgt->uuid, src->uuid)) {
+ log_dbg(cd, "LUKS UUID mismatch.");
+ return -EINVAL;
+ }
+ } else if (isPLAIN(cd->type) || isLOOPAES(cd->type)) {
+ if (strncmp(cd->type, tgt->uuid, strlen(cd->type))) {
+ log_dbg(cd, "Unexpected uuid prefix %s in target device.", tgt->uuid);
+ return -EINVAL;
+ }
+ } else if (!isINTEGRITY(cd->type)) {
+ log_dbg(cd, "Unsupported device type %s for reload.", cd->type ?: "<empty>");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+static int _compare_crypt_devices(struct crypt_device *cd,
+ const struct dm_target *src,
+ const struct dm_target *tgt)
+{
+ char *src_cipher = NULL, *src_integrity = NULL;
+ int r = -EINVAL;
+
+ /* for crypt devices keys are mandatory */
+ if (!src->u.crypt.vk || !tgt->u.crypt.vk)
+ return -EINVAL;
+
+ /* CIPHER checks */
+ if (!src->u.crypt.cipher || !tgt->u.crypt.cipher)
+ return -EINVAL;
+
+ /*
+	 * dm_query_target converts a capi cipher specification to the dm-crypt
+	 * format. We need to do the same for the cipher specification requested
+	 * in the source device.
+ */
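+	/* For example, "capi:xts(aes)-plain64" becomes "aes-xts-plain64". */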
+ if (crypt_capi_to_cipher(&src_cipher, &src_integrity, src->u.crypt.cipher, src->u.crypt.integrity))
+ return -EINVAL;
+
+ if (strcmp(src_cipher, tgt->u.crypt.cipher)) {
+ log_dbg(cd, "Cipher specs do not match.");
+ goto out;
+ }
+
+ if (tgt->u.crypt.vk->keylength == 0 && crypt_is_cipher_null(tgt->u.crypt.cipher))
+ log_dbg(cd, "Existing device uses cipher null. Skipping key comparison.");
+ else if (_compare_volume_keys(src->u.crypt.vk, 0, tgt->u.crypt.vk, tgt->u.crypt.vk->key_description != NULL)) {
+ log_dbg(cd, "Keys in context and target device do not match.");
+ goto out;
+ }
+
+ if (crypt_strcmp(src_integrity, tgt->u.crypt.integrity)) {
+ log_dbg(cd, "Integrity parameters do not match.");
+ goto out;
+ }
+
+ if (src->u.crypt.offset != tgt->u.crypt.offset ||
+ src->u.crypt.sector_size != tgt->u.crypt.sector_size ||
+ src->u.crypt.iv_offset != tgt->u.crypt.iv_offset ||
+ src->u.crypt.tag_size != tgt->u.crypt.tag_size) {
+ log_dbg(cd, "Integer parameters do not match.");
+ goto out;
+ }
+
+ if (device_is_identical(src->data_device, tgt->data_device) <= 0)
+ log_dbg(cd, "Data devices do not match.");
+ else
+ r = 0;
+
+out:
+ free(src_cipher);
+ free(src_integrity);
+
+ return r;
+}
+
+static int _compare_integrity_devices(struct crypt_device *cd,
+ const struct dm_target *src,
+ const struct dm_target *tgt)
+{
+ /*
+ * some parameters may be implicit (and set in dm-integrity ctor)
+ *
+ * journal_size
+ * journal_watermark
+ * journal_commit_time
+ * buffer_sectors
+ * interleave_sectors
+ */
+
+	/* check the remaining integer values that make sense */
+ if (src->u.integrity.tag_size != tgt->u.integrity.tag_size ||
+ src->u.integrity.offset != tgt->u.integrity.offset ||
+ src->u.integrity.sector_size != tgt->u.integrity.sector_size) {
+ log_dbg(cd, "Integer parameters do not match.");
+ return -EINVAL;
+ }
+
+ if (crypt_strcmp(src->u.integrity.integrity, tgt->u.integrity.integrity) ||
+ crypt_strcmp(src->u.integrity.journal_integrity, tgt->u.integrity.journal_integrity) ||
+ crypt_strcmp(src->u.integrity.journal_crypt, tgt->u.integrity.journal_crypt)) {
+ log_dbg(cd, "Journal parameters do not match.");
+ return -EINVAL;
+ }
+
+	/* unfortunately, dm-integrity doesn't support keyring keys */
+ if (_compare_volume_keys(src->u.integrity.vk, 0, tgt->u.integrity.vk, 0) ||
+ _compare_volume_keys(src->u.integrity.journal_integrity_key, 0, tgt->u.integrity.journal_integrity_key, 0) ||
+ _compare_volume_keys(src->u.integrity.journal_crypt_key, 0, tgt->u.integrity.journal_crypt_key, 0)) {
+ log_dbg(cd, "Journal keys do not match.");
+ return -EINVAL;
+ }
+
+ if (device_is_identical(src->data_device, tgt->data_device) <= 0) {
+ log_dbg(cd, "Data devices do not match.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int crypt_compare_dm_devices(struct crypt_device *cd,
+ const struct crypt_dm_active_device *src,
+ const struct crypt_dm_active_device *tgt)
+{
+ int r;
+ const struct dm_target *s, *t;
+
+ if (!src || !tgt)
+ return -EINVAL;
+
+ r = _compare_device_types(cd, src, tgt);
+ if (r)
+ return r;
+
+ s = &src->segment;
+ t = &tgt->segment;
+
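+	/* Walk both segment chains in lockstep; count, type and per-type parameters must all match. */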
+ while (s || t) {
+ if (!s || !t) {
+			log_dbg(cd, "Segment count mismatch.");
+ return -EINVAL;
+ }
+ if (s->type != t->type) {
+			log_dbg(cd, "Segment type mismatch.");
+ r = -EINVAL;
+ break;
+ }
+
+ switch (s->type) {
+ case DM_CRYPT:
+ r = _compare_crypt_devices(cd, s, t);
+ break;
+ case DM_INTEGRITY:
+ r = _compare_integrity_devices(cd, s, t);
+ break;
+ case DM_LINEAR:
+ r = (s->u.linear.offset == t->u.linear.offset) ? 0 : -EINVAL;
+ break;
+ default:
+ r = -ENOTSUP;
+ }
+
+ if (r)
+ break;
+
+ s = s->next;
+ t = t->next;
+ }
+
+ return r;
+}
+
+static int _reload_device(struct crypt_device *cd, const char *name,
+ struct crypt_dm_active_device *sdmd)
+{
+ int r;
+ struct crypt_dm_active_device tdmd;
+ struct dm_target *src, *tgt = &tdmd.segment;
+
+ if (!cd || !cd->type || !name || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))
+ return -EINVAL;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
+ DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
+ DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_INTEGRITY_PARAMS |
+ DM_ACTIVE_JOURNAL_CRYPT_KEY | DM_ACTIVE_JOURNAL_MAC_KEY, &tdmd);
+ if (r < 0) {
+ log_err(cd, _("Device %s is not active."), name);
+ return -EINVAL;
+ }
+
+ if (!single_segment(&tdmd) ||
+ (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY) ||
+ (tgt->type == DM_CRYPT && tgt->u.crypt.tag_size)) {
+ r = -ENOTSUP;
+ log_err(cd, _("Unsupported parameters on device %s."), name);
+ goto out;
+ }
+
+ r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
+ if (r) {
+ log_err(cd, _("Mismatching parameters on device %s."), name);
+ goto out;
+ }
+
+ src = &sdmd->segment;
+
+	/* Changing the read-only flag for an active device makes no sense */
+ if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
+ sdmd->flags |= CRYPT_ACTIVATE_READONLY;
+ else
+ sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;
+
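+	/*
+	 * For a keyring-backed key only the key description is carried over;
+	 * otherwise the raw key material is copied into the new table.
+	 */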
+ if (tgt->type == DM_CRYPT && sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
+ r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
+ if (r)
+ goto out;
+ } else if (tgt->type == DM_CRYPT) {
+ crypt_free_volume_key(tgt->u.crypt.vk);
+ tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
+ if (!tgt->u.crypt.vk) {
+ r = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (tgt->type == DM_CRYPT)
+ r = device_block_adjust(cd, src->data_device, DEV_OK,
+ src->u.crypt.offset, &sdmd->size, NULL);
+ else if (tgt->type == DM_INTEGRITY)
+ r = device_block_adjust(cd, src->data_device, DEV_OK,
+ src->u.integrity.offset, &sdmd->size, NULL);
+ else
+ r = -EINVAL;
+
+ if (r)
+ goto out;
+
+ tdmd.flags = sdmd->flags;
+ tgt->size = tdmd.size = sdmd->size;
+
+ r = dm_reload_device(cd, name, &tdmd, 0, 1);
+out:
+ dm_targets_free(cd, &tdmd);
+ free(CONST_CAST(void*)tdmd.uuid);
+
+ return r;
+}
+
+static int _reload_device_with_integrity(struct crypt_device *cd,
+ const char *name,
+ const char *iname,
+ const char *ipath,
+ struct crypt_dm_active_device *sdmd,
+ struct crypt_dm_active_device *sdmdi)
+{
+ int r;
+ struct crypt_dm_active_device tdmd, tdmdi = {};
+ struct dm_target *src, *srci, *tgt = &tdmd.segment, *tgti = &tdmdi.segment;
+ struct device *data_device = NULL;
+ bool clear = false;
+
+ if (!cd || !cd->type || !name || !iname || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))
+ return -EINVAL;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
+ DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
+ DM_ACTIVE_CRYPT_KEY, &tdmd);
+ if (r < 0) {
+ log_err(cd, _("Device %s is not active."), name);
+ return -EINVAL;
+ }
+
+ if (!single_segment(&tdmd) || tgt->type != DM_CRYPT || !tgt->u.crypt.tag_size) {
+ log_err(cd, _("Unsupported parameters on device %s."), name);
+ r = -ENOTSUP;
+ goto out;
+ }
+
+ r = dm_query_device(cd, iname, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &tdmdi);
+ if (r < 0) {
+ log_err(cd, _("Device %s is not active."), iname);
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!single_segment(&tdmdi) || tgti->type != DM_INTEGRITY) {
+ log_err(cd, _("Unsupported parameters on device %s."), iname);
+ r = -ENOTSUP;
+ goto out;
+ }
+
+ r = crypt_compare_dm_devices(cd, sdmdi, &tdmdi);
+ if (r) {
+ log_err(cd, _("Mismatching parameters on device %s."), iname);
+ goto out;
+ }
+
+	/* meta_device is not supported underneath dm-crypt with authenticated encryption */
+	if (sdmdi->segment.u.integrity.meta_device || tdmdi.segment.u.integrity.meta_device) {
+		r = -ENOTSUP;
+		goto out;
+	}
+
+ src = &sdmd->segment;
+ srci = &sdmdi->segment;
+
+ r = device_alloc(cd, &data_device, ipath);
+ if (r < 0)
+ goto out;
+
+ r = device_block_adjust(cd, srci->data_device, DEV_OK,
+ srci->u.integrity.offset, &sdmdi->size, NULL);
+ if (r)
+ goto out;
+
+ src->data_device = data_device;
+
+ r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
+ if (r) {
+ log_err(cd, _("Crypt devices mismatch."));
+ goto out;
+ }
+
+	/* Changing the read-only flag for an active device makes no sense */
+ if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
+ sdmd->flags |= CRYPT_ACTIVATE_READONLY;
+ else
+ sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;
+
+ if (tdmdi.flags & CRYPT_ACTIVATE_READONLY)
+ sdmdi->flags |= CRYPT_ACTIVATE_READONLY;
+ else
+ sdmdi->flags &= ~CRYPT_ACTIVATE_READONLY;
+
+ if (sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
+ r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
+ if (r)
+ goto out;
+ } else {
+ crypt_free_volume_key(tgt->u.crypt.vk);
+ tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
+ if (!tgt->u.crypt.vk) {
+ r = -ENOMEM;
+ goto out;
+ }
+ }
+
+ r = device_block_adjust(cd, src->data_device, DEV_OK,
+ src->u.crypt.offset, &sdmd->size, NULL);
+ if (r)
+ goto out;
+
+ tdmd.flags = sdmd->flags;
+ tdmd.size = sdmd->size;
+
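+	/*
+	 * Reload both tables first, then suspend top-down (dm-crypt, then
+	 * dm-integrity) and resume bottom-up, so the new dm-integrity table is
+	 * live before the dm-crypt device on top of it resumes.
+	 */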
+ if ((r = dm_reload_device(cd, iname, sdmdi, 0, 0))) {
+ log_err(cd, _("Failed to reload device %s."), iname);
+ goto out;
+ }
+
+ if ((r = dm_reload_device(cd, name, &tdmd, 0, 0))) {
+ log_err(cd, _("Failed to reload device %s."), name);
+ clear = true;
+ goto out;
+ }
+
+ if ((r = dm_suspend_device(cd, name, 0))) {
+ log_err(cd, _("Failed to suspend device %s."), name);
+ clear = true;
+ goto out;
+ }
+
+ if ((r = dm_suspend_device(cd, iname, 0))) {
+ log_err(cd, _("Failed to suspend device %s."), iname);
+ clear = true;
+ goto out;
+ }
+
+ if ((r = dm_resume_device(cd, iname, act2dmflags(sdmdi->flags)))) {
+ log_err(cd, _("Failed to resume device %s."), iname);
+ clear = true;
+ goto out;
+ }
+
+ r = dm_resume_device(cd, name, act2dmflags(tdmd.flags));
+ if (!r)
+ goto out;
+
+	/*
+	 * This is the worst-case scenario: the underlying dm-integrity device is
+	 * active with the new table, but the dm-crypt resume failed for some
+	 * reason. Tear everything down and burn it for good.
+	 */
+
+ log_err(cd, _("Fatal error while reloading device %s (on top of device %s)."), name, iname);
+
+ if (dm_error_device(cd, name))
+ log_err(cd, _("Failed to switch device %s to dm-error."), name);
+ if (dm_error_device(cd, iname))
+ log_err(cd, _("Failed to switch device %s to dm-error."), iname);
+out:
+ if (clear) {
+ dm_clear_device(cd, name);
+ dm_clear_device(cd, iname);
+
+ if (dm_status_suspended(cd, name) > 0)
+ dm_resume_device(cd, name, 0);
+ if (dm_status_suspended(cd, iname) > 0)
+ dm_resume_device(cd, iname, 0);
+ }
+
+ dm_targets_free(cd, &tdmd);
+ dm_targets_free(cd, &tdmdi);
+ free(CONST_CAST(void*)tdmdi.uuid);
+ free(CONST_CAST(void*)tdmd.uuid);
+ device_free(cd, data_device);
+
+ return r;
+}
+
+int crypt_resize(struct crypt_device *cd, const char *name, uint64_t new_size)
+{
+ struct crypt_dm_active_device dmdq, dmd = {};
+ struct dm_target *tgt = &dmdq.segment;
+ struct crypt_params_integrity params = {};
+ uint32_t supported_flags = 0;
+ uint64_t old_size;
+ int r;
+
+ /*
+ * FIXME: Also with LUKS2 we must not allow resize when there's
+ * explicit size stored in metadata (length != "dynamic")
+ */
+
+ /* Device context type must be initialized */
+ if (!cd || !cd->type || !name)
+ return -EINVAL;
+
+ if (isTCRYPT(cd->type) || isBITLK(cd->type)) {
+ log_err(cd, _("This operation is not supported for this device type."));
+ return -ENOTSUP;
+ }
+
+ log_dbg(cd, "Resizing device %s to %" PRIu64 " sectors.", name, new_size);
+
+ r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
+ DM_ACTIVE_INTEGRITY_PARAMS | DM_ACTIVE_JOURNAL_CRYPT_KEY |
+ DM_ACTIVE_JOURNAL_MAC_KEY, &dmdq);
+ if (r < 0) {
+ log_err(cd, _("Device %s is not active."), name);
+ return -EINVAL;
+ }
+ if (!single_segment(&dmdq) || (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY)) {
+ log_dbg(cd, "Unsupported device table detected in %s.", name);
+ r = -EINVAL;
+ goto out;
+ }
+
+ if ((dmdq.flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_key_in_keyring(cd)) {
+ r = -EPERM;
+ goto out;
+ }
+
+ if (crypt_key_in_keyring(cd)) {
+ if (!isLUKS2(cd->type)) {
+ r = -EINVAL;
+ goto out;
+ }
+ r = LUKS2_key_description_by_segment(cd, &cd->u.luks2.hdr,
+ tgt->u.crypt.vk, CRYPT_DEFAULT_SEGMENT);
+ if (r)
+ goto out;
+
+ dmdq.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+ }
+
+ if (crypt_loop_device(crypt_get_device_name(cd))) {
+ log_dbg(cd, "Trying to resize underlying loop device %s.",
+ crypt_get_device_name(cd));
+		/* Here we always use the default size, not new_size */
+ if (crypt_loop_resize(crypt_get_device_name(cd)))
+ log_err(cd, _("Cannot resize loop device."));
+ }
+
+	/*
+	 * Integrity device metadata is maintained by the kernel. We need to
+	 * reload the device (with the same parameters) and let the kernel
+	 * calculate the maximum size of the integrity device and store it in
+	 * the superblock.
+	 */
+ if (!new_size && tgt->type == DM_INTEGRITY) {
+ r = INTEGRITY_data_sectors(cd, crypt_metadata_device(cd),
+ crypt_get_data_offset(cd) * SECTOR_SIZE, &old_size);
+ if (r < 0)
+			goto out;
+
+ dmd.size = dmdq.size;
+ dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH | CRYPT_ACTIVATE_PRIVATE;
+
+ r = crypt_get_integrity_info(cd, &params);
+ if (r)
+ goto out;
+
+ r = dm_integrity_target_set(cd, &dmd.segment, 0, dmdq.segment.size,
+ crypt_metadata_device(cd), crypt_data_device(cd),
+ crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd),
+ crypt_get_sector_size(cd), tgt->u.integrity.vk, tgt->u.integrity.journal_crypt_key,
+ tgt->u.integrity.journal_integrity_key, &params);
+ if (r)
+ goto out;
+ r = _reload_device(cd, name, &dmd);
+ if (r)
+ goto out;
+
+ r = INTEGRITY_data_sectors(cd, crypt_metadata_device(cd),
+ crypt_get_data_offset(cd) * SECTOR_SIZE, &new_size);
+ if (r < 0)
+			goto out;
+ log_dbg(cd, "Maximum integrity device size from kernel %" PRIu64, new_size);
+
+ if (old_size == new_size && new_size == dmdq.size &&
+ !dm_flags(cd, tgt->type, &supported_flags) &&
+ !(supported_flags & DM_INTEGRITY_RESIZE_SUPPORTED))
+ log_std(cd, _("WARNING: Maximum size already set or kernel doesn't support resize.\n"));
+ }
+
+ r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
+ crypt_get_data_offset(cd), &new_size, &dmdq.flags);
+ if (r)
+ goto out;
+
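+	/* new_size is in 512-byte sectors; it must be aligned to both the
+	 * encryption sector size and the device logical block size. */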
+ if (MISALIGNED(new_size, (tgt->type == DM_CRYPT ? tgt->u.crypt.sector_size : tgt->u.integrity.sector_size) >> SECTOR_SHIFT)) {
+ log_err(cd, _("Device size is not aligned to requested sector size."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (MISALIGNED(new_size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
+ log_err(cd, _("Device size is not aligned to device logical block size."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ dmd.uuid = crypt_get_uuid(cd);
+ dmd.size = new_size;
+ dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH;
+
+ if (tgt->type == DM_CRYPT) {
+ r = dm_crypt_target_set(&dmd.segment, 0, new_size, crypt_data_device(cd),
+ tgt->u.crypt.vk, crypt_get_cipher_spec(cd),
+ crypt_get_iv_offset(cd), crypt_get_data_offset(cd),
+ crypt_get_integrity(cd), crypt_get_integrity_tag_size(cd),
+ crypt_get_sector_size(cd));
+ if (r < 0)
+ goto out;
+ } else if (tgt->type == DM_INTEGRITY) {
+ r = crypt_get_integrity_info(cd, &params);
+ if (r)
+ goto out;
+
+ r = dm_integrity_target_set(cd, &dmd.segment, 0, new_size,
+ crypt_metadata_device(cd), crypt_data_device(cd),
+ crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd),
+ crypt_get_sector_size(cd), tgt->u.integrity.vk, tgt->u.integrity.journal_crypt_key,
+ tgt->u.integrity.journal_integrity_key, &params);
+ if (r)
+ goto out;
+ }
+
+ if (new_size == dmdq.size) {
+ log_dbg(cd, "Device has already requested size %" PRIu64
+ " sectors.", dmdq.size);
+ r = 0;
+ } else {
+ if (isTCRYPT(cd->type))
+ r = -ENOTSUP;
+ else if (isLUKS2(cd->type))
+ r = LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, 0);
+ if (!r)
+ r = _reload_device(cd, name, &dmd);
+
+ if (r && tgt->type == DM_INTEGRITY &&
+ !dm_flags(cd, tgt->type, &supported_flags) &&
+ !(supported_flags & DM_INTEGRITY_RESIZE_SUPPORTED))
+ log_err(cd, _("Resize failed, the kernel doesn't support it."));
+ }
+out:
+ dm_targets_free(cd, &dmd);
+ dm_targets_free(cd, &dmdq);
+
+ return r;
+}
+
+int crypt_set_uuid(struct crypt_device *cd, const char *uuid)
+{
+ const char *active_uuid;
+ int r;
+
+ log_dbg(cd, "%s device uuid.", uuid ? "Setting new" : "Refreshing");
+
+ if ((r = onlyLUKS(cd)))
+ return r;
+
+ active_uuid = crypt_get_uuid(cd);
+
+ if (uuid && active_uuid && !strncmp(uuid, active_uuid, UUID_STRING_L)) {
+ log_dbg(cd, "UUID is the same as requested (%s) for device %s.",
+ uuid, mdata_device_path(cd));
+ return 0;
+ }
+
+ if (uuid)
+ log_dbg(cd, "Requested new UUID change to %s for %s.", uuid, mdata_device_path(cd));
+ else
+ log_dbg(cd, "Requested new UUID refresh for %s.", mdata_device_path(cd));
+
+	if (!crypt_confirm(cd, _("Do you really want to change the UUID of the device?")))
+ return -EPERM;
+
+ if (isLUKS1(cd->type))
+ return LUKS_hdr_uuid_set(&cd->u.luks1.hdr, uuid, cd);
+ else
+ return LUKS2_hdr_uuid(cd, &cd->u.luks2.hdr, uuid);
+}
+
+int crypt_set_label(struct crypt_device *cd, const char *label, const char *subsystem)
+{
+ int r;
+
+ log_dbg(cd, "Setting new labels.");
+
+ if ((r = onlyLUKS2(cd)))
+ return r;
+
+ return LUKS2_hdr_labels(cd, &cd->u.luks2.hdr, label, subsystem, 1);
+}
+
+const char *crypt_get_label(struct crypt_device *cd)
+{
+ if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))
+ return NULL;
+
+ return cd->u.luks2.hdr.label;
+}
+
+const char *crypt_get_subsystem(struct crypt_device *cd)
+{
+ if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))
+ return NULL;
+
+ return cd->u.luks2.hdr.subsystem;
+}
+
+int crypt_header_backup(struct crypt_device *cd,
+ const char *requested_type,
+ const char *backup_file)
+{
+ int r;
+
+ if (requested_type && !isLUKS(requested_type))
+ return -EINVAL;
+
+ if (!backup_file)
+ return -EINVAL;
+
+	/* Load without repair */
+ r = _crypt_load_luks(cd, requested_type, false, false);
+ if (r < 0)
+ return r;
+
+ log_dbg(cd, "Requested header backup of device %s (%s) to "
+ "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
+
+ if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
+ r = LUKS_hdr_backup(backup_file, cd);
+ else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type)))
+ r = LUKS2_hdr_backup(cd, &cd->u.luks2.hdr, backup_file);
+ else
+ r = -EINVAL;
+
+ return r;
+}
+
+int crypt_header_restore(struct crypt_device *cd,
+ const char *requested_type,
+ const char *backup_file)
+{
+ struct luks_phdr hdr1;
+ struct luks2_hdr hdr2;
+ int r, version;
+
+ if (requested_type && !isLUKS(requested_type))
+ return -EINVAL;
+
+ if (!cd || (cd->type && !isLUKS(cd->type)) || !backup_file)
+ return -EINVAL;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ log_dbg(cd, "Requested header restore to device %s (%s) from "
+ "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
+
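+	/* Detect the LUKS version in the backup file and check it against the requested type. */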
+ version = LUKS2_hdr_version_unlocked(cd, backup_file);
+ if (!version ||
+ (requested_type && version == 1 && !isLUKS1(requested_type)) ||
+ (requested_type && version == 2 && !isLUKS2(requested_type))) {
+ log_err(cd, _("Header backup file does not contain compatible LUKS header."));
+ return -EINVAL;
+ }
+
+ memset(&hdr2, 0, sizeof(hdr2));
+
+ if (!cd->type) {
+ if (version == 1)
+ r = LUKS_hdr_restore(backup_file, &hdr1, cd);
+ else
+ r = LUKS2_hdr_restore(cd, &hdr2, backup_file);
+
+ crypt_safe_memzero(&hdr1, sizeof(hdr1));
+ crypt_safe_memzero(&hdr2, sizeof(hdr2));
+ } else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type))) {
+ r = LUKS2_hdr_restore(cd, &cd->u.luks2.hdr, backup_file);
+ if (r)
+ (void) _crypt_load_luks2(cd, 1, 0);
+ } else if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
+ r = LUKS_hdr_restore(backup_file, &cd->u.luks1.hdr, cd);
+ else
+ r = -EINVAL;
+
+ if (!r)
+ r = _crypt_load_luks(cd, version == 1 ? CRYPT_LUKS1 : CRYPT_LUKS2, false, true);
+
+ return r;
+}
+
+int crypt_header_is_detached(struct crypt_device *cd)
+{
+ int r;
+
+ if (!cd || (cd->type && !isLUKS(cd->type)))
+ return -EINVAL;
+
+ r = device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd));
+ if (r < 0) {
+ log_dbg(cd, "Failed to compare data and metadata devices path.");
+ return r;
+ }
+
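+	/* Identical devices mean an attached header; distinct devices mean a detached one. */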
+ return r ? 0 : 1;
+}
+
+void crypt_free(struct crypt_device *cd)
+{
+ if (!cd)
+ return;
+
+ log_dbg(cd, "Releasing crypt device %s context.", mdata_device_path(cd) ?: "empty");
+
+ dm_backend_exit(cd);
+ crypt_free_volume_key(cd->volume_key);
+
+ crypt_free_type(cd, NULL);
+
+ device_free(cd, cd->device);
+ device_free(cd, cd->metadata_device);
+
+ free(CONST_CAST(void*)cd->pbkdf.type);
+ free(CONST_CAST(void*)cd->pbkdf.hash);
+
+	/* Some structures can contain keys (TCRYPT), wipe them */
+ crypt_safe_memzero(cd, sizeof(*cd));
+ free(cd);
+}
+
+static char *crypt_get_device_key_description(struct crypt_device *cd, const char *name)
+{
+ char *desc = NULL;
+ struct crypt_dm_active_device dmd;
+ struct dm_target *tgt = &dmd.segment;
+
+ if (dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmd) < 0)
+ return NULL;
+
+ if (single_segment(&dmd) && tgt->type == DM_CRYPT &&
+ (dmd.flags & CRYPT_ACTIVATE_KEYRING_KEY) && tgt->u.crypt.vk->key_description)
+ desc = strdup(tgt->u.crypt.vk->key_description);
+
+ dm_targets_free(cd, &dmd);
+
+ return desc;
+}
+
+int crypt_suspend(struct crypt_device *cd,
+ const char *name)
+{
+ char *key_desc;
+ crypt_status_info ci;
+ int r;
+ uint32_t dmflags = DM_SUSPEND_WIPE_KEY;
+
+ /* FIXME: check context uuid matches the dm-crypt device uuid (onlyLUKS branching) */
+
+ if (!cd || !name)
+ return -EINVAL;
+
+ log_dbg(cd, "Suspending volume %s.", name);
+
+ if (cd->type)
+ r = onlyLUKS(cd);
+ else {
+ r = crypt_uuid_type_cmp(cd, CRYPT_LUKS1);
+ if (r < 0)
+ r = crypt_uuid_type_cmp(cd, CRYPT_LUKS2);
+ if (r < 0)
+			log_err(cd, _("This operation is supported only for LUKS devices."));
+ }
+
+ if (r < 0)
+ return r;
+
+ ci = crypt_status(NULL, name);
+ if (ci < CRYPT_ACTIVE) {
+ log_err(cd, _("Volume %s is not active."), name);
+ return -EINVAL;
+ }
+
+ dm_backend_init(cd);
+
+ r = dm_status_suspended(cd, name);
+ if (r < 0)
+ goto out;
+
+ if (r) {
+ log_err(cd, _("Volume %s is already suspended."), name);
+ r = -EINVAL;
+ goto out;
+ }
+
+ key_desc = crypt_get_device_key_description(cd, name);
+
+ /* we can't simply wipe wrapped keys */
+ if (crypt_cipher_wrapped_key(crypt_get_cipher(cd), crypt_get_cipher_mode(cd)))
+ dmflags &= ~DM_SUSPEND_WIPE_KEY;
+
+ r = dm_suspend_device(cd, name, dmflags);
+ if (r == -ENOTSUP)
+ log_err(cd, _("Suspend is not supported for device %s."), name);
+ else if (r)
+		log_err(cd, _("Error suspending device %s."), name);
+ else
+ crypt_drop_keyring_key_by_description(cd, key_desc, LOGON_KEY);
+ free(key_desc);
+out:
+ dm_backend_exit(cd);
+ return r;
+}
+
+/* the key must already be verified by the caller */
+static int resume_by_volume_key(struct crypt_device *cd,
+ struct volume_key *vk,
+ const char *name)
+{
+ int digest, r;
+ struct volume_key *zerokey = NULL;
+
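+	/* The null cipher needs no key material; substitute an empty key. */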
+ if (crypt_is_cipher_null(crypt_get_cipher_spec(cd))) {
+ zerokey = crypt_alloc_volume_key(0, NULL);
+ if (!zerokey)
+ return -ENOMEM;
+ vk = zerokey;
+ } else if (crypt_use_keyring_for_vk(cd)) {
+ /* LUKS2 path only */
+ digest = LUKS2_digest_by_segment(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
+ if (digest < 0)
+ return -EINVAL;
+ r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, digest);
+ if (r < 0)
+ return r;
+ }
+
+ r = dm_resume_and_reinstate_key(cd, name, vk);
+
+ if (r == -ENOTSUP)
+ log_err(cd, _("Resume is not supported for device %s."), name);
+ else if (r)
+		log_err(cd, _("Error resuming device %s."), name);
+
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vk);
+
+ crypt_free_volume_key(zerokey);
+
+ return r;
+}
+
+int crypt_resume_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *passphrase,
+ size_t passphrase_size)
+{
+ struct volume_key *vk = NULL;
+ int r;
+
+ /* FIXME: check context uuid matches the dm-crypt device uuid */
+
+ if (!passphrase || !name)
+ return -EINVAL;
+
+ log_dbg(cd, "Resuming volume %s.", name);
+
+ if ((r = onlyLUKS(cd)))
+ return r;
+
+ r = dm_status_suspended(cd, name);
+ if (r < 0)
+ return r;
+
+ if (!r) {
+ log_err(cd, _("Volume %s is not suspended."), name);
+ return -EINVAL;
+ }
+
+ if (isLUKS1(cd->type))
+ r = LUKS_open_key_with_hdr(keyslot, passphrase, passphrase_size,
+ &cd->u.luks1.hdr, &vk, cd);
+ else
+ r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT, passphrase, passphrase_size, &vk);
+
+ if (r < 0)
+ return r;
+
+ keyslot = r;
+
+ r = resume_by_volume_key(cd, vk, name);
+
+ crypt_free_volume_key(vk);
+ return r < 0 ? r : keyslot;
+}
+
+int crypt_resume_by_keyfile_device_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset)
+{
+ struct volume_key *vk = NULL;
+ char *passphrase_read = NULL;
+ size_t passphrase_size_read;
+ int r;
+
+ /* FIXME: check context uuid matches the dm-crypt device uuid */
+
+ if (!name || !keyfile)
+ return -EINVAL;
+
+ log_dbg(cd, "Resuming volume %s.", name);
+
+ if ((r = onlyLUKS(cd)))
+ return r;
+
+ r = dm_status_suspended(cd, name);
+ if (r < 0)
+ return r;
+
+ if (!r) {
+ log_err(cd, _("Volume %s is not suspended."), name);
+ return -EINVAL;
+ }
+
+ r = crypt_keyfile_device_read(cd, keyfile,
+ &passphrase_read, &passphrase_size_read,
+ keyfile_offset, keyfile_size, 0);
+ if (r < 0)
+ return r;
+
+ if (isLUKS1(cd->type))
+ r = LUKS_open_key_with_hdr(keyslot, passphrase_read, passphrase_size_read,
+ &cd->u.luks1.hdr, &vk, cd);
+ else
+ r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT,
+ passphrase_read, passphrase_size_read, &vk);
+
+ crypt_safe_free(passphrase_read);
+ if (r < 0)
+ return r;
+
+ keyslot = r;
+
+ r = resume_by_volume_key(cd, vk, name);
+
+ crypt_free_volume_key(vk);
+ return r < 0 ? r : keyslot;
+}
+
+int crypt_resume_by_keyfile(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size)
+{
+ return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
+ keyfile, keyfile_size, 0);
+}
+
+int crypt_resume_by_keyfile_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ size_t keyfile_offset)
+{
+ return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
+ keyfile, keyfile_size, keyfile_offset);
+}
+
+int crypt_resume_by_volume_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size)
+{
+ struct volume_key *vk = NULL;
+ int r;
+
+ if (!name || !volume_key)
+ return -EINVAL;
+
+ log_dbg(cd, "Resuming volume %s by volume key.", name);
+
+ if ((r = onlyLUKS(cd)))
+ return r;
+
+ r = dm_status_suspended(cd, name);
+ if (r < 0)
+ return r;
+
+ if (!r) {
+ log_err(cd, _("Volume %s is not suspended."), name);
+ return -EINVAL;
+ }
+
+ vk = crypt_alloc_volume_key(volume_key_size, volume_key);
+ if (!vk)
+ return -ENOMEM;
+
+ if (isLUKS1(cd->type))
+ r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
+ else if (isLUKS2(cd->type))
+ r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
+ else
+ r = -EINVAL;
+ if (r == -EPERM || r == -ENOENT)
+ log_err(cd, _("Volume key does not match the volume."));
+
+ if (r >= 0)
+ r = resume_by_volume_key(cd, vk, name);
+
+ crypt_free_volume_key(vk);
+ return r;
+}
+
+int crypt_resume_by_token_pin(struct crypt_device *cd, const char *name,
+ const char *type, int token, const char *pin, size_t pin_size,
+ void *usrptr)
+{
+ struct volume_key *vk = NULL;
+ int r, keyslot;
+
+ if (!name)
+ return -EINVAL;
+
+ log_dbg(cd, "Resuming volume %s by token (%s type) %d.",
+ name, type ?: "any", token);
+
+ if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET, 0)))
+ return r;
+
+ r = dm_status_suspended(cd, name);
+ if (r < 0)
+ return r;
+
+ if (!r) {
+ log_err(cd, _("Volume %s is not suspended."), name);
+ return -EINVAL;
+ }
+
+ r = LUKS2_token_unlock_key(cd, &cd->u.luks2.hdr, token, type,
+ pin, pin_size, CRYPT_DEFAULT_SEGMENT, usrptr, &vk);
+ keyslot = r;
+ if (r >= 0)
+ r = resume_by_volume_key(cd, vk, name);
+
+ crypt_free_volume_key(vk);
+ return r < 0 ? r : keyslot;
+}
+
+/*
+ * Keyslot manipulation
+ */
+int crypt_keyslot_add_by_passphrase(struct crypt_device *cd,
+	int keyslot, // CRYPT_ANY_SLOT (-1) to use any free slot
+ const char *passphrase,
+ size_t passphrase_size,
+ const char *new_passphrase,
+ size_t new_passphrase_size)
+{
+ int r;
+ struct crypt_keyslot_context kc, new_kc;
+
+ if (!passphrase || !new_passphrase)
+ return -EINVAL;
+
+ crypt_keyslot_unlock_by_passphrase_init_internal(&kc, passphrase, passphrase_size);
+ crypt_keyslot_unlock_by_passphrase_init_internal(&new_kc, new_passphrase, new_passphrase_size);
+
+ r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0);
+
+ crypt_keyslot_context_destroy_internal(&kc);
+ crypt_keyslot_context_destroy_internal(&new_kc);
+
+ return r;
+}
+
+int crypt_keyslot_change_by_passphrase(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *passphrase,
+ size_t passphrase_size,
+ const char *new_passphrase,
+ size_t new_passphrase_size)
+{
+ int digest = -1, r, keyslot_new_orig = keyslot_new;
+ struct luks2_keyslot_params params;
+ struct volume_key *vk = NULL;
+
+ if (!passphrase || !new_passphrase)
+ return -EINVAL;
+
+ log_dbg(cd, "Changing passphrase from old keyslot %d to new %d.",
+ keyslot_old, keyslot_new);
+
+ if ((r = onlyLUKS(cd)))
+ return r;
+
+ if (isLUKS1(cd->type))
+ r = LUKS_open_key_with_hdr(keyslot_old, passphrase, passphrase_size,
+ &cd->u.luks1.hdr, &vk, cd);
+ else if (isLUKS2(cd->type)) {
+ r = LUKS2_keyslot_open(cd, keyslot_old, CRYPT_ANY_SEGMENT, passphrase, passphrase_size, &vk);
+		/* will fail for keyslots without a digest; fix if supported in the future */
+ if (r >= 0) {
+ digest = LUKS2_digest_by_keyslot(&cd->u.luks2.hdr, r);
+ if (digest < 0)
+ r = -EINVAL;
+ }
+ } else
+ r = -EINVAL;
+ if (r < 0)
+ goto out;
+
+ if (keyslot_old != CRYPT_ANY_SLOT && keyslot_old != r) {
+ log_dbg(cd, "Keyslot mismatch.");
+ goto out;
+ }
+ keyslot_old = r;
+
+ if (keyslot_new == CRYPT_ANY_SLOT) {
+ if (isLUKS1(cd->type))
+ keyslot_new = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
+ else if (isLUKS2(cd->type))
+ keyslot_new = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, vk->keylength);
+ if (keyslot_new < 0)
+ keyslot_new = keyslot_old;
+ }
+ log_dbg(cd, "Key change, old slot %d, new slot %d.", keyslot_old, keyslot_new);
+
+ if (isLUKS1(cd->type)) {
+ if (keyslot_old == keyslot_new) {
+ log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
+ (void)crypt_keyslot_destroy(cd, keyslot_old);
+ }
+ r = LUKS_set_key(keyslot_new, new_passphrase, new_passphrase_size,
+ &cd->u.luks1.hdr, vk, cd);
+ } else if (isLUKS2(cd->type)) {
+ r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, &params);
+ if (r)
+ goto out;
+
+ if (keyslot_old != keyslot_new) {
+ r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, digest, 1, 0);
+ if (r < 0)
+ goto out;
+ r = LUKS2_token_assignment_copy(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new, 0);
+ if (r < 0)
+ goto out;
+ } else {
+ log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
+ /* FIXME: improve return code so that we can detect area is damaged */
+ r = LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot_old, 1);
+ if (r) {
+ /* (void)crypt_keyslot_destroy(cd, keyslot_old); */
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr,
+ keyslot_new, new_passphrase,
+ new_passphrase_size, vk, &params);
+ if (r < 0)
+ goto out;
+
+		/* Swap old & new so the final keyslot number remains the same */
+ if (keyslot_new_orig == CRYPT_ANY_SLOT && keyslot_old != keyslot_new) {
+ r = LUKS2_keyslot_swap(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new);
+ if (r < 0)
+ goto out;
+
+ /* Swap slot id */
+ r = keyslot_old;
+ keyslot_old = keyslot_new;
+ keyslot_new = r;
+ }
+ } else
+ r = -EINVAL;
+
+ if (r >= 0 && keyslot_old != keyslot_new)
+ r = crypt_keyslot_destroy(cd, keyslot_old);
+
+ if (r < 0)
+ log_err(cd, _("Failed to swap new key slot."));
+out:
+ crypt_free_volume_key(vk);
+ if (r < 0) {
+ _luks2_rollback(cd);
+ return r;
+ }
+ return keyslot_new;
+}
+
+int crypt_keyslot_add_by_keyfile_device_offset(struct crypt_device *cd,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset,
+ const char *new_keyfile,
+ size_t new_keyfile_size,
+ uint64_t new_keyfile_offset)
+{
+ int r;
+ struct crypt_keyslot_context kc, new_kc;
+
+ if (!keyfile || !new_keyfile)
+ return -EINVAL;
+
+ crypt_keyslot_unlock_by_keyfile_init_internal(&kc, keyfile, keyfile_size, keyfile_offset);
+ crypt_keyslot_unlock_by_keyfile_init_internal(&new_kc, new_keyfile, new_keyfile_size, new_keyfile_offset);
+
+ r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0);
+
+ crypt_keyslot_context_destroy_internal(&kc);
+ crypt_keyslot_context_destroy_internal(&new_kc);
+
+ return r;
+}
+
+int crypt_keyslot_add_by_keyfile(struct crypt_device *cd,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ const char *new_keyfile,
+ size_t new_keyfile_size)
+{
+ return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
+ keyfile, keyfile_size, 0,
+ new_keyfile, new_keyfile_size, 0);
+}
+
+int crypt_keyslot_add_by_keyfile_offset(struct crypt_device *cd,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ size_t keyfile_offset,
+ const char *new_keyfile,
+ size_t new_keyfile_size,
+ size_t new_keyfile_offset)
+{
+ return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
+ keyfile, keyfile_size, keyfile_offset,
+ new_keyfile, new_keyfile_size, new_keyfile_offset);
+}
+
+int crypt_keyslot_add_by_volume_key(struct crypt_device *cd,
+ int keyslot,
+ const char *volume_key,
+ size_t volume_key_size,
+ const char *passphrase,
+ size_t passphrase_size)
+{
+ int r;
+ struct crypt_keyslot_context kc, new_kc;
+
+ if (!passphrase)
+ return -EINVAL;
+
+ crypt_keyslot_unlock_by_key_init_internal(&kc, volume_key, volume_key_size);
+ crypt_keyslot_unlock_by_passphrase_init_internal(&new_kc, passphrase, passphrase_size);
+
+ r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0);
+
+ crypt_keyslot_context_destroy_internal(&kc);
+ crypt_keyslot_context_destroy_internal(&new_kc);
+
+ return r;
+}
+
+int crypt_keyslot_destroy(struct crypt_device *cd, int keyslot)
+{
+ crypt_keyslot_info ki;
+ int r;
+
+ log_dbg(cd, "Destroying keyslot %d.", keyslot);
+
+ if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
+ return r;
+
+ ki = crypt_keyslot_status(cd, keyslot);
+ if (ki == CRYPT_SLOT_INVALID) {
+ log_err(cd, _("Key slot %d is invalid."), keyslot);
+ return -EINVAL;
+ }
+
+ if (isLUKS1(cd->type)) {
+ if (ki == CRYPT_SLOT_INACTIVE) {
+ log_err(cd, _("Keyslot %d is not active."), keyslot);
+ return -EINVAL;
+ }
+ return LUKS_del_key(keyslot, &cd->u.luks1.hdr, cd);
+ }
+
+ return LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot, 0);
+}
+
+static int _check_header_data_overlap(struct crypt_device *cd, const char *name)
+{
+ if (!name || !isLUKS(cd->type))
+ return 0;
+
+ if (device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd)) <= 0)
+ return 0;
+
+ /* FIXME: check real header size */
+ if (crypt_get_data_offset(cd) == 0) {
+ log_err(cd, _("Device header overlaps with data area."));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_devices(struct crypt_device *cd, const char *name, const char *iname, uint32_t *flags)
+{
+ int r;
+
+ if (!flags || !name)
+ return -EINVAL;
+
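+	/*
+	 * An existing mapping is an error unless a refresh was requested;
+	 * a missing mapping downgrades a refresh to a plain activation.
+	 */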
+ if (iname) {
+ r = dm_status_device(cd, iname);
+ if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
+ return -EBUSY;
+ if (r < 0 && r != -ENODEV)
+ return r;
+ if (r == -ENODEV)
+ *flags &= ~CRYPT_ACTIVATE_REFRESH;
+ }
+
+ r = dm_status_device(cd, name);
+ if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
+ return -EBUSY;
+ if (r < 0 && r != -ENODEV)
+ return r;
+ if (r == -ENODEV)
+ *flags &= ~CRYPT_ACTIVATE_REFRESH;
+
+ return 0;
+}
+
+static int _create_device_with_integrity(struct crypt_device *cd,
+ const char *type, const char *name, const char *iname,
+ const char *ipath, struct crypt_dm_active_device *dmd,
+ struct crypt_dm_active_device *dmdi)
+{
+ int r;
+ enum devcheck device_check;
+ struct dm_target *tgt;
+ struct device *device = NULL;
+
+ if (!single_segment(dmd))
+ return -EINVAL;
+
+ tgt = &dmd->segment;
+ if (tgt->type != DM_CRYPT)
+ return -EINVAL;
+
+ device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
+
+ r = INTEGRITY_activate_dmd_device(cd, iname, CRYPT_INTEGRITY, dmdi, 0);
+ if (r)
+ return r;
+
+ r = device_alloc(cd, &device, ipath);
+ if (r < 0)
+ goto out;
+ tgt->data_device = device;
+
+ r = device_block_adjust(cd, tgt->data_device, device_check,
+ tgt->u.crypt.offset, &dmd->size, &dmd->flags);
+
+ if (!r)
+ r = dm_create_device(cd, name, type, dmd);
+out:
+ if (r < 0)
+ dm_remove_device(cd, iname, 0);
+
+ device_free(cd, device);
+ return r;
+}
+
+static int kernel_keyring_support(void)
+{
+ static unsigned _checked = 0;
+
+ if (!_checked) {
+ _kernel_keyring_supported = keyring_check();
+ _checked = 1;
+ }
+
+ return _kernel_keyring_supported;
+}
+
+static int dmcrypt_keyring_bug(void)
+{
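+	/* Kernels older than 4.15 are treated as affected by the dm-crypt
+	 * keyring bug; if the version cannot be read, assume the worst. */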
+ uint64_t kversion;
+
+ if (kernel_version(&kversion))
+ return 1;
+ return kversion < compact_version(4,15,0,0);
+}
+
+int create_or_reload_device(struct crypt_device *cd, const char *name,
+ const char *type, struct crypt_dm_active_device *dmd)
+{
+ int r;
+ enum devcheck device_check;
+ struct dm_target *tgt;
+
+ if (!type || !name || !single_segment(dmd))
+ return -EINVAL;
+
+ tgt = &dmd->segment;
+ if (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY)
+ return -EINVAL;
+
+ /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
+ r = check_devices(cd, name, NULL, &dmd->flags);
+ if (r)
+ return r;
+
+ if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
+ r = _reload_device(cd, name, dmd);
+ else {
+ if (tgt->type == DM_CRYPT) {
+ device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
+
+ r = device_block_adjust(cd, tgt->data_device, device_check,
+ tgt->u.crypt.offset, &dmd->size, &dmd->flags);
+ if (!r) {
+ tgt->size = dmd->size;
+ r = dm_create_device(cd, name, type, dmd);
+ }
+ } else if (tgt->type == DM_INTEGRITY) {
+ r = device_block_adjust(cd, tgt->data_device, DEV_EXCL,
+ tgt->u.integrity.offset, NULL, &dmd->flags);
+ if (r)
+ return r;
+
+ if (tgt->u.integrity.meta_device) {
+ r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
+ if (r)
+ return r;
+ }
+
+ r = dm_create_device(cd, name, type, dmd);
+ }
+ }
+
+ return r;
+}
+
+int create_or_reload_device_with_integrity(struct crypt_device *cd, const char *name,
+ const char *type, struct crypt_dm_active_device *dmd,
+ struct crypt_dm_active_device *dmdi)
+{
+ int r;
+ const char *iname = NULL;
+ char *ipath = NULL;
+
+ if (!type || !name || !dmd || !dmdi)
+ return -EINVAL;
+
+ if (asprintf(&ipath, "%s/%s_dif", dm_get_dir(), name) < 0)
+ return -ENOMEM;
+ iname = ipath + strlen(dm_get_dir()) + 1;
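+	/* iname points at the "<name>_dif" part inside ipath. */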
+
+ /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
+ r = check_devices(cd, name, iname, &dmd->flags);
+ if (r)
+ goto out;
+
+ if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
+ r = _reload_device_with_integrity(cd, name, iname, ipath, dmd, dmdi);
+ else
+ r = _create_device_with_integrity(cd, type, name, iname, ipath, dmd, dmdi);
+out:
+ free(ipath);
+
+ return r;
+}
+
+static int _open_and_activate(struct crypt_device *cd,
+ int keyslot,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags)
+{
+ bool use_keyring;
+ int r;
+ struct volume_key *vk = NULL;
+
+ r = LUKS2_keyslot_open(cd, keyslot,
+ (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ?
+ CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT,
+ passphrase, passphrase_size, &vk);
+ if (r < 0)
+ return r;
+ keyslot = r;
+
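+	/* Use the kernel keyring when activating with a real cipher, or when
+	 * the caller explicitly requested a keyring-backed key. */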
+ if (!crypt_use_keyring_for_vk(cd))
+ use_keyring = false;
+ else
+ use_keyring = ((name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
+ (flags & CRYPT_ACTIVATE_KEYRING_KEY));
+
+ if (use_keyring) {
+ r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd,
+ &cd->u.luks2.hdr, vk, keyslot);
+ if (r < 0)
+ goto out;
+ flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+ }
+
+ if (name)
+ r = LUKS2_activate(cd, name, vk, flags);
+out:
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vk);
+ crypt_free_volume_key(vk);
+
+ return r < 0 ? r : keyslot;
+}
+
+#if USE_LUKS2_REENCRYPTION
+static int load_all_keys(struct crypt_device *cd, struct luks2_hdr *hdr, struct volume_key *vks)
+{
+ int r;
+ struct volume_key *vk = vks;
+
+ while (vk) {
+ r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk));
+ if (r < 0)
+ return r;
+ vk = crypt_volume_key_next(vk);
+ }
+
+ return 0;
+}
+
+static int _open_all_keys(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags,
+ struct volume_key **vks)
+{
+ int r, segment;
+ struct volume_key *_vks = NULL;
+ crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
+
+ segment = (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ? CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT;
+
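+	/*
+	 * During clean or crashed reencryption both old and new volume keys
+	 * may be needed, so keyslots are opened for all segments.
+	 */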
+ switch (ri) {
+ case CRYPT_REENCRYPT_NONE:
+ r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase, passphrase_size, &_vks);
+ break;
+ case CRYPT_REENCRYPT_CLEAN:
+ case CRYPT_REENCRYPT_CRASH:
+ if (segment == CRYPT_ANY_SEGMENT)
+ r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase,
+ passphrase_size, &_vks);
+ else
+ r = LUKS2_keyslot_open_all_segments(cd, keyslot,
+ keyslot, passphrase, passphrase_size,
+ &_vks);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ if (keyslot == CRYPT_ANY_SLOT)
+ keyslot = r;
+
+ if (r >= 0 && (flags & CRYPT_ACTIVATE_KEYRING_KEY))
+ r = load_all_keys(cd, hdr, _vks);
+
+ if (r >= 0 && vks)
+ MOVE_REF(*vks, _vks);
+
+ if (r < 0)
+ crypt_drop_keyring_key(cd, _vks);
+ crypt_free_volume_key(_vks);
+
+ return r < 0 ? r : keyslot;
+}
+
+static int _open_and_activate_reencrypt_device(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags)
+{
+ bool dynamic_size;
+ crypt_reencrypt_info ri;
+ uint64_t minimal_size, device_size;
+ struct volume_key *vks = NULL;
+ int r = 0;
+ struct crypt_lock_handle *reencrypt_lock = NULL;
+
+ if (crypt_use_keyring_for_vk(cd))
+ flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+
+ r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
+ if (r) {
+ if (r == -EBUSY)
+			log_err(cd, _("Reencryption in progress. Cannot activate device."));
+ else
+ log_err(cd, _("Failed to get reencryption lock."));
+ return r;
+ }
+
+ if ((r = crypt_load(cd, CRYPT_LUKS2, NULL)))
+ goto out;
+
+ ri = LUKS2_reencrypt_status(hdr);
+
+ if (ri == CRYPT_REENCRYPT_CRASH) {
+ r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot,
+ keyslot, passphrase, passphrase_size, &vks);
+ if (r < 0) {
+ log_err(cd, _("LUKS2 reencryption recovery failed."));
+ goto out;
+ }
+ keyslot = r;
+
+ ri = LUKS2_reencrypt_status(hdr);
+ }
+
+	/* recovery finished the reencryption, or it had already finished */
+ if (ri == CRYPT_REENCRYPT_NONE) {
+ crypt_drop_keyring_key(cd, vks);
+ crypt_free_volume_key(vks);
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
+ }
+
+ if (ri > CRYPT_REENCRYPT_CLEAN) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic_size))
+ goto out;
+
+ if (!vks) {
+ r = _open_all_keys(cd, hdr, keyslot, passphrase, passphrase_size, flags, &vks);
+ if (r >= 0)
+ keyslot = r;
+ }
+
+ if (r >= 0) {
+ r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
+ if (r < 0)
+ goto out;
+ }
+
+ log_dbg(cd, "Entering clean reencryption state mode.");
+
+ if (r >= 0)
+ r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, dynamic_size);
+
+ if (r >= 0)
+ r = LUKS2_activate_multi(cd, name, vks, device_size >> SECTOR_SHIFT, flags);
+out:
+ LUKS2_reencrypt_unlock(cd, reencrypt_lock);
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vks);
+ crypt_free_volume_key(vks);
+
+ return r < 0 ? r : keyslot;
+}
+
+/*
+ * Activation/deactivation of a device
+ */
+static int _open_and_activate_luks2(struct crypt_device *cd,
+ int keyslot,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags)
+{
+ crypt_reencrypt_info ri;
+ int r, rv;
+ struct luks2_hdr *hdr = &cd->u.luks2.hdr;
+ struct volume_key *vks = NULL;
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_INVALID)
+ return -EINVAL;
+
+ if (ri > CRYPT_REENCRYPT_NONE) {
+ if (name)
+ r = _open_and_activate_reencrypt_device(cd, hdr, keyslot, name, passphrase,
+ passphrase_size, flags);
+ else {
+ r = _open_all_keys(cd, hdr, keyslot, passphrase,
+ passphrase_size, flags, &vks);
+ if (r < 0)
+ return r;
+
+ rv = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
+ crypt_free_volume_key(vks);
+ if (rv < 0)
+ return rv;
+ }
+ } else
+ r = _open_and_activate(cd, keyslot, name, passphrase,
+ passphrase_size, flags);
+
+ return r;
+}
+#else
+static int _open_and_activate_luks2(struct crypt_device *cd,
+ int keyslot,
+ const char *name,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags)
+{
+ crypt_reencrypt_info ri;
+
+ ri = LUKS2_reencrypt_status(&cd->u.luks2.hdr);
+ if (ri == CRYPT_REENCRYPT_INVALID)
+ return -EINVAL;
+
+ if (ri > CRYPT_REENCRYPT_NONE) {
+ log_err(cd, _("This operation is not supported for this device type."));
+ return -ENOTSUP;
+ }
+
+ return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
+}
+#endif
+
+static int _activate_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags)
+{
+ int r;
+ struct volume_key *vk = NULL;
+
+ if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd))
+ return -EINVAL;
+
+ if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name)
+ return -EINVAL;
+
+ r = _check_header_data_overlap(cd, name);
+ if (r < 0)
+ return r;
+
+ if (flags & CRYPT_ACTIVATE_SERIALIZE_MEMORY_HARD_PBKDF)
+ cd->memory_hard_pbkdf_lock_enabled = true;
+
+ /* plain, use hashed passphrase */
+ if (isPLAIN(cd->type)) {
+ r = -EINVAL;
+ if (!name)
+ goto out;
+
+ r = process_key(cd, cd->u.plain.hdr.hash,
+ cd->u.plain.key_size,
+ passphrase, passphrase_size, &vk);
+ if (r < 0)
+ goto out;
+
+ r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
+ keyslot = 0;
+ } else if (isLUKS1(cd->type)) {
+ r = LUKS_open_key_with_hdr(keyslot, passphrase,
+ passphrase_size, &cd->u.luks1.hdr, &vk, cd);
+ if (r >= 0) {
+ keyslot = r;
+ if (name)
+ r = LUKS1_activate(cd, name, vk, flags);
+ }
+ } else if (isLUKS2(cd->type)) {
+ r = _open_and_activate_luks2(cd, keyslot, name, passphrase, passphrase_size, flags);
+ keyslot = r;
+ } else if (isBITLK(cd->type)) {
+ r = BITLK_activate_by_passphrase(cd, name, passphrase, passphrase_size,
+ &cd->u.bitlk.params, flags);
+ keyslot = 0;
+ } else if (isFVAULT2(cd->type)) {
+ r = FVAULT2_activate_by_passphrase(cd, name, passphrase, passphrase_size,
+ &cd->u.fvault2.params, flags);
+ keyslot = 0;
+ } else {
+ log_err(cd, _("Device type is not properly initialized."));
+ r = -EINVAL;
+ }
+out:
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vk);
+ crypt_free_volume_key(vk);
+
+ cd->memory_hard_pbkdf_lock_enabled = false;
+
+ return r < 0 ? r : keyslot;
+}
+
+static int _activate_loopaes(struct crypt_device *cd,
+ const char *name,
+ char *buffer,
+ size_t buffer_size,
+ uint32_t flags)
+{
+ int r;
+ unsigned int key_count = 0;
+ struct volume_key *vk = NULL;
+
+ r = LOOPAES_parse_keyfile(cd, &vk, cd->u.loopaes.hdr.hash, &key_count,
+ buffer, buffer_size);
+
+ if (!r && name)
+ r = LOOPAES_activate(cd, name, cd->u.loopaes.cipher, key_count,
+ vk, flags);
+
+ crypt_free_volume_key(vk);
+
+ return r;
+}
+
+static int _activate_check_status(struct crypt_device *cd, const char *name, unsigned reload)
+{
+ int r;
+
+ if (!name)
+ return 0;
+
+ r = dm_status_device(cd, name);
+
+ if (r >= 0 && reload)
+ return 0;
+
+ if (r >= 0 || r == -EEXIST) {
+ log_err(cd, _("Device %s already exists."), name);
+ return -EEXIST;
+ }
+
+ if (r == -ENODEV)
+ return 0;
+
+ log_err(cd, _("Cannot use device %s, name is invalid or still in use."), name);
+ return r;
+}
+
+// activation/deactivation of device mapping
+int crypt_activate_by_passphrase(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags)
+{
+ int r;
+
+ if (!cd || !passphrase || (!name && (flags & CRYPT_ACTIVATE_REFRESH)))
+ return -EINVAL;
+
+ log_dbg(cd, "%s volume %s [keyslot %d] using passphrase.",
+ name ? "Activating" : "Checking", name ?: "passphrase",
+ keyslot);
+
+ r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
+ if (r < 0)
+ return r;
+
+ return _activate_by_passphrase(cd, name, keyslot, passphrase, passphrase_size, flags);
+}
+
+int crypt_activate_by_keyfile_device_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint64_t keyfile_offset,
+ uint32_t flags)
+{
+ char *passphrase_read = NULL;
+ size_t passphrase_size_read;
+ int r;
+
+ if (!cd || !keyfile ||
+ ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
+ return -EINVAL;
+
+ log_dbg(cd, "%s volume %s [keyslot %d] using keyfile %s.",
+ name ? "Activating" : "Checking", name ?: "passphrase", keyslot, keyfile);
+
+ r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
+ if (r < 0)
+ return r;
+
+ r = crypt_keyfile_device_read(cd, keyfile,
+ &passphrase_read, &passphrase_size_read,
+ keyfile_offset, keyfile_size, 0);
+ if (r < 0)
+ goto out;
+
+ if (isLOOPAES(cd->type))
+ r = _activate_loopaes(cd, name, passphrase_read, passphrase_size_read, flags);
+ else
+ r = _activate_by_passphrase(cd, name, keyslot, passphrase_read, passphrase_size_read, flags);
+
+out:
+ crypt_safe_free(passphrase_read);
+ return r;
+}
+
+int crypt_activate_by_keyfile(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ uint32_t flags)
+{
+ return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
+ keyfile_size, 0, flags);
+}
+
+int crypt_activate_by_keyfile_offset(struct crypt_device *cd,
+ const char *name,
+ int keyslot,
+ const char *keyfile,
+ size_t keyfile_size,
+ size_t keyfile_offset,
+ uint32_t flags)
+{
+ return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
+ keyfile_size, keyfile_offset, flags);
+}
+
+int crypt_activate_by_volume_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size,
+ uint32_t flags)
+{
+ bool use_keyring;
+ struct volume_key *vk = NULL;
+ int r;
+
+ if (!cd ||
+ ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
+ return -EINVAL;
+
+ log_dbg(cd, "%s volume %s by volume key.", name ? "Activating" : "Checking",
+ name ?: "");
+
+ r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
+ if (r < 0)
+ return r;
+
+ r = _check_header_data_overlap(cd, name);
+ if (r < 0)
+ return r;
+
+ /* use key directly, no hash */
+ if (isPLAIN(cd->type)) {
+ if (!name)
+ return -EINVAL;
+
+ if (!volume_key || !volume_key_size || volume_key_size != cd->u.plain.key_size) {
+ log_err(cd, _("Incorrect volume key specified for plain device."));
+ return -EINVAL;
+ }
+
+ vk = crypt_alloc_volume_key(volume_key_size, volume_key);
+ if (!vk)
+ return -ENOMEM;
+
+ r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
+ } else if (isLUKS1(cd->type)) {
+ /* If key is not provided, try to use internal key */
+ if (!volume_key) {
+ if (!cd->volume_key) {
+ log_err(cd, _("Volume key does not match the volume."));
+ return -EINVAL;
+ }
+ volume_key_size = cd->volume_key->keylength;
+ volume_key = cd->volume_key->key;
+ }
+
+ vk = crypt_alloc_volume_key(volume_key_size, volume_key);
+ if (!vk)
+ return -ENOMEM;
+ r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
+
+ if (r == -EPERM)
+ log_err(cd, _("Volume key does not match the volume."));
+
+ if (!r && name)
+ r = LUKS1_activate(cd, name, vk, flags);
+ } else if (isLUKS2(cd->type)) {
+ /* If key is not provided, try to use internal key */
+ if (!volume_key) {
+ if (!cd->volume_key) {
+ log_err(cd, _("Volume key does not match the volume."));
+ return -EINVAL;
+ }
+ volume_key_size = cd->volume_key->keylength;
+ volume_key = cd->volume_key->key;
+ }
+
+ vk = crypt_alloc_volume_key(volume_key_size, volume_key);
+ if (!vk)
+ return -ENOMEM;
+
+ r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
+ if (r == -EPERM || r == -ENOENT)
+ log_err(cd, _("Volume key does not match the volume."));
+ if (r > 0)
+ r = 0;
+
+ if (!crypt_use_keyring_for_vk(cd))
+ use_keyring = false;
+ else
+ use_keyring = (name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
+ (flags & CRYPT_ACTIVATE_KEYRING_KEY);
+
+ if (!r && use_keyring) {
+ r = LUKS2_key_description_by_segment(cd,
+ &cd->u.luks2.hdr, vk, CRYPT_DEFAULT_SEGMENT);
+ if (!r)
+ r = crypt_volume_key_load_in_keyring(cd, vk);
+ if (!r)
+ flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+ }
+
+ if (!r && name)
+ r = LUKS2_activate(cd, name, vk, flags);
+ } else if (isVERITY(cd->type)) {
+ r = crypt_activate_by_signed_key(cd, name, volume_key, volume_key_size, NULL, 0, flags);
+ } else if (isTCRYPT(cd->type)) {
+ if (!name)
+ return 0;
+ r = TCRYPT_activate(cd, name, &cd->u.tcrypt.hdr,
+ &cd->u.tcrypt.params, flags);
+ } else if (isINTEGRITY(cd->type)) {
+ if (!name)
+ return 0;
+ if (volume_key) {
+ vk = crypt_alloc_volume_key(volume_key_size, volume_key);
+ if (!vk)
+ return -ENOMEM;
+ }
+ r = INTEGRITY_activate(cd, name, &cd->u.integrity.params, vk,
+ cd->u.integrity.journal_crypt_key,
+ cd->u.integrity.journal_mac_key, flags,
+ cd->u.integrity.sb_flags);
+ } else if (isBITLK(cd->type)) {
+ r = BITLK_activate_by_volume_key(cd, name, volume_key, volume_key_size,
+ &cd->u.bitlk.params, flags);
+ } else {
+ log_err(cd, _("Device type is not properly initialized."));
+ r = -EINVAL;
+ }
+
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vk);
+ crypt_free_volume_key(vk);
+
+ return r;
+}
+
+int crypt_activate_by_signed_key(struct crypt_device *cd,
+ const char *name,
+ const char *volume_key,
+ size_t volume_key_size,
+ const char *signature,
+ size_t signature_size,
+ uint32_t flags)
+{
+ char description[512];
+ int r;
+
+ if (!cd || !isVERITY(cd->type))
+ return -EINVAL;
+
+ if (!volume_key || !volume_key_size || (!name && signature)) {
+ log_err(cd, _("Incorrect root hash specified for verity device."));
+ return -EINVAL;
+ }
+
+ if (name)
+ log_dbg(cd, "Activating volume %s by %skey.", name, signature ? "signed " : "");
+ else
+ log_dbg(cd, "Checking volume by key.");
+
+ if (cd->u.verity.hdr.flags & CRYPT_VERITY_ROOT_HASH_SIGNATURE && !signature) {
+ log_err(cd, _("Root hash signature required."));
+ return -EINVAL;
+ }
+
+ r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
+ if (r < 0)
+ return r;
+
+ if (signature && !kernel_keyring_support()) {
+ log_err(cd, _("Kernel keyring missing: required for passing signature to kernel."));
+ return -EINVAL;
+ }
+
+ /* volume_key == root hash */
+ free(CONST_CAST(void*)cd->u.verity.root_hash);
+ cd->u.verity.root_hash = NULL;
+
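+	/* Pass the signature to the kernel as a user key in the thread keyring; it is dropped again after activation. */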
+ if (signature) {
+ r = snprintf(description, sizeof(description)-1, "cryptsetup:%s%s%s",
+ crypt_get_uuid(cd) ?: "", crypt_get_uuid(cd) ? "-" : "", name);
+ if (r < 0)
+ return -EINVAL;
+
+ log_dbg(cd, "Adding signature into keyring %s", description);
+ r = keyring_add_key_in_thread_keyring(USER_KEY, description, signature, signature_size);
+ if (r) {
+ log_err(cd, _("Failed to load key in kernel keyring."));
+ return r;
+ }
+ }
+
+ r = VERITY_activate(cd, name, volume_key, volume_key_size,
+ signature ? description : NULL,
+ cd->u.verity.fec_device,
+ &cd->u.verity.hdr, flags | CRYPT_ACTIVATE_READONLY);
+
+ if (!r) {
+ cd->u.verity.root_hash_size = volume_key_size;
+ cd->u.verity.root_hash = malloc(volume_key_size);
+ if (cd->u.verity.root_hash)
+ memcpy(CONST_CAST(void*)cd->u.verity.root_hash, volume_key, volume_key_size);
+ }
+
+ if (signature)
+ crypt_drop_keyring_key_by_description(cd, description, USER_KEY);
+
+ return r;
+}
+
+int crypt_deactivate_by_name(struct crypt_device *cd, const char *name, uint32_t flags)
+{
+ struct crypt_device *fake_cd = NULL;
+ struct luks2_hdr *hdr2 = NULL;
+ struct crypt_dm_active_device dmd = {};
+ int r;
+ uint32_t get_flags = DM_ACTIVE_DEVICE | DM_ACTIVE_UUID | DM_ACTIVE_HOLDERS;
+
+ if (!name)
+ return -EINVAL;
+
+ if ((flags & CRYPT_DEACTIVATE_DEFERRED) && (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL))
+ return -EINVAL;
+
+ log_dbg(cd, "Deactivating volume %s.", name);
+
+ if (!cd) {
+ r = crypt_init_by_name(&fake_cd, name);
+ if (r < 0)
+ return r;
+ cd = fake_cd;
+ }
+
+	/* Skip holders detection, and thus the early abort, when any of these flags is set. */
+ if (flags & (CRYPT_DEACTIVATE_FORCE | CRYPT_DEACTIVATE_DEFERRED | CRYPT_DEACTIVATE_DEFERRED_CANCEL))
+ get_flags &= ~DM_ACTIVE_HOLDERS;
+
+ switch (crypt_status(cd, name)) {
+ case CRYPT_ACTIVE:
+ case CRYPT_BUSY:
+ if (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL) {
+ r = dm_cancel_deferred_removal(name);
+ if (r < 0)
+ log_err(cd, _("Could not cancel deferred remove from device %s."), name);
+ break;
+ }
+
+ r = dm_query_device(cd, name, get_flags, &dmd);
+ if (r >= 0) {
+ if (dmd.holders) {
+ log_err(cd, _("Device %s is still in use."), name);
+ r = -EBUSY;
+ break;
+ }
+ }
+
+ if (isLUKS2(cd->type))
+ hdr2 = crypt_get_hdr(cd, CRYPT_LUKS2);
+
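+		/* A LUKS2 mapping is recognized by the active device UUID prefix or by a loaded LUKS2 header. */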
+ if ((dmd.uuid && !strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1)) || hdr2)
+ r = LUKS2_deactivate(cd, name, hdr2, &dmd, flags);
+ else if (isTCRYPT(cd->type))
+ r = TCRYPT_deactivate(cd, name, flags);
+ else
+ r = dm_remove_device(cd, name, flags);
+ if (r < 0 && crypt_status(cd, name) == CRYPT_BUSY) {
+ log_err(cd, _("Device %s is still in use."), name);
+ r = -EBUSY;
+ }
+ break;
+ case CRYPT_INACTIVE:
+ log_err(cd, _("Device %s is not active."), name);
+ r = -ENODEV;
+ break;
+ default:
+ log_err(cd, _("Invalid device %s."), name);
+ r = -EINVAL;
+ }
+
+ dm_targets_free(cd, &dmd);
+ free(CONST_CAST(void*)dmd.uuid);
+ crypt_free(fake_cd);
+
+ return r;
+}
+
+int crypt_deactivate(struct crypt_device *cd, const char *name)
+{
+ return crypt_deactivate_by_name(cd, name, 0);
+}
+
+int crypt_get_active_device(struct crypt_device *cd, const char *name,
+ struct crypt_active_device *cad)
+{
+ int r;
+ struct crypt_dm_active_device dmd, dmdi = {};
+ const char *namei = NULL;
+ struct dm_target *tgt = &dmd.segment;
+ uint64_t min_offset = UINT64_MAX;
+
+ if (!cd || !name || !cad)
+ return -EINVAL;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_DEVICE, &dmd);
+ if (r < 0)
+ return r;
+
+ /* For LUKS2 with integrity we need flags from underlying dm-integrity */
+ if (isLUKS2(cd->type) && crypt_get_integrity_tag_size(cd) && single_segment(&dmd)) {
+ namei = device_dm_name(tgt->data_device);
+ if (namei && dm_query_device(cd, namei, 0, &dmdi) >= 0)
+ dmd.flags |= dmdi.flags;
+ }
+
+	if (isTCRYPT(cd->type)) {
+ cad->offset = TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
+ cad->iv_offset = TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
+ } else {
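+		/* Walk all targets and report the lowest segment offset together with its IV offset. */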
+ while (tgt) {
+ if (tgt->type == DM_CRYPT && (min_offset > tgt->u.crypt.offset)) {
+ min_offset = tgt->u.crypt.offset;
+ cad->iv_offset = tgt->u.crypt.iv_offset;
+ } else if (tgt->type == DM_INTEGRITY && (min_offset > tgt->u.integrity.offset)) {
+ min_offset = tgt->u.integrity.offset;
+ cad->iv_offset = 0;
+ } else if (tgt->type == DM_LINEAR && (min_offset > tgt->u.linear.offset)) {
+ min_offset = tgt->u.linear.offset;
+ cad->iv_offset = 0;
+ }
+ tgt = tgt->next;
+ }
+ }
+
+ if (min_offset != UINT64_MAX)
+ cad->offset = min_offset;
+
+ cad->size = dmd.size;
+ cad->flags = dmd.flags;
+
+ r = 0;
+ dm_targets_free(cd, &dmd);
+ dm_targets_free(cd, &dmdi);
+
+ return r;
+}
+
+uint64_t crypt_get_active_integrity_failures(struct crypt_device *cd, const char *name)
+{
+ struct crypt_dm_active_device dmd;
+ uint64_t failures = 0;
+
+ if (!name)
+ return 0;
+
+ /* LUKS2 / dm-crypt does not provide this count. */
+ if (dm_query_device(cd, name, 0, &dmd) < 0)
+ return 0;
+
+ if (single_segment(&dmd) && dmd.segment.type == DM_INTEGRITY)
+ (void)dm_status_integrity_failures(cd, name, &failures);
+
+ dm_targets_free(cd, &dmd);
+
+ return failures;
+}
+
+/*
+ * Volume key handling
+ */
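+/* Legacy entry point: wraps the passphrase in a keyslot context and delegates to the context-based getter. */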
+int crypt_volume_key_get(struct crypt_device *cd,
+ int keyslot,
+ char *volume_key,
+ size_t *volume_key_size,
+ const char *passphrase,
+ size_t passphrase_size)
+{
+ int r;
+ struct crypt_keyslot_context kc;
+
+ if (!passphrase)
+ return crypt_volume_key_get_by_keyslot_context(cd, keyslot, volume_key, volume_key_size, NULL);
+
+ crypt_keyslot_unlock_by_passphrase_init_internal(&kc, passphrase, passphrase_size);
+
+ r = crypt_volume_key_get_by_keyslot_context(cd, keyslot, volume_key, volume_key_size, &kc);
+
+ crypt_keyslot_context_destroy_internal(&kc);
+
+ return r;
+}
+
+int crypt_volume_key_get_by_keyslot_context(struct crypt_device *cd,
+ int keyslot,
+ char *volume_key,
+ size_t *volume_key_size,
+ struct crypt_keyslot_context *kc)
+{
+ size_t passphrase_size;
+ int key_len, r;
+ const char *passphrase = NULL;
+ struct volume_key *vk = NULL;
+
+ if (!cd || !volume_key || !volume_key_size ||
+ (!kc && !isLUKS(cd->type) && !isTCRYPT(cd->type) && !isVERITY(cd->type)))
+ return -EINVAL;
+
+ if (isLUKS2(cd->type) && keyslot != CRYPT_ANY_SLOT)
+ key_len = LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot);
+ else
+ key_len = crypt_get_volume_key_size(cd);
+
+ if (key_len < 0)
+ return -EINVAL;
+
+ if (key_len > (int)*volume_key_size) {
+ log_err(cd, _("Volume key buffer too small."));
+ return -ENOMEM;
+ }
+
+ if (kc && (!kc->get_passphrase || kc->type == CRYPT_KC_TYPE_KEY))
+ return -EINVAL;
+
+ if (kc) {
+ r = kc->get_passphrase(cd, kc, &passphrase, &passphrase_size);
+ if (r < 0)
+ return r;
+ }
+
+ r = -EINVAL;
+
+ if (isLUKS2(cd->type)) {
+ if (kc && !kc->get_luks2_key)
+ log_err(cd, _("Cannot retrieve volume key for LUKS2 device."));
+ else if (!kc)
+ r = -ENOENT;
+ else
+ r = kc->get_luks2_key(cd, kc, keyslot,
+ keyslot == CRYPT_ANY_SLOT ? CRYPT_DEFAULT_SEGMENT : CRYPT_ANY_SEGMENT,
+ &vk);
+ } else if (isLUKS1(cd->type)) {
+ if (kc && !kc->get_luks1_volume_key)
+ log_err(cd, _("Cannot retrieve volume key for LUKS1 device."));
+ else if (!kc)
+ r = -ENOENT;
+ else
+ r = kc->get_luks1_volume_key(cd, kc, keyslot, &vk);
+ } else if (isPLAIN(cd->type)) {
+ if (passphrase && cd->u.plain.hdr.hash)
+ r = process_key(cd, cd->u.plain.hdr.hash, key_len,
+ passphrase, passphrase_size, &vk);
+ if (r < 0)
+ log_err(cd, _("Cannot retrieve volume key for plain device."));
+ } else if (isVERITY(cd->type)) {
+ /* volume_key == root hash */
+ if (cd->u.verity.root_hash) {
+ memcpy(volume_key, cd->u.verity.root_hash, cd->u.verity.root_hash_size);
+ *volume_key_size = cd->u.verity.root_hash_size;
+ r = 0;
+ } else
+ log_err(cd, _("Cannot retrieve root hash for verity device."));
+ } else if (isTCRYPT(cd->type)) {
+ r = TCRYPT_get_volume_key(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params, &vk);
+ } else if (isBITLK(cd->type)) {
+ if (passphrase)
+ r = BITLK_get_volume_key(cd, passphrase, passphrase_size, &cd->u.bitlk.params, &vk);
+ if (r < 0)
+ log_err(cd, _("Cannot retrieve volume key for BITLK device."));
+ } else if (isFVAULT2(cd->type)) {
+ if (passphrase)
+ r = FVAULT2_get_volume_key(cd, passphrase, passphrase_size, &cd->u.fvault2.params, &vk);
+ if (r < 0)
+ log_err(cd, _("Cannot retrieve volume key for FVAULT2 device."));
+ } else
+ log_err(cd, _("This operation is not supported for %s crypt device."), cd->type ?: "(none)");
+
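+	/* Fall back to the volume key cached in the device context when no keyslot provided one. */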
+ if (r == -ENOENT && isLUKS(cd->type) && cd->volume_key) {
+ vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
+ r = vk ? 0 : -ENOMEM;
+ }
+
+ if (r >= 0 && vk) {
+ memcpy(volume_key, vk->key, vk->keylength);
+ *volume_key_size = vk->keylength;
+ }
+
+ crypt_free_volume_key(vk);
+ return r;
+}
+
+int crypt_volume_key_verify(struct crypt_device *cd,
+ const char *volume_key,
+ size_t volume_key_size)
+{
+ struct volume_key *vk;
+ int r;
+
+ if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
+ return r;
+
+ vk = crypt_alloc_volume_key(volume_key_size, volume_key);
+ if (!vk)
+ return -ENOMEM;
+
+ if (isLUKS1(cd->type))
+ r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
+ else if (isLUKS2(cd->type))
+ r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
+ else
+ r = -EINVAL;
+
+ crypt_free_volume_key(vk);
+
+ return r >= 0 ? 0 : r;
+}
+
+/*
+ * RNG and memory locking
+ */
+void crypt_set_rng_type(struct crypt_device *cd, int rng_type)
+{
+ if (!cd)
+ return;
+
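+	/* Values other than CRYPT_RNG_URANDOM and CRYPT_RNG_RANDOM are silently ignored. */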
+ switch (rng_type) {
+ case CRYPT_RNG_URANDOM:
+ case CRYPT_RNG_RANDOM:
+ log_dbg(cd, "RNG set to %d (%s).", rng_type, rng_type ? "random" : "urandom");
+ cd->rng_type = rng_type;
+ }
+}
+
+int crypt_get_rng_type(struct crypt_device *cd)
+{
+ if (!cd)
+ return -EINVAL;
+
+ return cd->rng_type;
+}
+
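+/* Deprecated no-op kept for API compatibility; sensitive memory handling is internal to the library. */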
+int crypt_memory_lock(struct crypt_device *cd, int lock)
+{
+ return 0;
+}
+
+void crypt_set_compatibility(struct crypt_device *cd, uint32_t flags)
+{
+ if (cd)
+ cd->compatibility = flags;
+}
+
+uint32_t crypt_get_compatibility(struct crypt_device *cd)
+{
+ if (cd)
+ return cd->compatibility;
+
+ return 0;
+}
+
+/*
+ * Reporting
+ */
+crypt_status_info crypt_status(struct crypt_device *cd, const char *name)
+{
+ int r;
+
+ if (!name)
+ return CRYPT_INVALID;
+
+ if (!cd)
+ dm_backend_init(cd);
+
+ r = dm_status_device(cd, name);
+
+ if (!cd)
+ dm_backend_exit(cd);
+
+ if (r < 0 && r != -ENODEV)
+ return CRYPT_INVALID;
+
+ if (r == 0)
+ return CRYPT_ACTIVE;
+
+ if (r > 0)
+ return CRYPT_BUSY;
+
+ return CRYPT_INACTIVE;
+}
+
+static int _luks_dump(struct crypt_device *cd)
+{
+ int i;
+
+ log_std(cd, "LUKS header information for %s\n\n", mdata_device_path(cd));
+ log_std(cd, "Version: \t%" PRIu16 "\n", cd->u.luks1.hdr.version);
+ log_std(cd, "Cipher name: \t%s\n", cd->u.luks1.hdr.cipherName);
+ log_std(cd, "Cipher mode: \t%s\n", cd->u.luks1.hdr.cipherMode);
+ log_std(cd, "Hash spec: \t%s\n", cd->u.luks1.hdr.hashSpec);
+ log_std(cd, "Payload offset:\t%" PRIu32 "\n", cd->u.luks1.hdr.payloadOffset);
+ log_std(cd, "MK bits: \t%" PRIu32 "\n", cd->u.luks1.hdr.keyBytes * 8);
+ log_std(cd, "MK digest: \t");
+ crypt_log_hex(cd, cd->u.luks1.hdr.mkDigest, LUKS_DIGESTSIZE, " ", 0, NULL);
+ log_std(cd, "\n");
+ log_std(cd, "MK salt: \t");
+ crypt_log_hex(cd, cd->u.luks1.hdr.mkDigestSalt, LUKS_SALTSIZE/2, " ", 0, NULL);
+ log_std(cd, "\n \t");
+ crypt_log_hex(cd, cd->u.luks1.hdr.mkDigestSalt+LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ", 0, NULL);
+ log_std(cd, "\n");
+ log_std(cd, "MK iterations: \t%" PRIu32 "\n", cd->u.luks1.hdr.mkDigestIterations);
+ log_std(cd, "UUID: \t%s\n\n", cd->u.luks1.hdr.uuid);
+	for (i = 0; i < LUKS_NUMKEYS; i++) {
+		if (cd->u.luks1.hdr.keyblock[i].active == LUKS_KEY_ENABLED) {
+ log_std(cd, "Key Slot %d: ENABLED\n",i);
+ log_std(cd, "\tIterations: \t%" PRIu32 "\n",
+ cd->u.luks1.hdr.keyblock[i].passwordIterations);
+ log_std(cd, "\tSalt: \t");
+ crypt_log_hex(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt,
+ LUKS_SALTSIZE/2, " ", 0, NULL);
+ log_std(cd, "\n\t \t");
+ crypt_log_hex(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt +
+ LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ", 0, NULL);
+ log_std(cd, "\n");
+
+ log_std(cd, "\tKey material offset:\t%" PRIu32 "\n",
+ cd->u.luks1.hdr.keyblock[i].keyMaterialOffset);
+ log_std(cd, "\tAF stripes: \t%" PRIu32 "\n",
+ cd->u.luks1.hdr.keyblock[i].stripes);
+		} else
+			log_std(cd, "Key Slot %d: DISABLED\n", i);
+ }
+ return 0;
+}
+
+int crypt_dump(struct crypt_device *cd)
+{
+ if (!cd)
+ return -EINVAL;
+ if (isLUKS1(cd->type))
+ return _luks_dump(cd);
+ else if (isLUKS2(cd->type))
+ return LUKS2_hdr_dump(cd, &cd->u.luks2.hdr);
+ else if (isVERITY(cd->type))
+ return VERITY_dump(cd, &cd->u.verity.hdr,
+ cd->u.verity.root_hash, cd->u.verity.root_hash_size,
+ cd->u.verity.fec_device);
+ else if (isTCRYPT(cd->type))
+ return TCRYPT_dump(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
+ else if (isINTEGRITY(cd->type))
+ return INTEGRITY_dump(cd, crypt_data_device(cd), 0);
+ else if (isBITLK(cd->type))
+ return BITLK_dump(cd, crypt_data_device(cd), &cd->u.bitlk.params);
+ else if (isFVAULT2(cd->type))
+ return FVAULT2_dump(cd, crypt_data_device(cd), &cd->u.fvault2.params);
+
+ log_err(cd, _("Dump operation is not supported for this device type."));
+ return -EINVAL;
+}
+
+int crypt_dump_json(struct crypt_device *cd, const char **json, uint32_t flags)
+{
+ if (!cd || flags)
+ return -EINVAL;
+ if (isLUKS2(cd->type))
+ return LUKS2_hdr_dump_json(cd, &cd->u.luks2.hdr, json);
+
+ log_err(cd, _("Dump operation is not supported for this device type."));
+ return -EINVAL;
+}
+
+/* internal only */
+const char *crypt_get_cipher_spec(struct crypt_device *cd)
+{
+ if (!cd)
+ return NULL;
+ else if (isLUKS2(cd->type))
+ return LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
+ else if (isLUKS1(cd->type))
+ return cd->u.luks1.cipher_spec;
+ else if (isPLAIN(cd->type))
+ return cd->u.plain.cipher_spec;
+ else if (isLOOPAES(cd->type))
+ return cd->u.loopaes.cipher_spec;
+ else if (isBITLK(cd->type))
+ return cd->u.bitlk.cipher_spec;
+ else if (!cd->type && !_init_by_name_crypt_none(cd))
+ return cd->u.none.cipher_spec;
+
+ return NULL;
+}
+
+const char *crypt_get_cipher(struct crypt_device *cd)
+{
+ if (!cd)
+ return NULL;
+
+ if (isPLAIN(cd->type))
+ return cd->u.plain.cipher;
+
+ if (isLUKS1(cd->type))
+ return cd->u.luks1.hdr.cipherName;
+
+ if (isLUKS2(cd->type)) {
+ if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT),
+ cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode))
+ return NULL;
+ return cd->u.luks2.cipher;
+ }
+
+ if (isLOOPAES(cd->type))
+ return cd->u.loopaes.cipher;
+
+ if (isTCRYPT(cd->type))
+ return cd->u.tcrypt.params.cipher;
+
+ if (isBITLK(cd->type))
+ return cd->u.bitlk.params.cipher;
+
+ if (isFVAULT2(cd->type))
+ return cd->u.fvault2.params.cipher;
+
+ if (!cd->type && !_init_by_name_crypt_none(cd))
+ return cd->u.none.cipher;
+
+ return NULL;
+}
+
+const char *crypt_get_cipher_mode(struct crypt_device *cd)
+{
+ if (!cd)
+ return NULL;
+
+ if (isPLAIN(cd->type))
+ return cd->u.plain.cipher_mode;
+
+ if (isLUKS1(cd->type))
+ return cd->u.luks1.hdr.cipherMode;
+
+ if (isLUKS2(cd->type)) {
+ if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT),
+ cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode))
+ return NULL;
+ return cd->u.luks2.cipher_mode;
+ }
+
+ if (isLOOPAES(cd->type))
+ return cd->u.loopaes.cipher_mode;
+
+ if (isTCRYPT(cd->type))
+ return cd->u.tcrypt.params.mode;
+
+ if (isBITLK(cd->type))
+ return cd->u.bitlk.params.cipher_mode;
+
+ if (isFVAULT2(cd->type))
+ return cd->u.fvault2.params.cipher_mode;
+
+ if (!cd->type && !_init_by_name_crypt_none(cd))
+ return cd->u.none.cipher_mode;
+
+ return NULL;
+}
+
+/* INTERNAL only */
+const char *crypt_get_integrity(struct crypt_device *cd)
+{
+ if (!cd)
+ return NULL;
+
+ if (isINTEGRITY(cd->type))
+ return cd->u.integrity.params.integrity;
+
+ if (isLUKS2(cd->type))
+ return LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
+
+ return NULL;
+}
+
+/* INTERNAL only */
+int crypt_get_integrity_key_size(struct crypt_device *cd)
+{
+ int key_size = 0;
+
+	if (isINTEGRITY(cd->type) || isLUKS2(cd->type))
+		key_size = INTEGRITY_key_size(crypt_get_integrity(cd));
+
+ return key_size > 0 ? key_size : 0;
+}
+
+/* INTERNAL only */
+int crypt_get_integrity_tag_size(struct crypt_device *cd)
+{
+ if (isINTEGRITY(cd->type))
+ return cd->u.integrity.params.tag_size;
+
+ if (isLUKS2(cd->type))
+ return INTEGRITY_tag_size(crypt_get_integrity(cd),
+ crypt_get_cipher(cd),
+ crypt_get_cipher_mode(cd));
+ return 0;
+}
+
+int crypt_get_sector_size(struct crypt_device *cd)
+{
+ if (!cd)
+ return SECTOR_SIZE;
+
+ if (isPLAIN(cd->type))
+ return cd->u.plain.hdr.sector_size;
+
+ if (isINTEGRITY(cd->type))
+ return cd->u.integrity.params.sector_size;
+
+ if (isLUKS2(cd->type))
+ return LUKS2_get_sector_size(&cd->u.luks2.hdr);
+
+ return SECTOR_SIZE;
+}
+
+const char *crypt_get_uuid(struct crypt_device *cd)
+{
+ if (!cd)
+ return NULL;
+
+ if (isLUKS1(cd->type))
+ return cd->u.luks1.hdr.uuid;
+
+ if (isLUKS2(cd->type))
+ return cd->u.luks2.hdr.uuid;
+
+ if (isVERITY(cd->type))
+ return cd->u.verity.uuid;
+
+ if (isBITLK(cd->type))
+ return cd->u.bitlk.params.guid;
+
+ if (isFVAULT2(cd->type))
+ return cd->u.fvault2.params.family_uuid;
+
+ return NULL;
+}
+
+const char *crypt_get_device_name(struct crypt_device *cd)
+{
+ const char *path;
+
+ if (!cd)
+ return NULL;
+
+ path = device_block_path(cd->device);
+ if (!path)
+ path = device_path(cd->device);
+
+ return path;
+}
+
+const char *crypt_get_metadata_device_name(struct crypt_device *cd)
+{
+ const char *path;
+
+ if (!cd || !cd->metadata_device)
+ return NULL;
+
+ path = device_block_path(cd->metadata_device);
+ if (!path)
+ path = device_path(cd->metadata_device);
+
+ return path;
+}
+
+int crypt_get_volume_key_size(struct crypt_device *cd)
+{
+ int r;
+
+ if (!cd)
+ return 0;
+
+ if (isPLAIN(cd->type))
+ return cd->u.plain.key_size;
+
+ if (isLUKS1(cd->type))
+ return cd->u.luks1.hdr.keyBytes;
+
+ if (isLUKS2(cd->type)) {
+ r = LUKS2_get_volume_key_size(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
+ if (r < 0 && cd->volume_key)
+ r = cd->volume_key->keylength;
+ return r < 0 ? 0 : r;
+ }
+
+ if (isLOOPAES(cd->type))
+ return cd->u.loopaes.key_size;
+
+ if (isVERITY(cd->type))
+ return cd->u.verity.root_hash_size;
+
+ if (isTCRYPT(cd->type))
+ return cd->u.tcrypt.params.key_size;
+
+ if (isBITLK(cd->type))
+ return cd->u.bitlk.params.key_size / 8;
+
+ if (isFVAULT2(cd->type))
+ return cd->u.fvault2.params.key_size;
+
+ if (!cd->type && !_init_by_name_crypt_none(cd))
+ return cd->u.none.key_size;
+
+ return 0;
+}
+
+int crypt_keyslot_get_key_size(struct crypt_device *cd, int keyslot)
+{
+ if (!cd || !isLUKS(cd->type))
+ return -EINVAL;
+
+ if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
+ return -EINVAL;
+
+ if (isLUKS1(cd->type))
+ return cd->u.luks1.hdr.keyBytes;
+
+ if (isLUKS2(cd->type))
+ return LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot);
+
+ return -EINVAL;
+}
+
+int crypt_keyslot_set_encryption(struct crypt_device *cd,
+ const char *cipher,
+ size_t key_size)
+{
+ char *tmp;
+
+ if (!cd || !cipher || !key_size || !isLUKS2(cd->type))
+ return -EINVAL;
+
+ if (LUKS2_keyslot_cipher_incompatible(cd, cipher))
+ return -EINVAL;
+
+ if (!(tmp = strdup(cipher)))
+ return -ENOMEM;
+
+ free(cd->u.luks2.keyslot_cipher);
+ cd->u.luks2.keyslot_cipher = tmp;
+ cd->u.luks2.keyslot_key_size = key_size;
+
+ return 0;
+}
+
+const char *crypt_keyslot_get_encryption(struct crypt_device *cd, int keyslot, size_t *key_size)
+{
+ const char *cipher;
+
+ if (!cd || !isLUKS(cd->type) || !key_size)
+ return NULL;
+
+ if (isLUKS1(cd->type)) {
+ if (keyslot != CRYPT_ANY_SLOT &&
+ LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot) < CRYPT_SLOT_ACTIVE)
+ return NULL;
+ *key_size = crypt_get_volume_key_size(cd);
+ return cd->u.luks1.cipher_spec;
+ }
+
+ if (keyslot != CRYPT_ANY_SLOT)
+ return LUKS2_get_keyslot_cipher(&cd->u.luks2.hdr, keyslot, key_size);
+
+ /* Keyslot encryption was set through crypt_keyslot_set_encryption() */
+ if (cd->u.luks2.keyslot_cipher) {
+ *key_size = cd->u.luks2.keyslot_key_size;
+ return cd->u.luks2.keyslot_cipher;
+ }
+
+ /* Try to reuse volume encryption parameters */
+ cipher = LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
+ if (!LUKS2_keyslot_cipher_incompatible(cd, cipher)) {
+ *key_size = crypt_get_volume_key_size(cd);
+ if (*key_size)
+ return cipher;
+ }
+
+	/* Fall back to the default LUKS2 keyslot encryption */
+ *key_size = DEFAULT_LUKS2_KEYSLOT_KEYBITS / 8;
+ return DEFAULT_LUKS2_KEYSLOT_CIPHER;
+}
+
+int crypt_keyslot_get_pbkdf(struct crypt_device *cd, int keyslot, struct crypt_pbkdf_type *pbkdf)
+{
+ if (!cd || !pbkdf || keyslot == CRYPT_ANY_SLOT)
+ return -EINVAL;
+
+ if (isLUKS1(cd->type))
+ return LUKS_keyslot_pbkdf(&cd->u.luks1.hdr, keyslot, pbkdf);
+ else if (isLUKS2(cd->type))
+ return LUKS2_keyslot_pbkdf(&cd->u.luks2.hdr, keyslot, pbkdf);
+
+ return -EINVAL;
+}
+
+int crypt_set_data_offset(struct crypt_device *cd, uint64_t data_offset)
+{
+ if (!cd)
+ return -EINVAL;
+ if (data_offset % (MAX_SECTOR_SIZE >> SECTOR_SHIFT)) {
+		log_err(cd, _("Data offset is not a multiple of %u bytes."), MAX_SECTOR_SIZE);
+ return -EINVAL;
+ }
+
+ cd->data_offset = data_offset;
+ log_dbg(cd, "Data offset set to %" PRIu64 " (512-byte) sectors.", data_offset);
+
+ return 0;
+}
+
+int crypt_set_metadata_size(struct crypt_device *cd,
+ uint64_t metadata_size,
+ uint64_t keyslots_size)
+{
+ if (!cd)
+ return -EINVAL;
+
+ if (cd->type && !isLUKS2(cd->type))
+ return -EINVAL;
+
+ if (metadata_size && LUKS2_check_metadata_area_size(metadata_size))
+ return -EINVAL;
+
+ if (keyslots_size && LUKS2_check_keyslots_area_size(keyslots_size))
+ return -EINVAL;
+
+ cd->metadata_size = metadata_size;
+ cd->keyslots_size = keyslots_size;
+
+ return 0;
+}
+
+int crypt_get_metadata_size(struct crypt_device *cd,
+ uint64_t *metadata_size,
+ uint64_t *keyslots_size)
+{
+ uint64_t msize, ksize;
+
+ if (!cd)
+ return -EINVAL;
+
+ if (!cd->type) {
+ msize = cd->metadata_size;
+ ksize = cd->keyslots_size;
+ } else if (isLUKS1(cd->type)) {
+ msize = LUKS_ALIGN_KEYSLOTS;
+ ksize = LUKS_device_sectors(&cd->u.luks1.hdr) * SECTOR_SIZE - msize;
+ } else if (isLUKS2(cd->type)) {
+ msize = LUKS2_metadata_size(&cd->u.luks2.hdr);
+ ksize = LUKS2_keyslots_size(&cd->u.luks2.hdr);
+ } else
+ return -EINVAL;
+
+ if (metadata_size)
+ *metadata_size = msize;
+ if (keyslots_size)
+ *keyslots_size = ksize;
+
+ return 0;
+}
+
+uint64_t crypt_get_data_offset(struct crypt_device *cd)
+{
+ if (!cd)
+ return 0;
+
+ if (isPLAIN(cd->type))
+ return cd->u.plain.hdr.offset;
+
+ if (isLUKS1(cd->type))
+ return cd->u.luks1.hdr.payloadOffset;
+
+ if (isLUKS2(cd->type))
+ return LUKS2_get_data_offset(&cd->u.luks2.hdr);
+
+ if (isLOOPAES(cd->type))
+ return cd->u.loopaes.hdr.offset;
+
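+		/* Use the kernel keyring for the volume key when activating with a non-null cipher, or when explicitly requested. */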
+ if (isTCRYPT(cd->type))
+ return TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
+
+ if (isBITLK(cd->type))
+ return cd->u.bitlk.params.volume_header_size / SECTOR_SIZE;
+
+ if (isFVAULT2(cd->type))
+ return cd->u.fvault2.params.log_vol_off / SECTOR_SIZE;
+
+ return cd->data_offset;
+}
+
+uint64_t crypt_get_iv_offset(struct crypt_device *cd)
+{
+ if (!cd)
+ return 0;
+
+ if (isPLAIN(cd->type))
+ return cd->u.plain.hdr.skip;
+
+ if (isLOOPAES(cd->type))
+ return cd->u.loopaes.hdr.skip;
+
+ if (isTCRYPT(cd->type))
+ return TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
+
+ return 0;
+}
+
+crypt_keyslot_info crypt_keyslot_status(struct crypt_device *cd, int keyslot)
+{
+ if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED) < 0)
+ return CRYPT_SLOT_INVALID;
+
+ if (isLUKS1(cd->type))
+ return LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot);
+	else if (isLUKS2(cd->type))
+ return LUKS2_keyslot_info(&cd->u.luks2.hdr, keyslot);
+
+ return CRYPT_SLOT_INVALID;
+}
+
+int crypt_keyslot_max(const char *type)
+{
+ if (isLUKS1(type))
+ return LUKS_NUMKEYS;
+
+ if (isLUKS2(type))
+ return LUKS2_KEYSLOTS_MAX;
+
+ return -EINVAL;
+}
+
+int crypt_keyslot_area(struct crypt_device *cd,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length)
+{
+ if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED) || !offset || !length)
+ return -EINVAL;
+
+ if (isLUKS2(cd->type))
+ return LUKS2_keyslot_area(&cd->u.luks2.hdr, keyslot, offset, length);
+
+ return LUKS_keyslot_area(&cd->u.luks1.hdr, keyslot, offset, length);
+}
+
+crypt_keyslot_priority crypt_keyslot_get_priority(struct crypt_device *cd, int keyslot)
+{
+ if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED))
+ return CRYPT_SLOT_PRIORITY_INVALID;
+
+ if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
+ return CRYPT_SLOT_PRIORITY_INVALID;
+
+ if (isLUKS2(cd->type))
+ return LUKS2_keyslot_priority_get(&cd->u.luks2.hdr, keyslot);
+
+ return CRYPT_SLOT_PRIORITY_NORMAL;
+}
+
+int crypt_keyslot_set_priority(struct crypt_device *cd, int keyslot, crypt_keyslot_priority priority)
+{
+ int r;
+
+ log_dbg(cd, "Setting keyslot %d to priority %d.", keyslot, priority);
+
+ if (priority == CRYPT_SLOT_PRIORITY_INVALID)
+ return -EINVAL;
+
+ if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
+ return -EINVAL;
+
+ if ((r = onlyLUKS2(cd)))
+ return r;
+
+ return LUKS2_keyslot_priority_set(cd, &cd->u.luks2.hdr, keyslot, priority, 1);
+}
+
+const char *crypt_get_type(struct crypt_device *cd)
+{
+ return cd ? cd->type : NULL;
+}
+
+const char *crypt_get_default_type(void)
+{
+ return DEFAULT_LUKS_FORMAT;
+}
+
+int crypt_get_verity_info(struct crypt_device *cd,
+ struct crypt_params_verity *vp)
+{
+ if (!cd || !isVERITY(cd->type) || !vp)
+ return -EINVAL;
+
+ vp->data_device = device_path(cd->device);
+ vp->hash_device = mdata_device_path(cd);
+ vp->fec_device = device_path(cd->u.verity.fec_device);
+ vp->fec_area_offset = cd->u.verity.hdr.fec_area_offset;
+ vp->fec_roots = cd->u.verity.hdr.fec_roots;
+ vp->hash_name = cd->u.verity.hdr.hash_name;
+ vp->salt = cd->u.verity.hdr.salt;
+ vp->salt_size = cd->u.verity.hdr.salt_size;
+ vp->data_block_size = cd->u.verity.hdr.data_block_size;
+ vp->hash_block_size = cd->u.verity.hdr.hash_block_size;
+ vp->data_size = cd->u.verity.hdr.data_size;
+ vp->hash_area_offset = cd->u.verity.hdr.hash_area_offset;
+ vp->hash_type = cd->u.verity.hdr.hash_type;
+ vp->flags = cd->u.verity.hdr.flags & (CRYPT_VERITY_NO_HEADER | CRYPT_VERITY_ROOT_HASH_SIGNATURE);
+ return 0;
+}
+
+int crypt_get_integrity_info(struct crypt_device *cd,
+ struct crypt_params_integrity *ip)
+{
+ if (!cd || !ip)
+ return -EINVAL;
+
+ if (isINTEGRITY(cd->type)) {
+ ip->journal_size = cd->u.integrity.params.journal_size;
+ ip->journal_watermark = cd->u.integrity.params.journal_watermark;
+ ip->journal_commit_time = cd->u.integrity.params.journal_commit_time;
+ ip->interleave_sectors = cd->u.integrity.params.interleave_sectors;
+ ip->tag_size = cd->u.integrity.params.tag_size;
+ ip->sector_size = cd->u.integrity.params.sector_size;
+ ip->buffer_sectors = cd->u.integrity.params.buffer_sectors;
+
+ ip->integrity = cd->u.integrity.params.integrity;
+ ip->integrity_key_size = crypt_get_integrity_key_size(cd);
+
+ ip->journal_integrity = cd->u.integrity.params.journal_integrity;
+ ip->journal_integrity_key_size = cd->u.integrity.params.journal_integrity_key_size;
+ ip->journal_integrity_key = NULL;
+
+ ip->journal_crypt = cd->u.integrity.params.journal_crypt;
+ ip->journal_crypt_key_size = cd->u.integrity.params.journal_crypt_key_size;
+ ip->journal_crypt_key = NULL;
+ return 0;
+ } else if (isLUKS2(cd->type)) {
+ ip->journal_size = 0; // FIXME
+ ip->journal_watermark = 0; // FIXME
+ ip->journal_commit_time = 0; // FIXME
+ ip->interleave_sectors = 0; // FIXME
+ ip->sector_size = crypt_get_sector_size(cd);
+ ip->buffer_sectors = 0; // FIXME
+
+ ip->integrity = LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
+ ip->integrity_key_size = crypt_get_integrity_key_size(cd);
+ ip->tag_size = INTEGRITY_tag_size(ip->integrity, crypt_get_cipher(cd), crypt_get_cipher_mode(cd));
+
+ ip->journal_integrity = NULL;
+ ip->journal_integrity_key_size = 0;
+ ip->journal_integrity_key = NULL;
+
+ ip->journal_crypt = NULL;
+ ip->journal_crypt_key_size = 0;
+ ip->journal_crypt_key = NULL;
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
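+/* Convert metadata between LUKS1 and LUKS2 in place; a failed conversion rolls back the in-memory header. */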
+int crypt_convert(struct crypt_device *cd,
+ const char *type,
+ void *params)
+{
+ struct luks_phdr hdr1;
+ struct luks2_hdr hdr2;
+ int r;
+
+ if (!type)
+ return -EINVAL;
+
+ log_dbg(cd, "Converting LUKS device to type %s", type);
+
+ if ((r = onlyLUKS(cd)))
+ return r;
+
+ if (isLUKS1(cd->type) && isLUKS2(type))
+ r = LUKS2_luks1_to_luks2(cd, &cd->u.luks1.hdr, &hdr2);
+ else if (isLUKS2(cd->type) && isLUKS1(type))
+ r = LUKS2_luks2_to_luks1(cd, &cd->u.luks2.hdr, &hdr1);
+ else
+ return -EINVAL;
+
+ if (r < 0) {
+ /* in-memory header may be invalid after failed conversion */
+ _luks2_rollback(cd);
+ if (r == -EBUSY)
+ log_err(cd, _("Cannot convert device %s which is still in use."), mdata_device_path(cd));
+ return r;
+ }
+
+ crypt_free_type(cd, NULL);
+
+ return crypt_load(cd, type, params);
+}
+
+/* Internal access function to header pointer */
+void *crypt_get_hdr(struct crypt_device *cd, const char *type)
+{
+ /* If requested type differs, ignore it */
+ if (strcmp(cd->type, type))
+ return NULL;
+
+ if (isPLAIN(cd->type))
+ return &cd->u.plain;
+
+ if (isLUKS1(cd->type))
+ return &cd->u.luks1.hdr;
+
+ if (isLUKS2(cd->type))
+ return &cd->u.luks2.hdr;
+
+ if (isLOOPAES(cd->type))
+ return &cd->u.loopaes;
+
+ if (isVERITY(cd->type))
+ return &cd->u.verity;
+
+ if (isTCRYPT(cd->type))
+ return &cd->u.tcrypt;
+
+ return NULL;
+}
+
+/* internal only */
+struct luks2_reencrypt *crypt_get_luks2_reencrypt(struct crypt_device *cd)
+{
+ return cd->u.luks2.rh;
+}
+
+/* internal only */
+void crypt_set_luks2_reencrypt(struct crypt_device *cd, struct luks2_reencrypt *rh)
+{
+ cd->u.luks2.rh = rh;
+}
+
+/*
+ * Token handling
+ */
+int crypt_activate_by_token_pin(struct crypt_device *cd, const char *name,
+ const char *type, int token, const char *pin, size_t pin_size,
+ void *usrptr, uint32_t flags)
+{
+ int r;
+
+ log_dbg(cd, "%s volume %s using token (%s type) %d.",
+ name ? "Activating" : "Checking", name ?: "passphrase",
+ type ?: "any", token);
+
+ if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)))
+ return r;
+
+ if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd))
+ return -EINVAL;
+
+ if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name)
+ return -EINVAL;
+
+ r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
+ if (r < 0)
+ return r;
+
+ return LUKS2_token_open_and_activate(cd, &cd->u.luks2.hdr, token, name, type,
+ pin, pin_size, flags, usrptr);
+}
+
+int crypt_activate_by_token(struct crypt_device *cd,
+ const char *name, int token, void *usrptr, uint32_t flags)
+{
+ return crypt_activate_by_token_pin(cd, name, NULL, token, NULL, 0, usrptr, flags);
+}
+
+int crypt_token_json_get(struct crypt_device *cd, int token, const char **json)
+{
+ int r;
+
+ if (!json)
+ return -EINVAL;
+
+ log_dbg(cd, "Requesting JSON for token %d.", token);
+
+ if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
+ return r;
+
+ return LUKS2_token_json_get(&cd->u.luks2.hdr, token, json) ?: token;
+}
+
+int crypt_token_json_set(struct crypt_device *cd, int token, const char *json)
+{
+ int r;
+
+ log_dbg(cd, "Updating JSON for token %d.", token);
+
+ if ((r = onlyLUKS2(cd)))
+ return r;
+
+ return LUKS2_token_create(cd, &cd->u.luks2.hdr, token, json, 1);
+}
+
+crypt_token_info crypt_token_status(struct crypt_device *cd, int token, const char **type)
+{
+ if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))
+ return CRYPT_TOKEN_INVALID;
+
+ return LUKS2_token_status(cd, &cd->u.luks2.hdr, token, type);
+}
+
+int crypt_token_max(const char *type)
+{
+ if (isLUKS2(type))
+ return LUKS2_TOKENS_MAX;
+
+ return -EINVAL;
+}
+
+int crypt_token_luks2_keyring_get(struct crypt_device *cd,
+ int token,
+ struct crypt_token_params_luks2_keyring *params)
+{
+ crypt_token_info token_info;
+ const char *type;
+ int r;
+
+ if (!params)
+ return -EINVAL;
+
+ log_dbg(cd, "Requesting LUKS2 keyring token %d.", token);
+
+ if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
+ return r;
+
+ token_info = LUKS2_token_status(cd, &cd->u.luks2.hdr, token, &type);
+ switch (token_info) {
+ case CRYPT_TOKEN_INVALID:
+ log_dbg(cd, "Token %d is invalid.", token);
+ return -EINVAL;
+ case CRYPT_TOKEN_INACTIVE:
+ log_dbg(cd, "Token %d is inactive.", token);
+ return -EINVAL;
+ case CRYPT_TOKEN_INTERNAL:
+ if (!strcmp(type, LUKS2_TOKEN_KEYRING))
+ break;
+ /* Fall through */
+ case CRYPT_TOKEN_INTERNAL_UNKNOWN:
+ case CRYPT_TOKEN_EXTERNAL:
+ case CRYPT_TOKEN_EXTERNAL_UNKNOWN:
+ log_dbg(cd, "Token %d has unexpected type %s.", token, type);
+ return -EINVAL;
+ }
+
+ return LUKS2_token_keyring_get(&cd->u.luks2.hdr, token, params);
+}
+
+int crypt_token_luks2_keyring_set(struct crypt_device *cd,
+ int token,
+ const struct crypt_token_params_luks2_keyring *params)
+{
+ int r;
+ char json[4096];
+
+ if (!params || !params->key_description)
+ return -EINVAL;
+
+ log_dbg(cd, "Creating new LUKS2 keyring token (%d).", token);
+
+ if ((r = onlyLUKS2(cd)))
+ return r;
+
+ r = LUKS2_token_keyring_json(json, sizeof(json), params);
+ if (r < 0)
+ return r;
+
+ return LUKS2_token_create(cd, &cd->u.luks2.hdr, token, json, 1);
+}
+
+int crypt_token_assign_keyslot(struct crypt_device *cd, int token, int keyslot)
+{
+ int r;
+
+ if ((r = onlyLUKS2(cd)))
+ return r;
+
+ return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 1, 1);
+}
+
+int crypt_token_unassign_keyslot(struct crypt_device *cd, int token, int keyslot)
+{
+ int r;
+
+ if ((r = onlyLUKS2(cd)))
+ return r;
+
+ return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 0, 1);
+}
+
+int crypt_token_is_assigned(struct crypt_device *cd, int token, int keyslot)
+{
+ int r;
+
+ if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)))
+ return r;
+
+ return LUKS2_token_is_assigned(&cd->u.luks2.hdr, keyslot, token);
+}
+
+/* Internal only */
+int crypt_metadata_locking_enabled(void)
+{
+ return _metadata_locking;
+}
+
+int crypt_metadata_locking(struct crypt_device *cd __attribute__((unused)), int enable)
+{
+ if (enable && !_metadata_locking)
+ return -EPERM;
+
+ _metadata_locking = enable ? 1 : 0;
+ return 0;
+}
+
+int crypt_persistent_flags_set(struct crypt_device *cd, crypt_flags_type type, uint32_t flags)
+{
+ int r;
+
+ if ((r = onlyLUKS2(cd)))
+ return r;
+
+ if (type == CRYPT_FLAGS_ACTIVATION)
+ return LUKS2_config_set_flags(cd, &cd->u.luks2.hdr, flags);
+
+ if (type == CRYPT_FLAGS_REQUIREMENTS)
+ return LUKS2_config_set_requirements(cd, &cd->u.luks2.hdr, flags, true);
+
+ return -EINVAL;
+}
+
+int crypt_persistent_flags_get(struct crypt_device *cd, crypt_flags_type type, uint32_t *flags)
+{
+ int r;
+
+ if (!flags)
+ return -EINVAL;
+
+ if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
+ return r;
+
+ if (type == CRYPT_FLAGS_ACTIVATION)
+ return LUKS2_config_get_flags(cd, &cd->u.luks2.hdr, flags);
+
+ if (type == CRYPT_FLAGS_REQUIREMENTS)
+ return LUKS2_config_get_requirements(cd, &cd->u.luks2.hdr, flags);
+
+ return -EINVAL;
+}
+
+static int update_volume_key_segment_digest(struct crypt_device *cd, struct luks2_hdr *hdr, int digest, int commit)
+{
+ int r;
+
+ /* Remove any assignments in memory */
+ r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
+ if (r)
+ return r;
+
+ /* Assign it to the specific digest */
+ return LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, digest, 1, commit);
+}
+
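+/*
+ * Verify the key unlocked from 'keyslot' against the keyslot digests and, when
+ * it does not already match the default segment, reassign the default segment
+ * to its digest (making this keyslot hold the new volume key).
+ */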
+static int verify_and_update_segment_digest(struct crypt_device *cd,
+ struct luks2_hdr *hdr, int keyslot, struct crypt_keyslot_context *kc)
+{
+ int digest, r;
+ struct volume_key *vk = NULL;
+
+ assert(kc);
+ assert(kc->get_luks2_key);
+ assert(keyslot >= 0);
+
+ r = kc->get_luks2_key(cd, kc, keyslot, CRYPT_ANY_SEGMENT, &vk);
+ if (r < 0)
+ return r;
+
+	/* check that the volume_key (param) digest matches the keyslot digest */
+ r = LUKS2_digest_verify(cd, hdr, vk, keyslot);
+ if (r < 0)
+ goto out;
+ digest = r;
+
+ /* nothing to do, volume key in keyslot is already assigned to default segment */
+ r = LUKS2_digest_verify_by_segment(cd, hdr, CRYPT_DEFAULT_SEGMENT, vk);
+ if (r >= 0)
+ goto out;
+
+ /* FIXME: check new volume key is usable with current default segment */
+
+ r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 1);
+ if (r)
+ log_err(cd, _("Failed to assign keyslot %u as the new volume key."), keyslot);
+out:
+ crypt_free_volume_key(vk);
+
+ return r < 0 ? r : keyslot;
+}
+
+static int luks2_keyslot_add_by_verified_volume_key(struct crypt_device *cd,
+ int keyslot_new,
+ const char *new_passphrase,
+ size_t new_passphrase_size,
+ struct volume_key *vk)
+{
+ int r;
+ struct luks2_keyslot_params params;
+
+ assert(cd);
+ assert(keyslot_new >= 0);
+ assert(new_passphrase);
+ assert(vk);
+ assert(crypt_volume_key_get_id(vk) >= 0);
+
+ r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, &params);
+ if (r < 0) {
+ log_err(cd, _("Failed to initialize default LUKS2 keyslot parameters."));
+ return r;
+ }
+
+ r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, crypt_volume_key_get_id(vk), 1, 0);
+ if (r < 0) {
+ log_err(cd, _("Failed to assign keyslot %d to digest."), keyslot_new);
+ return r;
+ }
+
+ r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot_new,
+ CONST_CAST(char*)new_passphrase,
+ new_passphrase_size, vk, &params);
+
+ return r < 0 ? r : keyslot_new;
+}
+
+static int luks2_keyslot_add_by_volume_key(struct crypt_device *cd,
+ int keyslot_new,
+ const char *new_passphrase,
+ size_t new_passphrase_size,
+ struct volume_key *vk)
+{
+ int r;
+
+ assert(cd);
+ assert(keyslot_new >= 0);
+ assert(new_passphrase);
+ assert(vk);
+
+ r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
+ if (r >= 0)
+ crypt_volume_key_set_id(vk, r);
+
+ if (r < 0) {
+ log_err(cd, _("Volume key does not match the volume."));
+ return r;
+ }
+
+ return luks2_keyslot_add_by_verified_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk);
+}
+
+static int luks1_keyslot_add_by_volume_key(struct crypt_device *cd,
+ int keyslot_new,
+ const char *new_passphrase,
+ size_t new_passphrase_size,
+ struct volume_key *vk)
+{
+ int r;
+
+ assert(cd);
+ assert(keyslot_new >= 0);
+ assert(new_passphrase);
+ assert(vk);
+
+ r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
+ if (r < 0) {
+ log_err(cd, _("Volume key does not match the volume."));
+ return r;
+ }
+
+ r = LUKS_set_key(keyslot_new, CONST_CAST(char*)new_passphrase,
+ new_passphrase_size, &cd->u.luks1.hdr, vk, cd);
+
+ return r < 0 ? r : keyslot_new;
+}
+
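+/*
+ * Add a keyslot protected by new_passphrase storing the given volume key.
+ * Without flags this is a plain keyslot add. CRYPT_VOLUME_KEY_SET re-keys the
+ * default segment and CRYPT_VOLUME_KEY_NO_SEGMENT creates an unbound keyslot
+ * with its own digest; both are LUKS2 only.
+ */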
+static int keyslot_add_by_key(struct crypt_device *cd,
+ bool is_luks1,
+ int keyslot_new,
+ const char *new_passphrase,
+ size_t new_passphrase_size,
+ struct volume_key *vk,
+ uint32_t flags)
+{
+ int r, digest;
+
+ assert(cd);
+ assert(keyslot_new >= 0);
+ assert(new_passphrase);
+ assert(vk);
+
+ if (!flags)
+ return is_luks1 ? luks1_keyslot_add_by_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk) :
+ luks2_keyslot_add_by_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk);
+
+ if (is_luks1)
+ return -EINVAL;
+
+ digest = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
+	if (digest >= 0) /* if the key matches the volume key digest, drop the new VK flag */
+ flags &= ~CRYPT_VOLUME_KEY_SET;
+ else {
+		/* if the key matches any existing digest, do not create a new one */
+ if ((flags & CRYPT_VOLUME_KEY_DIGEST_REUSE))
+ digest = LUKS2_digest_any_matching(cd, &cd->u.luks2.hdr, vk);
+
+		/* the no-segment or new-VK flag requires a new key digest */
+ if (flags & (CRYPT_VOLUME_KEY_NO_SEGMENT | CRYPT_VOLUME_KEY_SET)) {
+ if (digest < 0 || !(flags & CRYPT_VOLUME_KEY_DIGEST_REUSE))
+ digest = LUKS2_digest_create(cd, "pbkdf2", &cd->u.luks2.hdr, vk);
+ }
+ }
+
+ r = digest;
+ if (r < 0) {
+ log_err(cd, _("Volume key does not match the volume."));
+ return r;
+ }
+
+ crypt_volume_key_set_id(vk, digest);
+
+ if (flags & CRYPT_VOLUME_KEY_SET) {
+ r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 0);
+ if (r < 0)
+ log_err(cd, _("Failed to assign keyslot %u as the new volume key."), keyslot_new);
+ }
+
+ if (r >= 0)
+ r = luks2_keyslot_add_by_verified_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk);
+
+ return r < 0 ? r : keyslot_new;
+}
+
+int crypt_keyslot_add_by_key(struct crypt_device *cd,
+ int keyslot,
+ const char *volume_key,
+ size_t volume_key_size,
+ const char *passphrase,
+ size_t passphrase_size,
+ uint32_t flags)
+{
+ int r;
+ struct crypt_keyslot_context kc, new_kc;
+
+ if (!passphrase || ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) &&
+ (flags & CRYPT_VOLUME_KEY_SET)))
+ return -EINVAL;
+
+ if ((r = onlyLUKS(cd)) < 0)
+ return r;
+
+ if ((flags & CRYPT_VOLUME_KEY_SET) && crypt_keyslot_status(cd, keyslot) > CRYPT_SLOT_INACTIVE &&
+ isLUKS2(cd->type)) {
+ if (volume_key)
+ crypt_keyslot_unlock_by_key_init_internal(&kc, volume_key, volume_key_size);
+ else
+ crypt_keyslot_unlock_by_passphrase_init_internal(&kc, passphrase, passphrase_size);
+
+ r = verify_and_update_segment_digest(cd, &cd->u.luks2.hdr, keyslot, &kc);
+
+ crypt_keyslot_context_destroy_internal(&kc);
+
+ return r;
+ }
+
+ crypt_keyslot_unlock_by_key_init_internal(&kc, volume_key, volume_key_size);
+ crypt_keyslot_unlock_by_passphrase_init_internal(&new_kc, passphrase, passphrase_size);
+
+ r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, flags);
+
+ crypt_keyslot_context_destroy_internal(&kc);
+ crypt_keyslot_context_destroy_internal(&new_kc);
+
+ return r;
+}
+
+int crypt_keyslot_add_by_keyslot_context(struct crypt_device *cd,
+ int keyslot_existing,
+ struct crypt_keyslot_context *kc,
+ int keyslot_new,
+ struct crypt_keyslot_context *new_kc,
+ uint32_t flags)
+{
+ bool is_luks1;
+ int active_slots, r;
+ const char *new_passphrase;
+ size_t new_passphrase_size;
+ struct volume_key *vk = NULL;
+
+ if (!kc || ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) &&
+ (flags & CRYPT_VOLUME_KEY_SET)))
+ return -EINVAL;
+
+ r = flags ? onlyLUKS2(cd) : onlyLUKS(cd);
+ if (r)
+ return r;
+
+ if ((flags & CRYPT_VOLUME_KEY_SET) && crypt_keyslot_status(cd, keyslot_existing) > CRYPT_SLOT_INACTIVE)
+ return verify_and_update_segment_digest(cd, &cd->u.luks2.hdr, keyslot_existing, kc);
+
+ if (!new_kc || !new_kc->get_passphrase)
+ return -EINVAL;
+
+ log_dbg(cd, "Adding new keyslot %d by %s%s, volume key provided by %s (%d).",
+ keyslot_new, keyslot_context_type_string(new_kc),
+ (flags & CRYPT_VOLUME_KEY_NO_SEGMENT) ? " unassigned to a crypt segment" : "",
+ keyslot_context_type_string(kc), keyslot_existing);
+
+ r = keyslot_verify_or_find_empty(cd, &keyslot_new);
+ if (r < 0)
+ return r;
+
+ is_luks1 = isLUKS1(cd->type);
+ if (is_luks1)
+ active_slots = LUKS_keyslot_active_count(&cd->u.luks1.hdr);
+ else
+ active_slots = LUKS2_keyslot_active_count(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
+
+ if (active_slots < 0)
+ return -EINVAL;
+
+ if (active_slots == 0 && kc->type != CRYPT_KC_TYPE_KEY)
+ r = -ENOENT;
+ else if (is_luks1 && kc->get_luks1_volume_key)
+ r = kc->get_luks1_volume_key(cd, kc, keyslot_existing, &vk);
+ else if (!is_luks1 && kc->get_luks2_volume_key)
+ r = kc->get_luks2_volume_key(cd, kc, keyslot_existing, &vk);
+ else
+ return -EINVAL;
+
+ if (r == -ENOENT) {
+ if ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) && kc->type == CRYPT_KC_TYPE_KEY) {
+ if (!(vk = crypt_generate_volume_key(cd, kc->u.k.volume_key_size)))
+ return -ENOMEM;
+ r = 0;
+ } else if (cd->volume_key) {
+ if (!(vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key)))
+ return -ENOMEM;
+ r = 0;
+ } else if (active_slots == 0) {
+ log_err(cd, _("Cannot add key slot, all slots disabled and no volume key provided."));
+ r = -EINVAL;
+ }
+ }
+
+ if (r < 0)
+ return r;
+
+ r = new_kc->get_passphrase(cd, new_kc, &new_passphrase, &new_passphrase_size);
+	/* If the new keyslot context is a token, just assign it to the new keyslot */
+ if (r >= 0 && new_kc->type == CRYPT_KC_TYPE_TOKEN && !is_luks1)
+ r = LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot_new, new_kc->u.t.id, 1, 0);
+ if (r >= 0)
+ r = keyslot_add_by_key(cd, is_luks1, keyslot_new, new_passphrase, new_passphrase_size, vk, flags);
+
+ crypt_free_volume_key(vk);
+
+ if (r < 0) {
+ _luks2_rollback(cd);
+ return r;
+ }
+
+ return keyslot_new;
+}
+
+/*
+ * Keyring handling
+ */
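+/* Returns 1 only for LUKS2 when the library setting, kernel keyring and dm-crypt keyring support all allow it. */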
+int crypt_use_keyring_for_vk(struct crypt_device *cd)
+{
+ uint32_t dmc_flags;
+
+ /* dm backend must be initialized */
+ if (!cd || !isLUKS2(cd->type))
+ return 0;
+
+ if (!_vk_via_keyring || !kernel_keyring_support())
+ return 0;
+
+ if (dm_flags(cd, DM_CRYPT, &dmc_flags))
+ return dmcrypt_keyring_bug() ? 0 : 1;
+
+ return (dmc_flags & DM_KERNEL_KEYRING_SUPPORTED);
+}
+
+int crypt_volume_key_keyring(struct crypt_device *cd __attribute__((unused)), int enable)
+{
+ _vk_via_keyring = enable ? 1 : 0;
+ return 0;
+}
+
+/* internal only */
+int crypt_volume_key_load_in_keyring(struct crypt_device *cd, struct volume_key *vk)
+{
+ int r;
+ const char *type_name = key_type_name(LOGON_KEY);
+
+ if (!vk || !cd || !type_name)
+ return -EINVAL;
+
+ if (!vk->key_description) {
+		log_dbg(cd, "Invalid key description.");
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Loading key (%zu bytes, type %s) in thread keyring.", vk->keylength, type_name);
+
+ r = keyring_add_key_in_thread_keyring(LOGON_KEY, vk->key_description, vk->key, vk->keylength);
+ if (r) {
+ log_dbg(cd, "keyring_add_key_in_thread_keyring failed (error %d)", r);
+ log_err(cd, _("Failed to load key in kernel keyring."));
+ } else
+ crypt_set_key_in_keyring(cd, 1);
+
+ return r;
+}
+
+/* internal only */
+int crypt_key_in_keyring(struct crypt_device *cd)
+{
+ return cd ? cd->key_in_keyring : 0;
+}
+
+/* internal only */
+void crypt_set_key_in_keyring(struct crypt_device *cd, unsigned key_in_keyring)
+{
+ if (!cd)
+ return;
+
+ cd->key_in_keyring = key_in_keyring;
+}
+
+/* internal only */
+void crypt_drop_keyring_key_by_description(struct crypt_device *cd, const char *key_description, key_type_t ktype)
+{
+ int r;
+ const char *type_name = key_type_name(ktype);
+
+ if (!key_description || !type_name)
+ return;
+
+ log_dbg(cd, "Requesting keyring %s key for revoke and unlink.", type_name);
+
+ r = keyring_revoke_and_unlink_key(ktype, key_description);
+ if (r)
+ log_dbg(cd, "keyring_revoke_and_unlink_key failed (error %d)", r);
+ crypt_set_key_in_keyring(cd, 0);
+}
+
+/* internal only */
+void crypt_drop_keyring_key(struct crypt_device *cd, struct volume_key *vks)
+{
+ struct volume_key *vk = vks;
+
+ while (vk) {
+ crypt_drop_keyring_key_by_description(cd, vk->key_description, LOGON_KEY);
+ vk = crypt_volume_key_next(vk);
+ }
+}
+
+int crypt_activate_by_keyring(struct crypt_device *cd,
+ const char *name,
+ const char *key_description,
+ int keyslot,
+ uint32_t flags)
+{
+ char *passphrase;
+ size_t passphrase_size;
+ int r;
+
+ if (!cd || !key_description)
+ return -EINVAL;
+
+ log_dbg(cd, "%s volume %s [keyslot %d] using passphrase in keyring.",
+ name ? "Activating" : "Checking", name ?: "passphrase", keyslot);
+
+ if (!kernel_keyring_support()) {
+ log_err(cd, _("Kernel keyring is not supported by the kernel."));
+ return -EINVAL;
+ }
+
+ r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
+ if (r < 0)
+ return r;
+
+ r = keyring_get_passphrase(key_description, &passphrase, &passphrase_size);
+ if (r < 0) {
+ log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
+ return -EINVAL;
+ }
+
+ r = _activate_by_passphrase(cd, name, keyslot, passphrase, passphrase_size, flags);
+
+ crypt_safe_free(passphrase);
+
+ return r;
+}
+
+/*
+ * Workaround serializing parallel activations that use a memory-hard PBKDF.
+ * In specific situations (e.g. systemd activation) parallel unlocking can
+ * trigger the OOM killer. For now, let's provide this ugly way to serialize
+ * unlocking of devices.
+ */
+int crypt_serialize_lock(struct crypt_device *cd)
+{
+ if (!cd->memory_hard_pbkdf_lock_enabled)
+ return 0;
+
+ log_dbg(cd, "Taking global memory-hard access serialization lock.");
+ if (crypt_write_lock(cd, "memory-hard-access", true, &cd->pbkdf_memory_hard_lock)) {
+ log_err(cd, _("Failed to acquire global memory-hard access serialization lock."));
+ cd->pbkdf_memory_hard_lock = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void crypt_serialize_unlock(struct crypt_device *cd)
+{
+ if (!cd->memory_hard_pbkdf_lock_enabled)
+ return;
+
+ crypt_unlock_internal(cd, cd->pbkdf_memory_hard_lock);
+ cd->pbkdf_memory_hard_lock = NULL;
+}
+
+crypt_reencrypt_info crypt_reencrypt_status(struct crypt_device *cd,
+ struct crypt_params_reencrypt *params)
+{
+ if (params)
+ memset(params, 0, sizeof(*params));
+
+ if (!cd || !isLUKS(cd->type))
+ return CRYPT_REENCRYPT_INVALID;
+
+ if (isLUKS1(cd->type))
+ return CRYPT_REENCRYPT_NONE;
+
+ if (_onlyLUKS2(cd, CRYPT_CD_QUIET, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
+ return CRYPT_REENCRYPT_INVALID;
+
+ return LUKS2_reencrypt_get_params(&cd->u.luks2.hdr, params);
+}
+
+static void __attribute__((destructor)) libcryptsetup_exit(void)
+{
+ crypt_token_unload_external_all(NULL);
+
+ crypt_backend_destroy();
+ crypt_random_exit();
+}
diff --git a/lib/tcrypt/tcrypt.c b/lib/tcrypt/tcrypt.c
new file mode 100644
index 0000000..60e4966
--- /dev/null
+++ b/lib/tcrypt/tcrypt.c
@@ -0,0 +1,1136 @@
+/*
+ * TCRYPT (TrueCrypt-compatible) and VeraCrypt volume handling
+ *
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2012-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "libcryptsetup.h"
+#include "tcrypt.h"
+#include "internal.h"
+
+/* TCRYPT PBKDF variants */
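+/*
+ * For VeraCrypt volumes unlocked with a nonzero PIM, the effective iteration
+ * count is derived from the fields below as
+ * veracrypt_pim_const + PIM * veracrypt_pim_mult.
+ */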
+static const struct {
+ unsigned int legacy:1;
+ unsigned int veracrypt:1;
+ const char *name;
+ const char *hash;
+ unsigned int iterations;
+ uint32_t veracrypt_pim_const;
+ uint32_t veracrypt_pim_mult;
+} tcrypt_kdf[] = {
+ { 0, 0, "pbkdf2", "ripemd160", 2000, 0, 0 },
+ { 0, 0, "pbkdf2", "ripemd160", 1000, 0, 0 },
+ { 0, 0, "pbkdf2", "sha512", 1000, 0, 0 },
+ { 0, 0, "pbkdf2", "whirlpool", 1000, 0, 0 },
+ { 1, 0, "pbkdf2", "sha1", 2000, 0, 0 },
+ { 0, 1, "pbkdf2", "sha512", 500000, 15000, 1000 },
+ { 0, 1, "pbkdf2", "whirlpool", 500000, 15000, 1000 },
+ { 0, 1, "pbkdf2", "sha256", 500000, 15000, 1000 }, // VeraCrypt 1.0f
+ { 0, 1, "pbkdf2", "sha256", 200000, 0, 2048 }, // boot only
+ { 0, 1, "pbkdf2", "ripemd160", 655331, 15000, 1000 },
+ { 0, 1, "pbkdf2", "ripemd160", 327661, 0, 2048 }, // boot only
+ { 0, 1, "pbkdf2", "stribog512",500000, 15000, 1000 },
+// { 0, 1, "pbkdf2", "stribog512",200000, 0, 2048 }, // boot only
+ { 0, 0, NULL, NULL, 0, 0, 0 }
+};
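+
+/*
+ * For the VeraCrypt entries above, a nonzero PIM (Personal Iterations
+ * Multiplier) overrides the default iteration count:
+ * iterations = veracrypt_pim_const + veracrypt_pim_mult * PIM
+ * (see TCRYPT_init_hdr below).
+ */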
+
+struct tcrypt_alg {
+ const char *name;
+ unsigned int key_size;
+ unsigned int iv_size;
+ unsigned int key_offset;
+ unsigned int iv_offset; /* or tweak key offset */
+ unsigned int key_extra_size;
+};
+
+struct tcrypt_algs {
+ unsigned int legacy:1;
+ unsigned int chain_count;
+ unsigned int chain_key_size;
+ const char *long_name;
+ const char *mode;
+ struct tcrypt_alg cipher[3];
+};
+
+/* TCRYPT cipher variants */
+static struct tcrypt_algs tcrypt_cipher[] = {
+/* XTS mode */
+{0,1,64,"aes","xts-plain64",
+ {{"aes", 64,16,0,32,0}}},
+{0,1,64,"serpent","xts-plain64",
+ {{"serpent",64,16,0,32,0}}},
+{0,1,64,"twofish","xts-plain64",
+ {{"twofish",64,16,0,32,0}}},
+{0,2,128,"twofish-aes","xts-plain64",
+ {{"twofish",64,16, 0,64,0},
+ {"aes", 64,16,32,96,0}}},
+{0,3,192,"serpent-twofish-aes","xts-plain64",
+ {{"serpent",64,16, 0, 96,0},
+ {"twofish",64,16,32,128,0},
+ {"aes", 64,16,64,160,0}}},
+{0,2,128,"aes-serpent","xts-plain64",
+ {{"aes", 64,16, 0,64,0},
+ {"serpent",64,16,32,96,0}}},
+{0,3,192,"aes-twofish-serpent","xts-plain64",
+ {{"aes", 64,16, 0, 96,0},
+ {"twofish",64,16,32,128,0},
+ {"serpent",64,16,64,160,0}}},
+{0,2,128,"serpent-twofish","xts-plain64",
+ {{"serpent",64,16, 0,64,0},
+ {"twofish",64,16,32,96,0}}},
+{0,1,64,"camellia","xts-plain64",
+ {{"camellia", 64,16,0,32,0}}},
+{0,1,64,"kuznyechik","xts-plain64",
+ {{"kuznyechik", 64,16,0,32,0}}},
+{0,2,128,"kuznyechik-camellia","xts-plain64",
+ {{"kuznyechik",64,16, 0,64,0},
+ {"camellia", 64,16,32,96,0}}},
+{0,2,128,"twofish-kuznyechik","xts-plain64",
+ {{"twofish", 64,16, 0,64,0},
+ {"kuznyechik",64,16,32,96,0}}},
+{0,2,128,"serpent-camellia","xts-plain64",
+ {{"serpent", 64,16, 0,64,0},
+ {"camellia", 64,16,32,96,0}}},
+{0,2,128,"aes-kuznyechik","xts-plain64",
+ {{"aes", 64,16, 0,64,0},
+ {"kuznyechik",64,16,32,96,0}}},
+{0,3,192,"camellia-serpent-kuznyechik","xts-plain64",
+ {{"camellia", 64,16, 0, 96,0},
+ {"serpent", 64,16,32,128,0},
+ {"kuznyechik",64,16,64,160,0}}},
+
+/* LRW mode */
+{0,1,48,"aes","lrw-benbi",
+ {{"aes", 48,16,32,0,0}}},
+{0,1,48,"serpent","lrw-benbi",
+ {{"serpent",48,16,32,0,0}}},
+{0,1,48,"twofish","lrw-benbi",
+ {{"twofish",48,16,32,0,0}}},
+{0,2,96,"twofish-aes","lrw-benbi",
+ {{"twofish",48,16,32,0,0},
+ {"aes", 48,16,64,0,0}}},
+{0,3,144,"serpent-twofish-aes","lrw-benbi",
+ {{"serpent",48,16,32,0,0},
+ {"twofish",48,16,64,0,0},
+ {"aes", 48,16,96,0,0}}},
+{0,2,96,"aes-serpent","lrw-benbi",
+ {{"aes", 48,16,32,0,0},
+ {"serpent",48,16,64,0,0}}},
+{0,3,144,"aes-twofish-serpent","lrw-benbi",
+ {{"aes", 48,16,32,0,0},
+ {"twofish",48,16,64,0,0},
+ {"serpent",48,16,96,0,0}}},
+{0,2,96,"serpent-twofish", "lrw-benbi",
+ {{"serpent",48,16,32,0,0},
+ {"twofish",48,16,64,0,0}}},
+
+/* Kernel LRW block size is fixed to 16 bytes for GF(2^128),
+ * thus it cannot be used with blowfish, where the block is 8 bytes.
+ * There is also no GF(2^64) support.
+{1,1,64,"blowfish_le","lrw-benbi",
+ {{"blowfish_le",64,8,32,0,0}}},
+{1,2,112,"blowfish_le-aes","lrw-benbi",
+ {{"blowfish_le",64, 8,32,0,0},
+ {"aes", 48,16,88,0,0}}},
+{1,3,160,"serpent-blowfish_le-aes","lrw-benbi",
+ {{"serpent", 48,16, 32,0,0},
+ {"blowfish_le",64, 8, 64,0,0},
+ {"aes", 48,16,120,0,0}}},*/
+
+/*
+ * CBC + "outer" CBC (both with whitening)
+ * chain_key_size: alg_keys_bytes + IV_seed_bytes + whitening_bytes
+ */
+{1,1,32+16+16,"aes","cbc-tcw",
+ {{"aes", 32,16,32,0,32}}},
+{1,1,32+16+16,"serpent","cbc-tcw",
+ {{"serpent",32,16,32,0,32}}},
+{1,1,32+16+16,"twofish","cbc-tcw",
+ {{"twofish",32,16,32,0,32}}},
+{1,2,64+16+16,"twofish-aes","cbci-tcrypt",
+ {{"twofish",32,16,32,0,0},
+ {"aes", 32,16,64,0,32}}},
+{1,3,96+16+16,"serpent-twofish-aes","cbci-tcrypt",
+ {{"serpent",32,16,32,0,0},
+ {"twofish",32,16,64,0,0},
+ {"aes", 32,16,96,0,32}}},
+{1,2,64+16+16,"aes-serpent","cbci-tcrypt",
+ {{"aes", 32,16,32,0,0},
+ {"serpent",32,16,64,0,32}}},
+{1,3,96+16+16,"aes-twofish-serpent", "cbci-tcrypt",
+ {{"aes", 32,16,32,0,0},
+ {"twofish",32,16,64,0,0},
+ {"serpent",32,16,96,0,32}}},
+{1,2,64+16+16,"serpent-twofish", "cbci-tcrypt",
+ {{"serpent",32,16,32,0,0},
+ {"twofish",32,16,64,0,32}}},
+{1,1,16+8+16,"cast5","cbc-tcw",
+ {{"cast5", 16,8,32,0,24}}},
+{1,1,24+8+16,"des3_ede","cbc-tcw",
+ {{"des3_ede",24,8,32,0,24}}},
+{1,1,56+8+16,"blowfish_le","cbc-tcrypt",
+ {{"blowfish_le",56,8,32,0,24}}},
+{1,2,88+16+16,"blowfish_le-aes","cbc-tcrypt",
+ {{"blowfish_le",56, 8,32,0,0},
+ {"aes", 32,16,88,0,32}}},
+{1,3,120+16+16,"serpent-blowfish_le-aes","cbc-tcrypt",
+ {{"serpent", 32,16, 32,0,0},
+ {"blowfish_le",56, 8, 64,0,0},
+ {"aes", 32,16,120,0,32}}},
+{}
+};
+
+static int TCRYPT_hdr_from_disk(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params,
+ int kdf_index, int cipher_index)
+{
+ uint32_t crc32;
+ size_t size;
+
+ /* Check CRC32 of header */
+ size = TCRYPT_HDR_LEN - sizeof(hdr->d.keys) - sizeof(hdr->d.header_crc32);
+ crc32 = crypt_crc32(~0, (unsigned char*)&hdr->d, size) ^ ~0;
+ if (be16_to_cpu(hdr->d.version) > 3 &&
+ crc32 != be32_to_cpu(hdr->d.header_crc32)) {
+ log_dbg(cd, "TCRYPT header CRC32 mismatch.");
+ return -EINVAL;
+ }
+
+ /* Check CRC32 of keys */
+ crc32 = crypt_crc32(~0, (unsigned char*)hdr->d.keys, sizeof(hdr->d.keys)) ^ ~0;
+ if (crc32 != be32_to_cpu(hdr->d.keys_crc32)) {
+ log_dbg(cd, "TCRYPT keys CRC32 mismatch.");
+ return -EINVAL;
+ }
+
+ /* Convert header to cpu format */
+ hdr->d.version = be16_to_cpu(hdr->d.version);
+ hdr->d.version_tc = be16_to_cpu(hdr->d.version_tc);
+
+ hdr->d.keys_crc32 = be32_to_cpu(hdr->d.keys_crc32);
+
+ hdr->d.hidden_volume_size = be64_to_cpu(hdr->d.hidden_volume_size);
+ hdr->d.volume_size = be64_to_cpu(hdr->d.volume_size);
+
+ hdr->d.mk_offset = be64_to_cpu(hdr->d.mk_offset);
+ if (!hdr->d.mk_offset)
+ hdr->d.mk_offset = 512;
+
+ hdr->d.mk_size = be64_to_cpu(hdr->d.mk_size);
+
+ hdr->d.flags = be32_to_cpu(hdr->d.flags);
+
+ hdr->d.sector_size = be32_to_cpu(hdr->d.sector_size);
+ if (!hdr->d.sector_size)
+ hdr->d.sector_size = 512;
+
+ hdr->d.header_crc32 = be32_to_cpu(hdr->d.header_crc32);
+
+ /* Set params */
+ params->passphrase = NULL;
+ params->passphrase_size = 0;
+ params->hash_name = tcrypt_kdf[kdf_index].hash;
+ params->key_size = tcrypt_cipher[cipher_index].chain_key_size;
+ params->cipher = tcrypt_cipher[cipher_index].long_name;
+ params->mode = tcrypt_cipher[cipher_index].mode;
+
+ return 0;
+}
+
+/*
+ * The kernel implements only the big-endian version of blowfish, so
+ * byte-swap both 32-bit halves of each block to emulate the little-endian variant.
+ */
+static void TCRYPT_swab_le(char *buf)
+{
+ uint32_t *l = VOIDP_CAST(uint32_t*)&buf[0];
+ uint32_t *r = VOIDP_CAST(uint32_t*)&buf[4];
+ *l = swab32(*l);
+ *r = swab32(*r);
+}
+
+static int decrypt_blowfish_le_cbc(struct tcrypt_alg *alg,
+ const char *key, char *buf)
+{
+ int bs = alg->iv_size;
+ char iv[8], iv_old[8];
+ struct crypt_cipher *cipher = NULL;
+ int i, j, r;
+
+ assert(bs == 8);
+
+ r = crypt_cipher_init(&cipher, "blowfish", "ecb",
+ &key[alg->key_offset], alg->key_size);
+ if (r < 0)
+ return r;
+
+ memcpy(iv, &key[alg->iv_offset], alg->iv_size);
+ for (i = 0; i < TCRYPT_HDR_LEN; i += bs) {
+ memcpy(iv_old, &buf[i], bs);
+ TCRYPT_swab_le(&buf[i]);
+ r = crypt_cipher_decrypt(cipher, &buf[i], &buf[i],
+ bs, NULL, 0);
+ TCRYPT_swab_le(&buf[i]);
+ if (r < 0)
+ break;
+ for (j = 0; j < bs; j++)
+ buf[i + j] ^= iv[j];
+ memcpy(iv, iv_old, bs);
+ }
+
+ crypt_cipher_destroy(cipher);
+ crypt_safe_memzero(iv, bs);
+ crypt_safe_memzero(iv_old, bs);
+ return r;
+}
+
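+/*
+ * Legacy CBC modes whiten the header: the buffer is XORed with a
+ * repeating 8-byte whitening key (callers pass &key[8]).
+ */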
+static void TCRYPT_remove_whitening(char *buf, const char *key)
+{
+ int j;
+
+ for (j = 0; j < TCRYPT_HDR_LEN; j++)
+ buf[j] ^= key[j % 8];
+}
+
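+/*
+ * Assemble the backend key from the TCRYPT key material:
+ * XTS: data key followed by the tweak key (key_size/2 bytes each),
+ * LRW: cipher key followed by the 16-byte LRW tweak key from the pool start,
+ * CBC: cipher key followed by IV seed and whitening bytes.
+ */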
+static void TCRYPT_copy_key(struct tcrypt_alg *alg, const char *mode,
+ char *out_key, const char *key)
+{
+ int ks2;
+ if (!strncmp(mode, "xts", 3)) {
+ ks2 = alg->key_size / 2;
+ memcpy(out_key, &key[alg->key_offset], ks2);
+ memcpy(&out_key[ks2], &key[alg->iv_offset], ks2);
+ } else if (!strncmp(mode, "lrw", 3)) {
+ ks2 = alg->key_size - TCRYPT_LRW_IKEY_LEN;
+ memcpy(out_key, &key[alg->key_offset], ks2);
+ memcpy(&out_key[ks2], key, TCRYPT_LRW_IKEY_LEN);
+ } else if (!strncmp(mode, "cbc", 3)) {
+ memcpy(out_key, &key[alg->key_offset], alg->key_size);
+ /* IV + whitening */
+ memcpy(&out_key[alg->key_size], &key[alg->iv_offset],
+ alg->key_extra_size);
+ }
+}
+
+static int TCRYPT_decrypt_hdr_one(struct tcrypt_alg *alg, const char *mode,
+ const char *key, struct tcrypt_phdr *hdr)
+{
+ char backend_key[TCRYPT_HDR_KEY_LEN];
+ char iv[TCRYPT_HDR_IV_LEN] = {};
+ char mode_name[MAX_CIPHER_LEN + 1];
+ struct crypt_cipher *cipher;
+ char *c, *buf = (char*)&hdr->e;
+ int r;
+
+ /* Remove IV if present */
+ mode_name[MAX_CIPHER_LEN] = '\0';
+ strncpy(mode_name, mode, MAX_CIPHER_LEN);
+ c = strchr(mode_name, '-');
+ if (c)
+ *c = '\0';
+
+ if (!strncmp(mode, "lrw", 3))
+ iv[alg->iv_size - 1] = 1;
+ else if (!strncmp(mode, "cbc", 3)) {
+ TCRYPT_remove_whitening(buf, &key[8]);
+ if (!strcmp(alg->name, "blowfish_le"))
+ return decrypt_blowfish_le_cbc(alg, key, buf);
+ memcpy(iv, &key[alg->iv_offset], alg->iv_size);
+ }
+
+ TCRYPT_copy_key(alg, mode, backend_key, key);
+ r = crypt_cipher_init(&cipher, alg->name, mode_name,
+ backend_key, alg->key_size);
+ if (!r) {
+ r = crypt_cipher_decrypt(cipher, buf, buf, TCRYPT_HDR_LEN,
+ iv, alg->iv_size);
+ crypt_cipher_destroy(cipher);
+ }
+
+ crypt_safe_memzero(backend_key, sizeof(backend_key));
+ crypt_safe_memzero(iv, TCRYPT_HDR_IV_LEN);
+ return r;
+}
+
+/*
+ * For chained ciphers in CBC mode we need "outer" CBC decryption.
+ * The backend doesn't provide this, so implement it here directly using ECB.
+ */
+static int TCRYPT_decrypt_cbci(struct tcrypt_algs *ciphers,
+ const char *key, struct tcrypt_phdr *hdr)
+{
+ struct crypt_cipher *cipher[3];
+ unsigned int bs = ciphers->cipher[0].iv_size;
+ char *buf = (char*)&hdr->e, iv[16], iv_old[16];
+ unsigned int i, j;
+ int r = -EINVAL;
+
+ assert(ciphers->chain_count <= 3);
+ assert(bs <= 16);
+
+ TCRYPT_remove_whitening(buf, &key[8]);
+
+ memcpy(iv, &key[ciphers->cipher[0].iv_offset], bs);
+
+ /* Initialize all ciphers in chain in ECB mode */
+ for (j = 0; j < ciphers->chain_count; j++)
+ cipher[j] = NULL;
+ for (j = 0; j < ciphers->chain_count; j++) {
+ r = crypt_cipher_init(&cipher[j], ciphers->cipher[j].name, "ecb",
+ &key[ciphers->cipher[j].key_offset],
+ ciphers->cipher[j].key_size);
+ if (r < 0)
+ goto out;
+ }
+
+ /* CBC decryption with the whole cipher chain applied inside the loop */
+ for (i = 0; i < TCRYPT_HDR_LEN; i += bs) {
+ memcpy(iv_old, &buf[i], bs);
+ for (j = ciphers->chain_count; j > 0; j--) {
+ r = crypt_cipher_decrypt(cipher[j - 1], &buf[i], &buf[i],
+ bs, NULL, 0);
+ if (r < 0)
+ goto out;
+ }
+ for (j = 0; j < bs; j++)
+ buf[i + j] ^= iv[j];
+ memcpy(iv, iv_old, bs);
+ }
+out:
+ for (j = 0; j < ciphers->chain_count; j++)
+ if (cipher[j])
+ crypt_cipher_destroy(cipher[j]);
+
+ crypt_safe_memzero(iv, bs);
+ crypt_safe_memzero(iv_old, bs);
+ return r;
+}
+
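+/*
+ * Try to decrypt the header with every compiled-in cipher chain; on
+ * success (header magic found) store the decrypted header and return
+ * the chain index, otherwise return a negative errno.
+ */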
+static int TCRYPT_decrypt_hdr(struct crypt_device *cd, struct tcrypt_phdr *hdr,
+ const char *key, struct crypt_params_tcrypt *params)
+{
+ struct tcrypt_phdr hdr2;
+ int i, j, r = -EINVAL;
+
+ for (i = 0; tcrypt_cipher[i].chain_count; i++) {
+ if (params->cipher && !strstr(tcrypt_cipher[i].long_name, params->cipher))
+ continue;
+ if (!(params->flags & CRYPT_TCRYPT_LEGACY_MODES) && tcrypt_cipher[i].legacy)
+ continue;
+ log_dbg(cd, "TCRYPT: trying cipher %s-%s",
+ tcrypt_cipher[i].long_name, tcrypt_cipher[i].mode);
+
+ memcpy(&hdr2.e, &hdr->e, TCRYPT_HDR_LEN);
+
+ if (!strncmp(tcrypt_cipher[i].mode, "cbci", 4))
+ r = TCRYPT_decrypt_cbci(&tcrypt_cipher[i], key, &hdr2);
+ else for (j = tcrypt_cipher[i].chain_count - 1; j >= 0 ; j--) {
+ if (!tcrypt_cipher[i].cipher[j].name)
+ continue;
+ r = TCRYPT_decrypt_hdr_one(&tcrypt_cipher[i].cipher[j],
+ tcrypt_cipher[i].mode, key, &hdr2);
+ if (r < 0)
+ break;
+ }
+
+ if (r < 0) {
+ log_dbg(cd, "TCRYPT: returned error %d, skipped.", r);
+ if (r == -ENOTSUP)
+ break;
+ r = -ENOENT;
+ continue;
+ }
+
+ if (!strncmp(hdr2.d.magic, TCRYPT_HDR_MAGIC, TCRYPT_HDR_MAGIC_LEN)) {
+ log_dbg(cd, "TCRYPT: Signature magic detected.");
+ memcpy(&hdr->e, &hdr2.e, TCRYPT_HDR_LEN);
+ r = i;
+ break;
+ }
+ if ((params->flags & CRYPT_TCRYPT_VERA_MODES) &&
+ !strncmp(hdr2.d.magic, VCRYPT_HDR_MAGIC, TCRYPT_HDR_MAGIC_LEN)) {
+ log_dbg(cd, "TCRYPT: Signature magic detected (Veracrypt).");
+ memcpy(&hdr->e, &hdr2.e, TCRYPT_HDR_LEN);
+ r = i;
+ break;
+ }
+ r = -EPERM;
+ }
+
+ crypt_safe_memzero(&hdr2, sizeof(hdr2));
+ return r;
+}
+
+static int TCRYPT_pool_keyfile(struct crypt_device *cd,
+ unsigned char pool[VCRYPT_KEY_POOL_LEN],
+ const char *keyfile, int keyfiles_pool_length)
+{
+ unsigned char *data;
+ int i, j, fd, data_size, r = -EIO;
+ uint32_t crc;
+
+ log_dbg(cd, "TCRYPT: using keyfile %s.", keyfile);
+
+ data = malloc(TCRYPT_KEYFILE_LEN);
+ if (!data)
+ return -ENOMEM;
+ memset(data, 0, TCRYPT_KEYFILE_LEN);
+
+ fd = open(keyfile, O_RDONLY);
+ if (fd < 0) {
+ log_err(cd, _("Failed to open key file."));
+ goto out;
+ }
+
+ data_size = read_buffer(fd, data, TCRYPT_KEYFILE_LEN);
+ close(fd);
+ if (data_size < 0) {
+ log_err(cd, _("Error reading keyfile %s."), keyfile);
+ goto out;
+ }
+
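+ /*
+ * Mix the keyfile into the pool: update a running CRC32 byte by byte
+ * and add its four bytes to consecutive pool positions, wrapping
+ * at the pool length.
+ */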
+ for (i = 0, j = 0, crc = ~0U; i < data_size; i++) {
+ crc = crypt_crc32(crc, &data[i], 1);
+ pool[j++] += (unsigned char)(crc >> 24);
+ pool[j++] += (unsigned char)(crc >> 16);
+ pool[j++] += (unsigned char)(crc >> 8);
+ pool[j++] += (unsigned char)(crc);
+ j %= keyfiles_pool_length;
+ }
+ r = 0;
+out:
+ crypt_safe_memzero(&crc, sizeof(crc));
+ crypt_safe_memzero(data, TCRYPT_KEYFILE_LEN);
+ free(data);
+
+ return r;
+}
+
+static int TCRYPT_init_hdr(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params)
+{
+ unsigned char pwd[VCRYPT_KEY_POOL_LEN] = {};
+ size_t passphrase_size, max_passphrase_size;
+ char *key;
+ unsigned int i, skipped = 0, iterations;
+ int r = -EPERM, keyfiles_pool_length;
+
+ if (posix_memalign((void*)&key, crypt_getpagesize(), TCRYPT_HDR_KEY_LEN))
+ return -ENOMEM;
+
+ if (params->flags & CRYPT_TCRYPT_VERA_MODES &&
+ params->passphrase_size > TCRYPT_KEY_POOL_LEN) {
+ /* Really. Keyfile pool length depends on passphrase size in Veracrypt. */
+ max_passphrase_size = VCRYPT_KEY_POOL_LEN;
+ keyfiles_pool_length = VCRYPT_KEY_POOL_LEN;
+ } else {
+ max_passphrase_size = TCRYPT_KEY_POOL_LEN;
+ keyfiles_pool_length = TCRYPT_KEY_POOL_LEN;
+ }
+
+ if (params->keyfiles_count)
+ passphrase_size = max_passphrase_size;
+ else
+ passphrase_size = params->passphrase_size;
+
+ if (params->passphrase_size > max_passphrase_size) {
+ log_err(cd, _("Maximum TCRYPT passphrase length (%zu) exceeded."),
+ max_passphrase_size);
+ goto out;
+ }
+
+ /* Calculate pool content from keyfiles */
+ for (i = 0; i < params->keyfiles_count; i++) {
+ r = TCRYPT_pool_keyfile(cd, pwd, params->keyfiles[i], keyfiles_pool_length);
+ if (r < 0)
+ goto out;
+ }
+
+ /* If provided password, combine it with pool */
+ for (i = 0; i < params->passphrase_size; i++)
+ pwd[i] += params->passphrase[i];
+
+ for (i = 0; tcrypt_kdf[i].name; i++) {
+ if (params->hash_name && strcmp(params->hash_name, tcrypt_kdf[i].hash))
+ continue;
+ if (!(params->flags & CRYPT_TCRYPT_LEGACY_MODES) && tcrypt_kdf[i].legacy)
+ continue;
+ if (!(params->flags & CRYPT_TCRYPT_VERA_MODES) && tcrypt_kdf[i].veracrypt)
+ continue;
+ if ((params->flags & CRYPT_TCRYPT_VERA_MODES) && params->veracrypt_pim) {
+ /* Do not try TrueCrypt modes if we have PIM value */
+ if (!tcrypt_kdf[i].veracrypt)
+ continue;
+ /* adjust iterations to given PIM cmdline parameter */
+ iterations = tcrypt_kdf[i].veracrypt_pim_const +
+ (tcrypt_kdf[i].veracrypt_pim_mult * params->veracrypt_pim);
+ } else
+ iterations = tcrypt_kdf[i].iterations;
+ /* Derive header key */
+ log_dbg(cd, "TCRYPT: trying KDF: %s-%s-%d%s.",
+ tcrypt_kdf[i].name, tcrypt_kdf[i].hash, tcrypt_kdf[i].iterations,
+ params->veracrypt_pim && tcrypt_kdf[i].veracrypt ? "-PIM" : "");
+ r = crypt_pbkdf(tcrypt_kdf[i].name, tcrypt_kdf[i].hash,
+ (char*)pwd, passphrase_size,
+ hdr->salt, TCRYPT_HDR_SALT_LEN,
+ key, TCRYPT_HDR_KEY_LEN,
+ iterations, 0, 0);
+ if (r < 0) {
+ log_verbose(cd, _("PBKDF2 hash algorithm %s not available, skipping."),
+ tcrypt_kdf[i].hash);
+ skipped++;
+ r = -EPERM;
+ continue;
+ }
+
+ /* Decrypt header */
+ r = TCRYPT_decrypt_hdr(cd, hdr, key, params);
+ if (r == -ENOENT) {
+ skipped++;
+ r = -EPERM;
+ continue;
+ }
+ if (r != -EPERM)
+ break;
+ }
+
+ if ((r < 0 && skipped && skipped == i) || r == -ENOTSUP) {
+ log_err(cd, _("Required kernel crypto interface not available."));
+#ifdef ENABLE_AF_ALG
+ log_err(cd, _("Ensure you have algif_skcipher kernel module loaded."));
+#endif
+ r = -ENOTSUP;
+ }
+ if (r < 0)
+ goto out;
+
+ r = TCRYPT_hdr_from_disk(cd, hdr, params, i, r);
+ if (!r) {
+ log_dbg(cd, "TCRYPT: Magic: %s, Header version: %d, req. %d, sector %d"
+ ", mk_offset %" PRIu64 ", hidden_size %" PRIu64
+ ", volume size %" PRIu64, tcrypt_kdf[i].veracrypt ?
+ VCRYPT_HDR_MAGIC : TCRYPT_HDR_MAGIC,
+ (int)hdr->d.version, (int)hdr->d.version_tc, (int)hdr->d.sector_size,
+ hdr->d.mk_offset, hdr->d.hidden_volume_size, hdr->d.volume_size);
+ log_dbg(cd, "TCRYPT: Header cipher %s-%s, key size %zu",
+ params->cipher, params->mode, params->key_size);
+ }
+out:
+ crypt_safe_memzero(pwd, TCRYPT_KEY_POOL_LEN);
+ if (key)
+ crypt_safe_memzero(key, TCRYPT_HDR_KEY_LEN);
+ free(key);
+ return r;
+}
+
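+/*
+ * Read the TCRYPT header from the position selected by flags (system,
+ * hidden - current or old, backup, or the default offset 0) and try
+ * to decrypt it.
+ */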
+int TCRYPT_read_phdr(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params)
+{
+ struct device *base_device = NULL, *device = crypt_metadata_device(cd);
+ ssize_t hdr_size = sizeof(struct tcrypt_phdr);
+ char *base_device_path;
+ int devfd, r;
+
+ assert(sizeof(struct tcrypt_phdr) == 512);
+
+ log_dbg(cd, "Reading TCRYPT header of size %zu bytes from device %s.",
+ hdr_size, device_path(device));
+
+ if (params->flags & CRYPT_TCRYPT_SYSTEM_HEADER &&
+ crypt_dev_is_partition(device_path(device))) {
+ base_device_path = crypt_get_base_device(device_path(device));
+
+ log_dbg(cd, "Reading TCRYPT system header from device %s.", base_device_path ?: "?");
+ if (!base_device_path)
+ return -EINVAL;
+
+ r = device_alloc(cd, &base_device, base_device_path);
+ free(base_device_path);
+ if (r < 0)
+ return r;
+ devfd = device_open(cd, base_device, O_RDONLY);
+ } else
+ devfd = device_open(cd, device, O_RDONLY);
+
+ if (devfd < 0) {
+ device_free(cd, base_device);
+ log_err(cd, _("Cannot open device %s."), device_path(device));
+ return -EINVAL;
+ }
+
+ r = -EIO;
+ if (params->flags & CRYPT_TCRYPT_SYSTEM_HEADER) {
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), hdr, hdr_size,
+ TCRYPT_HDR_SYSTEM_OFFSET) == hdr_size) {
+ r = TCRYPT_init_hdr(cd, hdr, params);
+ }
+ } else if (params->flags & CRYPT_TCRYPT_HIDDEN_HEADER) {
+ if (params->flags & CRYPT_TCRYPT_BACKUP_HEADER) {
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), hdr, hdr_size,
+ TCRYPT_HDR_HIDDEN_OFFSET_BCK) == hdr_size)
+ r = TCRYPT_init_hdr(cd, hdr, params);
+ } else {
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), hdr, hdr_size,
+ TCRYPT_HDR_HIDDEN_OFFSET) == hdr_size)
+ r = TCRYPT_init_hdr(cd, hdr, params);
+ if (r && read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), hdr, hdr_size,
+ TCRYPT_HDR_HIDDEN_OFFSET_OLD) == hdr_size)
+ r = TCRYPT_init_hdr(cd, hdr, params);
+ }
+ } else if (params->flags & CRYPT_TCRYPT_BACKUP_HEADER) {
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), hdr, hdr_size,
+ TCRYPT_HDR_OFFSET_BCK) == hdr_size)
+ r = TCRYPT_init_hdr(cd, hdr, params);
+ } else if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), hdr, hdr_size, 0) == hdr_size)
+ r = TCRYPT_init_hdr(cd, hdr, params);
+
+ device_free(cd, base_device);
+ if (r < 0)
+ memset(hdr, 0, sizeof (*hdr));
+ return r;
+}
+
+static struct tcrypt_algs *TCRYPT_get_algs(const char *cipher, const char *mode)
+{
+ int i;
+
+ if (!cipher || !mode)
+ return NULL;
+
+ for (i = 0; tcrypt_cipher[i].chain_count; i++)
+ if (!strcmp(tcrypt_cipher[i].long_name, cipher) &&
+ !strcmp(tcrypt_cipher[i].mode, mode))
+ return &tcrypt_cipher[i];
+
+ return NULL;
+}
+
+int TCRYPT_activate(struct crypt_device *cd,
+ const char *name,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params,
+ uint32_t flags)
+{
+ char dm_name[PATH_MAX], dm_dev_name[PATH_MAX], cipher_spec[MAX_CIPHER_LEN*2+1];
+ char *part_path;
+ unsigned int i;
+ int r;
+ uint32_t req_flags, dmc_flags;
+ struct tcrypt_algs *algs;
+ enum devcheck device_check;
+ uint64_t offset = crypt_get_data_offset(cd);
+ struct volume_key *vk = NULL;
+ struct device *ptr_dev = crypt_data_device(cd), *device = NULL, *part_device = NULL;
+ struct crypt_dm_active_device dmd = {
+ .flags = flags
+ };
+
+ if (!hdr->d.version) {
+ log_dbg(cd, "TCRYPT: this function is not supported without encrypted header load.");
+ return -ENOTSUP;
+ }
+
+ if (hdr->d.sector_size % SECTOR_SIZE) {
+ log_err(cd, _("Activation is not supported for %d sector size."),
+ hdr->d.sector_size);
+ return -ENOTSUP;
+ }
+
+ if (strstr(params->mode, "-tcrypt")) {
+ log_err(cd, _("Kernel does not support activation for this TCRYPT legacy mode."));
+ return -ENOTSUP;
+ }
+
+ if (strstr(params->mode, "-tcw"))
+ req_flags = DM_TCW_SUPPORTED;
+ else
+ req_flags = DM_PLAIN64_SUPPORTED;
+
+ algs = TCRYPT_get_algs(params->cipher, params->mode);
+ if (!algs)
+ return -EINVAL;
+
+ if (params->flags & CRYPT_TCRYPT_SYSTEM_HEADER)
+ dmd.size = 0;
+ else if (params->flags & CRYPT_TCRYPT_HIDDEN_HEADER)
+ dmd.size = hdr->d.hidden_volume_size / SECTOR_SIZE;
+ else
+ dmd.size = hdr->d.volume_size / SECTOR_SIZE;
+
+ if (dmd.flags & CRYPT_ACTIVATE_SHARED)
+ device_check = DEV_OK;
+ else
+ device_check = DEV_EXCL;
+
+ if ((params->flags & CRYPT_TCRYPT_SYSTEM_HEADER) &&
+ !crypt_dev_is_partition(device_path(crypt_data_device(cd)))) {
+ part_path = crypt_get_partition_device(device_path(crypt_data_device(cd)),
+ crypt_get_data_offset(cd), dmd.size);
+ if (part_path) {
+ if (!device_alloc(cd, &part_device, part_path)) {
+ log_verbose(cd, _("Activating TCRYPT system encryption for partition %s."),
+ part_path);
+ ptr_dev = part_device;
+ offset = 0;
+ }
+ free(part_path);
+ } else
+ /*
+ * System encryption uses the whole device mapping;
+ * there can be active partitions on it.
+ */
+ device_check = DEV_OK;
+ }
+
+ r = device_block_adjust(cd, ptr_dev, device_check,
+ offset, &dmd.size, &dmd.flags);
+ if (r)
+ goto out;
+
+ /* From here, key size for every cipher must be the same */
+ vk = crypt_alloc_volume_key(algs->cipher[0].key_size +
+ algs->cipher[0].key_extra_size, NULL);
+ if (!vk) {
+ r = -ENOMEM;
+ goto out;
+ }
+
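+ /*
+ * Create one dm-crypt device per cipher in the chain, stacked on top of
+ * each other; intermediate mappings are named <name>_<n> and marked
+ * private, the topmost mapping gets the requested name.
+ */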
+ for (i = algs->chain_count; i > 0; i--) {
+ if (i == 1) {
+ dm_name[sizeof(dm_name)-1] = '\0';
+ strncpy(dm_name, name, sizeof(dm_name)-1);
+ dmd.flags = flags;
+ } else {
+ if (snprintf(dm_name, sizeof(dm_name), "%s_%d", name, i-1) < 0) {
+ r = -EINVAL;
+ break;
+ }
+ dmd.flags = flags | CRYPT_ACTIVATE_PRIVATE;
+ }
+
+ TCRYPT_copy_key(&algs->cipher[i-1], algs->mode,
+ vk->key, hdr->d.keys);
+
+ if (algs->chain_count != i) {
+ if (snprintf(dm_dev_name, sizeof(dm_dev_name), "%s/%s_%d", dm_get_dir(), name, i) < 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = device_alloc(cd, &device, dm_dev_name);
+ if (r)
+ break;
+ ptr_dev = device;
+ offset = 0;
+ }
+
+ r = snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", algs->cipher[i-1].name, algs->mode);
+ if (r < 0 || (size_t)r >= sizeof(cipher_spec)) {
+ r = -ENOMEM;
+ break;
+ }
+
+ r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, ptr_dev, vk,
+ cipher_spec, crypt_get_iv_offset(cd), offset,
+ crypt_get_integrity(cd),
+ crypt_get_integrity_tag_size(cd),
+ crypt_get_sector_size(cd));
+ if (r)
+ break;
+
+ log_dbg(cd, "Trying to activate TCRYPT device %s using cipher %s.",
+ dm_name, dmd.segment.u.crypt.cipher);
+ r = dm_create_device(cd, dm_name, CRYPT_TCRYPT, &dmd);
+
+ dm_targets_free(cd, &dmd);
+ device_free(cd, device);
+ device = NULL;
+
+ if (r)
+ break;
+ }
+
+ if (r < 0 &&
+ (dm_flags(cd, DM_CRYPT, &dmc_flags) || ((dmc_flags & req_flags) != req_flags))) {
+ log_err(cd, _("Kernel does not support TCRYPT compatible mapping."));
+ r = -ENOTSUP;
+ }
+
+out:
+ crypt_free_volume_key(vk);
+ device_free(cd, device);
+ device_free(cd, part_device);
+ return r;
+}
+
+static int TCRYPT_remove_one(struct crypt_device *cd, const char *name,
+ const char *base_uuid, int index, uint32_t flags)
+{
+ struct crypt_dm_active_device dmd;
+ char dm_name[PATH_MAX];
+ int r;
+
+ if (snprintf(dm_name, sizeof(dm_name), "%s_%d", name, index) < 0)
+ return -ENOMEM;
+
+ r = dm_status_device(cd, dm_name);
+ if (r < 0)
+ return r;
+
+ r = dm_query_device(cd, dm_name, DM_ACTIVE_UUID, &dmd);
+ if (!r && !strncmp(dmd.uuid, base_uuid, strlen(base_uuid)))
+ r = dm_remove_device(cd, dm_name, flags);
+
+ free(CONST_CAST(void*)dmd.uuid);
+ return r;
+}
+
+int TCRYPT_deactivate(struct crypt_device *cd, const char *name, uint32_t flags)
+{
+ struct crypt_dm_active_device dmd;
+ int r;
+
+ r = dm_query_device(cd, name, DM_ACTIVE_UUID, &dmd);
+ if (r < 0)
+ return r;
+ if (!dmd.uuid)
+ return -EINVAL;
+
+ r = dm_remove_device(cd, name, flags);
+ if (r < 0)
+ goto out;
+
+ r = TCRYPT_remove_one(cd, name, dmd.uuid, 1, flags);
+ if (r < 0)
+ goto out;
+
+ r = TCRYPT_remove_one(cd, name, dmd.uuid, 2, flags);
+out:
+ free(CONST_CAST(void*)dmd.uuid);
+ return (r == -ENODEV) ? 0 : r;
+}
+
+static int TCRYPT_status_one(struct crypt_device *cd, const char *name,
+ const char *base_uuid, int index,
+ size_t *key_size, char *cipher,
+ struct tcrypt_phdr *tcrypt_hdr,
+ struct device **device)
+{
+ struct crypt_dm_active_device dmd;
+ struct dm_target *tgt = &dmd.segment;
+ char dm_name[PATH_MAX], *c;
+ int r;
+
+ if (snprintf(dm_name, sizeof(dm_name), "%s_%d", name, index) < 0)
+ return -ENOMEM;
+
+ r = dm_status_device(cd, dm_name);
+ if (r < 0)
+ return r;
+
+ r = dm_query_device(cd, dm_name, DM_ACTIVE_DEVICE |
+ DM_ACTIVE_UUID |
+ DM_ACTIVE_CRYPT_CIPHER |
+ DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
+ if (r < 0)
+ return r;
+ if (!single_segment(&dmd) || tgt->type != DM_CRYPT) {
+ r = -ENOTSUP;
+ goto out;
+ }
+
+ r = 0;
+
+ if (!strncmp(dmd.uuid, base_uuid, strlen(base_uuid))) {
+ if ((c = strchr(tgt->u.crypt.cipher, '-')))
+ *c = '\0';
+ strcat(cipher, "-");
+ strncat(cipher, tgt->u.crypt.cipher, MAX_CIPHER_LEN);
+ *key_size += tgt->u.crypt.vk->keylength;
+ tcrypt_hdr->d.mk_offset = tgt->u.crypt.offset * SECTOR_SIZE;
+ device_free(cd, *device);
+ MOVE_REF(*device, tgt->data_device);
+ } else
+ r = -ENODEV;
+out:
+ dm_targets_free(cd, &dmd);
+ free(CONST_CAST(void*)dmd.uuid);
+ return r;
+}
+
+int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
+ const char *uuid,
+ const struct dm_target *tgt,
+ struct device **device,
+ struct crypt_params_tcrypt *tcrypt_params,
+ struct tcrypt_phdr *tcrypt_hdr)
+{
+ struct tcrypt_algs *algs;
+ char cipher[MAX_CIPHER_LEN * 4], mode[MAX_CIPHER_LEN+1], *tmp;
+ size_t key_size;
+ int r;
+
+ memset(tcrypt_params, 0, sizeof(*tcrypt_params));
+ memset(tcrypt_hdr, 0, sizeof(*tcrypt_hdr));
+ tcrypt_hdr->d.sector_size = SECTOR_SIZE;
+ tcrypt_hdr->d.mk_offset = tgt->u.crypt.offset * SECTOR_SIZE;
+
+ strncpy(cipher, tgt->u.crypt.cipher, MAX_CIPHER_LEN);
+ tmp = strchr(cipher, '-');
+ if (!tmp)
+ return -EINVAL;
+ *tmp = '\0';
+ mode[MAX_CIPHER_LEN] = '\0';
+ strncpy(mode, ++tmp, MAX_CIPHER_LEN);
+
+ key_size = tgt->u.crypt.vk->keylength;
+ r = TCRYPT_status_one(cd, name, uuid, 1, &key_size,
+ cipher, tcrypt_hdr, device);
+ if (!r)
+ r = TCRYPT_status_one(cd, name, uuid, 2, &key_size,
+ cipher, tcrypt_hdr, device);
+
+ if (r < 0 && r != -ENODEV)
+ return r;
+
+ algs = TCRYPT_get_algs(cipher, mode);
+ if (!algs || key_size != algs->chain_key_size)
+ return -EINVAL;
+
+ tcrypt_params->key_size = algs->chain_key_size;
+ tcrypt_params->cipher = algs->long_name;
+ tcrypt_params->mode = algs->mode;
+ return 0;
+}
+
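+/*
+ * Return the data offset in 512-byte sectors. Newer XTS volumes store it
+ * directly in mk_offset; hidden and legacy volumes derive it from the
+ * device size and fixed header offsets.
+ */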
+uint64_t TCRYPT_get_data_offset(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params)
+{
+ uint64_t size;
+
+ if (!hdr->d.version) {
+ /* No real header loaded (initialized from an active device); use the default mk_offset */
+ } else if (params->flags & CRYPT_TCRYPT_SYSTEM_HEADER) {
+ /* Mapping through whole device, not partition! */
+ if (crypt_dev_is_partition(device_path(crypt_data_device(cd))))
+ return 0;
+ } else if (params->mode && !strncmp(params->mode, "xts", 3)) {
+ if (hdr->d.version < 3)
+ return 1;
+
+ if (params->flags & CRYPT_TCRYPT_HIDDEN_HEADER) {
+ if (hdr->d.version > 3)
+ return (hdr->d.mk_offset / SECTOR_SIZE);
+ if (device_size(crypt_metadata_device(cd), &size) < 0)
+ return 0;
+ return (size - hdr->d.hidden_volume_size +
+ (TCRYPT_HDR_HIDDEN_OFFSET_OLD)) / SECTOR_SIZE;
+ }
+ } else if (params->flags & CRYPT_TCRYPT_HIDDEN_HEADER) {
+ if (device_size(crypt_metadata_device(cd), &size) < 0)
+ return 0;
+ return (size - hdr->d.hidden_volume_size +
+ (TCRYPT_HDR_HIDDEN_OFFSET_OLD)) / SECTOR_SIZE;
+ }
+
+ return hdr->d.mk_offset / SECTOR_SIZE;
+}
+
+uint64_t TCRYPT_get_iv_offset(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params)
+{
+ uint64_t iv_offset;
+
+ if (params->mode && !strncmp(params->mode, "xts", 3))
+ iv_offset = TCRYPT_get_data_offset(cd, hdr, params);
+ else if (params->mode && !strncmp(params->mode, "lrw", 3))
+ iv_offset = 0;
+ else
+ iv_offset = hdr->d.mk_offset / SECTOR_SIZE;
+
+ if (params->flags & CRYPT_TCRYPT_SYSTEM_HEADER)
+ iv_offset += crypt_dev_partition_offset(device_path(crypt_data_device(cd)));
+
+ return iv_offset;
+}
+
+int TCRYPT_get_volume_key(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params,
+ struct volume_key **vk)
+{
+ struct tcrypt_algs *algs;
+ unsigned int i, key_index;
+
+ if (!hdr->d.version) {
+ log_err(cd, _("This function is not supported without TCRYPT header load."));
+ return -ENOTSUP;
+ }
+
+ algs = TCRYPT_get_algs(params->cipher, params->mode);
+ if (!algs)
+ return -EINVAL;
+
+ *vk = crypt_alloc_volume_key(params->key_size, NULL);
+ if (!*vk)
+ return -ENOMEM;
+
+ for (i = 0, key_index = 0; i < algs->chain_count; i++) {
+ TCRYPT_copy_key(&algs->cipher[i], algs->mode,
+ &(*vk)->key[key_index], hdr->d.keys);
+ key_index += algs->cipher[i].key_size;
+ }
+
+ return 0;
+}
+
+int TCRYPT_dump(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params)
+{
+ log_std(cd, "%s header information for %s\n",
+ hdr->d.magic[0] == 'T' ? "TCRYPT" : "VERACRYPT",
+ device_path(crypt_metadata_device(cd)));
+ if (hdr->d.version) {
+ log_std(cd, "Version: \t%d\n", hdr->d.version);
+ log_std(cd, "Driver req.:\t%x.%x\n", hdr->d.version_tc >> 8,
+ hdr->d.version_tc & 0xFF);
+
+ log_std(cd, "Sector size:\t%" PRIu32 "\n", hdr->d.sector_size);
+ log_std(cd, "MK offset:\t%" PRIu64 "\n", hdr->d.mk_offset);
+ log_std(cd, "PBKDF2 hash:\t%s\n", params->hash_name);
+ }
+ log_std(cd, "Cipher chain:\t%s\n", params->cipher);
+ log_std(cd, "Cipher mode:\t%s\n", params->mode);
+ log_std(cd, "MK bits: \t%zu\n", params->key_size * 8);
+ return 0;
+}
diff --git a/lib/tcrypt/tcrypt.h b/lib/tcrypt/tcrypt.h
new file mode 100644
index 0000000..b95d74d
--- /dev/null
+++ b/lib/tcrypt/tcrypt.h
@@ -0,0 +1,120 @@
+/*
+ * TCRYPT (TrueCrypt-compatible) header definition
+ *
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2012-2023 Milan Broz
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_TCRYPT_H
+#define _CRYPTSETUP_TCRYPT_H
+
+#include <stdint.h>
+
+#define TCRYPT_HDR_SALT_LEN 64
+#define TCRYPT_HDR_IV_LEN 16
+#define TCRYPT_HDR_LEN 448
+#define TCRYPT_HDR_KEY_LEN 192
+#define TCRYPT_HDR_MAGIC "TRUE"
+#define VCRYPT_HDR_MAGIC "VERA"
+#define TCRYPT_HDR_MAGIC_LEN 4
+
+#define TCRYPT_HDR_HIDDEN_OFFSET_OLD -1536
+#define TCRYPT_HDR_HIDDEN_OFFSET 65536
+
+#define TCRYPT_HDR_HIDDEN_OFFSET_BCK -65536
+#define TCRYPT_HDR_OFFSET_BCK -131072
+
+#define TCRYPT_HDR_SYSTEM_OFFSET 31744
+
+#define TCRYPT_LRW_IKEY_LEN 16
+#define TCRYPT_KEY_POOL_LEN 64
+#define VCRYPT_KEY_POOL_LEN 128
+#define TCRYPT_KEYFILE_LEN 1048576
+
+#define TCRYPT_HDR_FLAG_SYSTEM (1 << 0)
+#define TCRYPT_HDR_FLAG_NONSYSTEM (1 << 1)
+
+struct tcrypt_phdr {
+ char salt[TCRYPT_HDR_SALT_LEN];
+
+ /* encrypted part, TCRYPT_HDR_LEN bytes */
+ union {
+ struct __attribute__((__packed__)) {
+ char magic[TCRYPT_HDR_MAGIC_LEN];
+ uint16_t version;
+ uint16_t version_tc;
+ uint32_t keys_crc32;
+ uint64_t _reserved1[2]; /* data/header ctime */
+ uint64_t hidden_volume_size;
+ uint64_t volume_size;
+ uint64_t mk_offset;
+ uint64_t mk_size;
+ uint32_t flags;
+ uint32_t sector_size;
+ uint8_t _reserved2[120];
+ uint32_t header_crc32;
+ char keys[256];
+ } d;
+ char e[TCRYPT_HDR_LEN];
+ };
+} __attribute__((__packed__));
+
+struct crypt_device;
+struct crypt_params_tcrypt;
+struct dm_target;
+struct volume_key;
+struct device;
+
+int TCRYPT_read_phdr(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params);
+
+int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
+ const char *uuid,
+ const struct dm_target *tgt,
+ struct device **device,
+ struct crypt_params_tcrypt *tcrypt_params,
+ struct tcrypt_phdr *tcrypt_hdr);
+
+int TCRYPT_activate(struct crypt_device *cd,
+ const char *name,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params,
+ uint32_t flags);
+
+int TCRYPT_deactivate(struct crypt_device *cd,
+ const char *name,
+ uint32_t flags);
+
+uint64_t TCRYPT_get_data_offset(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params);
+
+uint64_t TCRYPT_get_iv_offset(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params);
+
+int TCRYPT_get_volume_key(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params,
+ struct volume_key **vk);
+
+int TCRYPT_dump(struct crypt_device *cd,
+ struct tcrypt_phdr *hdr,
+ struct crypt_params_tcrypt *params);
+
+#endif
diff --git a/lib/utils.c b/lib/utils.c
new file mode 100644
index 0000000..bfcf60d
--- /dev/null
+++ b/lib/utils.c
@@ -0,0 +1,334 @@
+/*
+ * utils - miscellaneous device utilities for cryptsetup
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/utsname.h>
+
+#include "internal.h"
+
+size_t crypt_getpagesize(void)
+{
+ long r = sysconf(_SC_PAGESIZE);
+ return r <= 0 ? DEFAULT_MEM_ALIGNMENT : (size_t)r;
+}
+
+unsigned crypt_cpusonline(void)
+{
+ long r = sysconf(_SC_NPROCESSORS_ONLN);
+ return r < 0 ? 1 : r;
+}
+
+uint64_t crypt_getphysmemory_kb(void)
+{
+ long pagesize, phys_pages;
+ uint64_t phys_memory_kb;
+
+ pagesize = sysconf(_SC_PAGESIZE);
+ phys_pages = sysconf(_SC_PHYS_PAGES);
+
+ if (pagesize < 0 || phys_pages < 0)
+ return 0;
+
+ phys_memory_kb = pagesize / 1024;
+ phys_memory_kb *= phys_pages;
+
+ return phys_memory_kb;
+}
+
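+/*
+ * Temporarily raise the process priority (nice -18 when running as root)
+ * and save the old value so a later call with raise == false restores it.
+ */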
+void crypt_process_priority(struct crypt_device *cd, int *priority, bool raise)
+{
+ int _priority, new_priority;
+
+ if (raise) {
+ _priority = getpriority(PRIO_PROCESS, 0);
+ if (_priority < 0)
+ _priority = 0;
+ if (priority)
+ *priority = _priority;
+
+ /*
+ * Do not bother checking CAP_SYS_NICE as device activation
+ * requires CAP_SYSADMIN later anyway.
+ */
+ if (getuid() || geteuid())
+ new_priority = 0;
+ else
+ new_priority = -18;
+
+ if (setpriority(PRIO_PROCESS, 0, new_priority))
+ log_dbg(cd, "Cannot raise process priority.");
+ } else {
+ _priority = priority ? *priority : 0;
+ if (setpriority(PRIO_PROCESS, 0, _priority))
+ log_dbg(cd, "Cannot reset process priority.");
+ }
+}
+
+/* Keyfile processing */
+
+/*
+ * A simple call to lseek(2) might not be possible for some inputs (e.g.
+ * reading from a pipe), so this function instead reads up to BUFSIZ bytes
+ * at a time until the specified number of bytes has been discarded.
+ * It returns -1 on read error or when it reaches EOF before the requested
+ * number of bytes have been discarded.
+ */
+static int keyfile_seek(int fd, uint64_t bytes)
+{
+ char tmp[BUFSIZ];
+ size_t next_read;
+ ssize_t bytes_r;
+ off_t r;
+
+ r = lseek(fd, bytes, SEEK_CUR);
+ if (r > 0)
+ return 0;
+ if (r < 0 && errno != ESPIPE)
+ return -1;
+
+ while (bytes > 0) {
+ /* figure out how much to read */
+ next_read = bytes > sizeof(tmp) ? sizeof(tmp) : (size_t)bytes;
+
+ bytes_r = read(fd, tmp, next_read);
+ if (bytes_r < 0) {
+ if (errno == EINTR)
+ continue;
+
+ crypt_safe_memzero(tmp, sizeof(tmp));
+ /* read error */
+ return -1;
+ }
+
+ if (bytes_r == 0)
+ /* EOF */
+ break;
+
+ bytes -= bytes_r;
+ }
+
+ crypt_safe_memzero(tmp, sizeof(tmp));
+ return bytes == 0 ? 0 : -1;
+}
+
+int crypt_keyfile_device_read(struct crypt_device *cd, const char *keyfile,
+ char **key, size_t *key_size_read,
+ uint64_t keyfile_offset, size_t key_size,
+ uint32_t flags)
+{
+ int fd, regular_file, char_to_read = 0, char_read = 0, unlimited_read = 0;
+ int r = -EINVAL, newline;
+ char *pass = NULL;
+ size_t buflen, i;
+ uint64_t file_read_size;
+ struct stat st;
+
+ if (!key || !key_size_read)
+ return -EINVAL;
+
+ *key = NULL;
+ *key_size_read = 0;
+
+ fd = keyfile ? open(keyfile, O_RDONLY) : STDIN_FILENO;
+ if (fd < 0) {
+ log_err(cd, _("Failed to open key file."));
+ return -EINVAL;
+ }
+
+ if (isatty(fd)) {
+ log_err(cd, _("Cannot read keyfile from a terminal."));
+ goto out;
+ }
+
+ /* If not requested otherwise, we limit input to prevent memory exhaustion */
+ if (key_size == 0) {
+ key_size = DEFAULT_KEYFILE_SIZE_MAXKB * 1024 + 1;
+ unlimited_read = 1;
+ /* use 4k for buffer (page divisor but avoid huge pages) */
+ buflen = 4096 - 16; /* sizeof(struct safe_allocation); */
+ } else
+ buflen = key_size;
+
+ regular_file = 0;
+ if (keyfile) {
+ if (stat(keyfile, &st) < 0) {
+ log_err(cd, _("Failed to stat key file."));
+ goto out;
+ }
+ if (S_ISREG(st.st_mode)) {
+ regular_file = 1;
+ file_read_size = (uint64_t)st.st_size;
+
+ if (keyfile_offset > file_read_size) {
+ log_err(cd, _("Cannot seek to requested keyfile offset."));
+ goto out;
+ }
+ file_read_size -= keyfile_offset;
+
+ /* known keyfile size, alloc it in one step */
+ if (file_read_size >= (uint64_t)key_size)
+ buflen = key_size;
+ else if (file_read_size)
+ buflen = file_read_size;
+ }
+ }
+
+ pass = crypt_safe_alloc(buflen);
+ if (!pass) {
+ log_err(cd, _("Out of memory while reading passphrase."));
+ goto out;
+ }
+
+ /* Discard keyfile_offset bytes on input */
+ if (keyfile_offset && keyfile_seek(fd, keyfile_offset) < 0) {
+ log_err(cd, _("Cannot seek to requested keyfile offset."));
+ goto out;
+ }
+
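+ /* Read loop; the buffer grows in 4 KiB steps when the input size is not known in advance */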
+ for (i = 0, newline = 0; i < key_size; i += char_read) {
+ if (i == buflen) {
+ buflen += 4096;
+ pass = crypt_safe_realloc(pass, buflen);
+ if (!pass) {
+ log_err(cd, _("Out of memory while reading passphrase."));
+ r = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (flags & CRYPT_KEYFILE_STOP_EOL) {
+ /* If we should stop on a newline, we must read the input
+ * one character at a time. Otherwise we might end up
+ * having read some bytes after the newline, which we
+ * promised not to do.
+ */
+ char_to_read = 1;
+ } else {
+ /* char_to_read = min(key_size - i, buflen - i) */
+ char_to_read = key_size < buflen ?
+ key_size - i : buflen - i;
+ }
+ char_read = read_buffer(fd, &pass[i], char_to_read);
+ if (char_read < 0) {
+ log_err(cd, _("Error reading passphrase."));
+ r = -EPIPE;
+ goto out;
+ }
+
+ if (char_read == 0)
+ break;
+ /* Stop on newline only if CRYPT_KEYFILE_STOP_EOL was requested */
+ if ((flags & CRYPT_KEYFILE_STOP_EOL) && pass[i] == '\n') {
+ newline = 1;
+ pass[i] = '\0';
+ break;
+ }
+ }
+
+ /* Fail if piped input delivered nothing to read */
+ if (!i && !regular_file && !newline) {
+ log_err(cd, _("Nothing to read on input."));
+ r = -EPIPE;
+ goto out;
+ }
+
+ /* Fail if we exceeded internal default (no specified size) */
+ if (unlimited_read && i == key_size) {
+ log_err(cd, _("Maximum keyfile size exceeded."));
+ goto out;
+ }
+
+ if (!unlimited_read && i != key_size) {
+ log_err(cd, _("Cannot read requested amount of data."));
+ goto out;
+ }
+
+ *key = pass;
+ *key_size_read = i;
+ r = 0;
+out:
+ if (fd != STDIN_FILENO)
+ close(fd);
+
+ if (r)
+ crypt_safe_free(pass);
+ return r;
+}
+
+int crypt_keyfile_read(struct crypt_device *cd, const char *keyfile,
+ char **key, size_t *key_size_read,
+ size_t keyfile_offset, size_t keyfile_size_max,
+ uint32_t flags)
+{
+ return crypt_keyfile_device_read(cd, keyfile, key, key_size_read,
+ keyfile_offset, keyfile_size_max, flags);
+}
+
+int kernel_version(uint64_t *kversion)
+{
+ struct utsname uts;
+ uint16_t maj, min, patch, rel;
+ int r = -EINVAL;
+
+ if (uname(&uts) < 0)
+ return r;
+
+ if (sscanf(uts.release, "%" SCNu16 ".%" SCNu16 ".%" SCNu16 "-%" SCNu16,
+ &maj, &min, &patch, &rel) == 4)
+ r = 0;
+ else if (sscanf(uts.release, "%" SCNu16 ".%" SCNu16 ".%" SCNu16,
+ &maj, &min, &patch) == 3) {
+ rel = 0;
+ r = 0;
+ }
+
+ if (!r)
+ *kversion = compact_version(maj, min, patch, rel);
+
+ return r;
+}
+
+bool crypt_string_in(const char *str, char **list, size_t list_size)
+{
+ size_t i;
+
+ for (i = 0; *list && i < list_size; i++, list++)
+ if (!strcmp(str, *list))
+ return true;
+
+ return false;
+}
+
+/* compare two strings (allows NULL values) */
+int crypt_strcmp(const char *a, const char *b)
+{
+ if (!a && !b)
+ return 0;
+ else if (!a || !b)
+ return 1;
+ return strcmp(a, b);
+}
diff --git a/lib/utils_benchmark.c b/lib/utils_benchmark.c
new file mode 100644
index 0000000..728e4df
--- /dev/null
+++ b/lib/utils_benchmark.c
@@ -0,0 +1,218 @@
+/*
+ * libcryptsetup - cryptsetup library, cipher benchmark
+ *
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2012-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+
+#include "internal.h"
+
+int crypt_benchmark(struct crypt_device *cd,
+ const char *cipher,
+ const char *cipher_mode,
+ size_t volume_key_size,
+ size_t iv_size,
+ size_t buffer_size,
+ double *encryption_mbs,
+ double *decryption_mbs)
+{
+ void *buffer = NULL;
+ char *iv = NULL, *key = NULL, mode[MAX_CIPHER_LEN], *c;
+ int r;
+
+ if (!cipher || !cipher_mode || !volume_key_size || !encryption_mbs || !decryption_mbs)
+ return -EINVAL;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ r = -ENOMEM;
+ if (posix_memalign(&buffer, crypt_getpagesize(), buffer_size))
+ goto out;
+ memset(buffer, 0, buffer_size);
+
+ r = crypt_cipher_ivsize(cipher, cipher_mode);
+ if (r >= 0 && iv_size != (size_t)r) {
+ log_dbg(cd, "IV length for benchmark adjusted to %i bytes (requested %zu).", r, iv_size);
+ iv_size = r;
+ }
+
+ if (iv_size) {
+ iv = malloc(iv_size);
+ if (!iv)
+ goto out;
+ crypt_random_get(cd, iv, iv_size, CRYPT_RND_NORMAL);
+ }
+
+ key = malloc(volume_key_size);
+ if (!key)
+ goto out;
+
+ crypt_random_get(cd, key, volume_key_size, CRYPT_RND_NORMAL);
+
+ strncpy(mode, cipher_mode, sizeof(mode)-1);
+ /* Ignore IV generator */
+ if ((c = strchr(mode, '-')))
+ *c = '\0';
+
+ r = crypt_cipher_perf_kernel(cipher, cipher_mode, buffer, buffer_size, key, volume_key_size,
+ iv, iv_size, encryption_mbs, decryption_mbs);
+
+ if (r == -ERANGE)
+ log_dbg(cd, "Measured cipher runtime is too low.");
+ else if (r)
+ log_dbg(cd, "Cannot initialize cipher %s, mode %s, key size %zu, IV size %zu.",
+ cipher, cipher_mode, volume_key_size, iv_size);
+out:
+ free(buffer);
+ free(key);
+ free(iv);
+
+ return r;
+}
+
+int crypt_benchmark_pbkdf(struct crypt_device *cd,
+ struct crypt_pbkdf_type *pbkdf,
+ const char *password,
+ size_t password_size,
+ const char *salt,
+ size_t salt_size,
+ size_t volume_key_size,
+ int (*progress)(uint32_t time_ms, void *usrptr),
+ void *usrptr)
+{
+ int r, priority;
+ const char *kdf_opt;
+
+ if (!pbkdf || (!password && password_size))
+ return -EINVAL;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ kdf_opt = !strcmp(pbkdf->type, CRYPT_KDF_PBKDF2) ? pbkdf->hash : "";
+
+ log_dbg(cd, "Running %s(%s) benchmark.", pbkdf->type, kdf_opt);
+
+ crypt_process_priority(cd, &priority, true);
+ r = crypt_pbkdf_perf(pbkdf->type, pbkdf->hash, password, password_size,
+ salt, salt_size, volume_key_size, pbkdf->time_ms,
+ pbkdf->max_memory_kb, pbkdf->parallel_threads,
+ &pbkdf->iterations, &pbkdf->max_memory_kb, progress, usrptr);
+ crypt_process_priority(cd, &priority, false);
+
+ if (!r)
+ log_dbg(cd, "Benchmark returns %s(%s) %u iterations, %u memory, %u threads (for %zu-bits key).",
+ pbkdf->type, kdf_opt, pbkdf->iterations, pbkdf->max_memory_kb,
+ pbkdf->parallel_threads, volume_key_size * 8);
+ return r;
+}
+
+struct benchmark_usrptr {
+ struct crypt_device *cd;
+ struct crypt_pbkdf_type *pbkdf;
+};
+
+static int benchmark_callback(uint32_t time_ms, void *usrptr)
+{
+ struct benchmark_usrptr *u = usrptr;
+
+ log_dbg(u->cd, "PBKDF benchmark: memory cost = %u, iterations = %u, "
+ "threads = %u (took %u ms)", u->pbkdf->max_memory_kb,
+ u->pbkdf->iterations, u->pbkdf->parallel_threads, time_ms);
+
+ return 0;
+}
+
+/*
+ * Used internally to benchmark the PBKDF of a crypt_device context.
+ * Once the requested parameters are benchmarked, the iterations attribute
+ * is set and the benchmarked values can be reused.
+ * Note that the memory cost can be changed after the benchmark (if used).
+ * NOTE: You need to check that you are benchmarking for the same key size.
+ */
+int crypt_benchmark_pbkdf_internal(struct crypt_device *cd,
+ struct crypt_pbkdf_type *pbkdf,
+ size_t volume_key_size)
+{
+ struct crypt_pbkdf_limits pbkdf_limits;
+ double PBKDF2_tmp;
+ uint32_t ms_tmp;
+ int r = -EINVAL;
+ struct benchmark_usrptr u = {
+ .cd = cd,
+ .pbkdf = pbkdf
+ };
+
+ r = crypt_pbkdf_get_limits(pbkdf->type, &pbkdf_limits);
+ if (r)
+ return r;
+
+ if (pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK) {
+ if (pbkdf->iterations) {
+ log_dbg(cd, "Reusing PBKDF values (no benchmark flag is set).");
+ return 0;
+ }
+ log_err(cd, _("PBKDF benchmark disabled but iterations not set."));
+ return -EINVAL;
+ }
+
+ /* Always run the benchmark for PBKDF2. Also note it depends on volume_key_size! */
+ if (!strcmp(pbkdf->type, CRYPT_KDF_PBKDF2)) {
+ /*
+ * For PBKDF2 it is enough to run benchmark for only 1 second
+ * and interpolate final iterations value from it.
+ */
+ ms_tmp = pbkdf->time_ms;
+ pbkdf->time_ms = 1000;
+ pbkdf->parallel_threads = 0; /* N/A in PBKDF2 */
+ pbkdf->max_memory_kb = 0; /* N/A in PBKDF2 */
+
+ r = crypt_benchmark_pbkdf(cd, pbkdf, "foobarfo", 8, "01234567890abcdef", 16,
+ volume_key_size, &benchmark_callback, &u);
+ pbkdf->time_ms = ms_tmp;
+ if (r < 0) {
+ log_err(cd, _("Not compatible PBKDF2 options (using hash algorithm %s)."),
+ pbkdf->hash);
+ return r;
+ }
+
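+ /* Interpolate: scale iterations measured over 1000 ms to the requested time_ms */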
+ PBKDF2_tmp = ((double)pbkdf->iterations * pbkdf->time_ms / 1000.);
+ if (PBKDF2_tmp > (double)UINT32_MAX)
+ return -EINVAL;
+ pbkdf->iterations = AT_LEAST((uint32_t)PBKDF2_tmp, pbkdf_limits.min_iterations);
+ } else {
+ /* Already benchmarked */
+ if (pbkdf->iterations) {
+ log_dbg(cd, "Reusing PBKDF values.");
+ return 0;
+ }
+
+ r = crypt_benchmark_pbkdf(cd, pbkdf, "foobarfo", 8,
+ "0123456789abcdef0123456789abcdef", 32,
+ volume_key_size, &benchmark_callback, &u);
+ if (r < 0)
+ log_err(cd, _("Not compatible PBKDF options."));
+ }
+
+ return r;
+}
diff --git a/lib/utils_blkid.c b/lib/utils_blkid.c
new file mode 100644
index 0000000..5a848a1
--- /dev/null
+++ b/lib/utils_blkid.c
@@ -0,0 +1,347 @@
+/*
+ * blkid probe utilities
+ *
+ * Copyright (C) 2018-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "utils_blkid.h"
+#include "utils_io.h"
+
+#ifdef HAVE_BLKID
+#include <blkid/blkid.h>
+/* make bad checksums flag optional */
+#ifndef BLKID_SUBLKS_BADCSUM
+#define BLKID_SUBLKS_BADCSUM 0
+#endif
+struct blkid_handle {
+ int fd;
+ blkid_probe pr;
+};
+#ifndef HAVE_BLKID_WIPE
+static size_t crypt_getpagesize(void)
+{
+ long r = sysconf(_SC_PAGESIZE);
+ return r <= 0 ? 4096 : (size_t)r;
+}
+#endif
+#endif
+
+void blk_set_chains_for_wipes(struct blkid_handle *h)
+{
+#ifdef HAVE_BLKID
+ blkid_probe_enable_partitions(h->pr, 1);
+ blkid_probe_set_partitions_flags(h->pr, 0
+#ifdef HAVE_BLKID_WIPE
+ | BLKID_PARTS_MAGIC
+#endif
+ );
+
+ blkid_probe_enable_superblocks(h->pr, 1);
+ blkid_probe_set_superblocks_flags(h->pr, BLKID_SUBLKS_LABEL |
+ BLKID_SUBLKS_UUID |
+ BLKID_SUBLKS_TYPE |
+ BLKID_SUBLKS_USAGE |
+ BLKID_SUBLKS_VERSION |
+ BLKID_SUBLKS_MAGIC |
+ BLKID_SUBLKS_BADCSUM);
+#endif
+}
+
+void blk_set_chains_for_full_print(struct blkid_handle *h)
+{
+ blk_set_chains_for_wipes(h);
+}
+
+void blk_set_chains_for_superblocks(struct blkid_handle *h)
+{
+#ifdef HAVE_BLKID
+ blkid_probe_enable_superblocks(h->pr, 1);
+ blkid_probe_set_superblocks_flags(h->pr, BLKID_SUBLKS_TYPE);
+#endif
+}
+
+void blk_set_chains_for_fast_detection(struct blkid_handle *h)
+{
+#ifdef HAVE_BLKID
+ blkid_probe_enable_partitions(h->pr, 1);
+ blkid_probe_set_partitions_flags(h->pr, 0);
+ blk_set_chains_for_superblocks(h);
+#endif
+}
+
+int blk_init_by_path(struct blkid_handle **h, const char *path)
+{
+ int r = -ENOTSUP;
+#ifdef HAVE_BLKID
+ struct blkid_handle *tmp = malloc(sizeof(*tmp));
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->fd = -1;
+
+ tmp->pr = blkid_new_probe_from_filename(path);
+ if (!tmp->pr) {
+ free(tmp);
+ return -EINVAL;
+ }
+
+ *h = tmp;
+
+ r = 0;
+#endif
+ return r;
+}
+
+int blk_init_by_fd(struct blkid_handle **h, int fd)
+{
+ int r = -ENOTSUP;
+#ifdef HAVE_BLKID
+ struct blkid_handle *tmp = malloc(sizeof(*tmp));
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->pr = blkid_new_probe();
+ if (!tmp->pr) {
+ free(tmp);
+ return -EINVAL;
+ }
+
+ if (blkid_probe_set_device(tmp->pr, fd, 0, 0)) {
+ blkid_free_probe(tmp->pr);
+ free(tmp);
+ return -EINVAL;
+ }
+
+ tmp->fd = fd;
+
+ *h = tmp;
+
+ r = 0;
+#endif
+ return r;
+}
+
+#ifdef HAVE_BLKID
+static int blk_superblocks_luks(struct blkid_handle *h, bool enable)
+{
+ char luks[] = "crypto_LUKS";
+ char *luks_filter[] = {
+ luks,
+ NULL
+ };
+ return blkid_probe_filter_superblocks_type(h->pr,
+ enable ? BLKID_FLTR_ONLYIN : BLKID_FLTR_NOTIN,
+ luks_filter);
+}
+#endif
+
+int blk_superblocks_filter_luks(struct blkid_handle *h)
+{
+ int r = -ENOTSUP;
+#ifdef HAVE_BLKID
+ r = blk_superblocks_luks(h, false);
+#endif
+ return r;
+}
+
+int blk_superblocks_only_luks(struct blkid_handle *h)
+{
+ int r = -ENOTSUP;
+#ifdef HAVE_BLKID
+ r = blk_superblocks_luks(h, true);
+#endif
+ return r;
+}
+
+blk_probe_status blk_probe(struct blkid_handle *h)
+{
+ blk_probe_status pr = PRB_FAIL;
+#ifdef HAVE_BLKID
+ int r = blkid_do_probe(h->pr);
+
+ if (r == 0)
+ pr = PRB_OK;
+ else if (r == 1)
+ pr = PRB_EMPTY;
+#endif
+ return pr;
+}
+
+blk_probe_status blk_safeprobe(struct blkid_handle *h)
+{
+ int r = -1;
+#ifdef HAVE_BLKID
+ r = blkid_do_safeprobe(h->pr);
+#endif
+ switch (r) {
+ case -2:
+ return PRB_AMBIGUOUS;
+ case 1:
+ return PRB_EMPTY;
+ case 0:
+ return PRB_OK;
+ default:
+ return PRB_FAIL;
+ }
+}
+
+int blk_is_partition(struct blkid_handle *h)
+{
+ int r = 0;
+#ifdef HAVE_BLKID
+ r = blkid_probe_has_value(h->pr, "PTTYPE");
+#endif
+ return r;
+}
+
+int blk_is_superblock(struct blkid_handle *h)
+{
+ int r = 0;
+#ifdef HAVE_BLKID
+ r = blkid_probe_has_value(h->pr, "TYPE");
+#endif
+ return r;
+}
+
+const char *blk_get_partition_type(struct blkid_handle *h)
+{
+ const char *value = NULL;
+#ifdef HAVE_BLKID
+ (void) blkid_probe_lookup_value(h->pr, "PTTYPE", &value, NULL);
+#endif
+ return value;
+}
+
+const char *blk_get_superblock_type(struct blkid_handle *h)
+{
+ const char *value = NULL;
+#ifdef HAVE_BLKID
+ (void) blkid_probe_lookup_value(h->pr, "TYPE", &value, NULL);
+#endif
+ return value;
+}
+
+void blk_free(struct blkid_handle *h)
+{
+#ifdef HAVE_BLKID
+ if (!h)
+ return;
+
+ if (h->pr)
+ blkid_free_probe(h->pr);
+
+ free(h);
+#endif
+}
+
+#ifdef HAVE_BLKID
+#ifndef HAVE_BLKID_WIPE
+static int blk_step_back(struct blkid_handle *h)
+{
+#ifdef HAVE_BLKID_STEP_BACK
+ return blkid_probe_step_back(h->pr);
+#else
+ blkid_reset_probe(h->pr);
+ blkid_probe_set_device(h->pr, h->fd, 0, 0);
+ return 0;
+#endif
+}
+#endif /* not HAVE_BLKID_WIPE */
+#endif /* HAVE_BLKID */
+
+int blk_do_wipe(struct blkid_handle *h)
+{
+#ifdef HAVE_BLKID
+#ifdef HAVE_BLKID_WIPE
+ return blkid_do_wipe(h->pr, 0);
+#else
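+ /*
+ * Fallback for libblkid without blkid_do_wipe(): look up the offset and
+ * length of the detected magic signature and overwrite it with zeroes.
+ */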
+ const char *offset;
+ off_t offset_val;
+ void *buf;
+ ssize_t ret;
+ size_t alignment, len, bsize = blkid_probe_get_sectorsize(h->pr);
+
+ if (h->fd < 0 || !bsize)
+ return -EINVAL;
+
+ if (blk_is_partition(h)) {
+ if (blkid_probe_lookup_value(h->pr, "PTMAGIC_OFFSET", &offset, NULL))
+ return -EINVAL;
+ if (blkid_probe_lookup_value(h->pr, "PTMAGIC", NULL, &len))
+ return -EINVAL;
+ } else if (blk_is_superblock(h)) {
+ if (blkid_probe_lookup_value(h->pr, "SBMAGIC_OFFSET", &offset, NULL))
+ return -EINVAL;
+ if (blkid_probe_lookup_value(h->pr, "SBMAGIC", NULL, &len))
+ return -EINVAL;
+ } else
+ return 0;
+
+ alignment = crypt_getpagesize();
+
+ if (posix_memalign(&buf, alignment, len))
+ return -EINVAL;
+ memset(buf, 0, len);
+
+ offset_val = strtoll(offset, NULL, 10);
+
+ /* TODO: missing crypt_wipe_fd() */
+ ret = write_lseek_blockwise(h->fd, bsize, alignment, buf, len, offset_val);
+ free(buf);
+ if (ret < 0)
+ return -EIO;
+
+ if ((size_t)ret == len) {
+ blk_step_back(h);
+ return 0;
+ }
+
+ return -EIO;
+#endif
+#else /* HAVE_BLKID */
+ return -ENOTSUP;
+#endif
+}
+
+int blk_supported(void)
+{
+ int r = 0;
+#ifdef HAVE_BLKID
+ r = 1;
+#endif
+ return r;
+}
+
+unsigned blk_get_block_size(struct blkid_handle *h)
+{
+ unsigned block_size = 0;
+#ifdef HAVE_BLKID
+ const char *data;
+ if (!blk_is_superblock(h) || !blkid_probe_has_value(h->pr, "BLOCK_SIZE") ||
+ blkid_probe_lookup_value(h->pr, "BLOCK_SIZE", &data, NULL) ||
+ sscanf(data, "%u", &block_size) != 1)
+ block_size = 0;
+#endif
+ return block_size;
+}
diff --git a/lib/utils_blkid.h b/lib/utils_blkid.h
new file mode 100644
index 0000000..3ee1434
--- /dev/null
+++ b/lib/utils_blkid.h
@@ -0,0 +1,69 @@
+/*
+ * blkid probe utilities
+ *
+ * Copyright (C) 2018-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _UTILS_BLKID_H
+#define _UTILS_BLKID_H
+
+#include <sys/types.h>
+
+struct blkid_handle;
+
+typedef enum { PRB_OK = 0, PRB_EMPTY, PRB_AMBIGUOUS, PRB_FAIL } blk_probe_status;
+
+int blk_init_by_path(struct blkid_handle **h, const char *path);
+
+void blk_free(struct blkid_handle *h);
+
+/*
+ * WARNING: This will reset the file descriptor offset as if
+ * lseek(devfd, 0, SEEK_SET) was called!
+ */
+int blk_init_by_fd(struct blkid_handle **h, int fd);
+
+void blk_set_chains_for_wipes(struct blkid_handle *h);
+
+void blk_set_chains_for_full_print(struct blkid_handle *h);
+
+void blk_set_chains_for_superblocks(struct blkid_handle *h);
+
+void blk_set_chains_for_fast_detection(struct blkid_handle *h);
+
+int blk_superblocks_filter_luks(struct blkid_handle *h);
+int blk_superblocks_only_luks(struct blkid_handle *h);
+
+blk_probe_status blk_safeprobe(struct blkid_handle *h);
+
+blk_probe_status blk_probe(struct blkid_handle *h);
+
+int blk_is_partition(struct blkid_handle *h);
+
+int blk_is_superblock(struct blkid_handle *h);
+
+const char *blk_get_partition_type(struct blkid_handle *h);
+
+const char *blk_get_superblock_type(struct blkid_handle *h);
+
+int blk_do_wipe(struct blkid_handle *h);
+
+int blk_supported(void);
+
+unsigned blk_get_block_size(struct blkid_handle *h);
+
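+/*
+ * Usage sketch (illustrative only; assumes blk_init_by_path() returns 0
+ * on success, "/dev/sdx" is a placeholder path, error handling trimmed):
+ *
+ *	struct blkid_handle *h;
+ *
+ *	if (!blk_init_by_path(&h, "/dev/sdx")) {
+ *		blk_set_chains_for_superblocks(h);
+ *		if (blk_safeprobe(h) == PRB_OK)
+ *			printf("TYPE: %s\n", blk_get_superblock_type(h));
+ *		blk_free(h);
+ *	}
+ */
+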
+#endif
diff --git a/lib/utils_crypt.c b/lib/utils_crypt.c
new file mode 100644
index 0000000..0b7dc37
--- /dev/null
+++ b/lib/utils_crypt.c
@@ -0,0 +1,347 @@
+/*
+ * utils_crypt - cipher utilities for cryptsetup
+ *
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "libcryptsetup.h"
+#include "utils_crypt.h"
+
+#define MAX_CAPI_LEN_STR "143" /* sscanf width for the crypto API string: MAX_CAPI_LEN - 1, leaving room for '\0' */
+
+int crypt_parse_name_and_mode(const char *s, char *cipher, int *key_nums,
+ char *cipher_mode)
+{
+ if (!s || !cipher || !cipher_mode)
+ return -EINVAL;
+
+ if (sscanf(s, "%" MAX_CIPHER_LEN_STR "[^-]-%" MAX_CIPHER_LEN_STR "s",
+ cipher, cipher_mode) == 2) {
+ if (!strcmp(cipher_mode, "plain"))
+ strcpy(cipher_mode, "cbc-plain");
+ if (key_nums) {
+ char *tmp = strchr(cipher, ':');
+ *key_nums = tmp ? atoi(++tmp) : 1;
+ if (!*key_nums)
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* Short version for "empty" cipher */
+ if (!strcmp(s, "null") || !strcmp(s, "cipher_null")) {
+ strcpy(cipher, "cipher_null");
+ strcpy(cipher_mode, "ecb");
+ if (key_nums)
+ *key_nums = 0;
+ return 0;
+ }
+
+ if (sscanf(s, "%" MAX_CIPHER_LEN_STR "[^-]", cipher) == 1) {
+ strcpy(cipher_mode, "cbc-plain");
+ if (key_nums)
+ *key_nums = 1;
+ return 0;
+ }
+
+ return -EINVAL;
+}
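+
+/*
+ * Worked examples for crypt_parse_name_and_mode() (illustrative, derived
+ * from the sscanf patterns above):
+ * "aes-xts-plain64"        -> cipher "aes", mode "xts-plain64", key_nums 1
+ * "aes:2-cbc-essiv:sha256" -> cipher "aes:2", mode "cbc-essiv:sha256", key_nums 2
+ * "serpent"                -> cipher "serpent", mode "cbc-plain", key_nums 1
+ * "null"                   -> cipher "cipher_null", mode "ecb", key_nums 0
+ */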
+
+int crypt_parse_hash_integrity_mode(const char *s, char *integrity)
+{
+ char mode[MAX_CIPHER_LEN], hash[MAX_CIPHER_LEN];
+ int r;
+
+ if (!s || !integrity || strchr(s, '(') || strchr(s, ')'))
+ return -EINVAL;
+
+ r = sscanf(s, "%" MAX_CIPHER_LEN_STR "[^-]-%" MAX_CIPHER_LEN_STR "s", mode, hash);
+ if (r == 2 && !isdigit(hash[0]))
+ r = snprintf(integrity, MAX_CIPHER_LEN, "%s(%s)", mode, hash);
+ else if (r == 2)
+ r = snprintf(integrity, MAX_CIPHER_LEN, "%s-%s", mode, hash);
+ else if (r == 1)
+ r = snprintf(integrity, MAX_CIPHER_LEN, "%s", mode);
+ else
+ return -EINVAL;
+
+ if (r < 0 || r >= MAX_CIPHER_LEN)
+ return -EINVAL;
+
+ return 0;
+}
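+
+/*
+ * Worked examples (illustrative):
+ * "hmac-sha256" -> "hmac(sha256)" (kernel crypto API notation)
+ * "blake2b-256" -> "blake2b-256"  (digit-leading suffix kept as-is)
+ * "crc32c"      -> "crc32c"
+ */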
+
+int crypt_parse_integrity_mode(const char *s, char *integrity,
+ int *integrity_key_size)
+{
+ int ks = 0, r = 0;
+
+ if (!s || !integrity)
+ return -EINVAL;
+
+ /* AEAD modes */
+ if (!strcmp(s, "aead") ||
+ !strcmp(s, "poly1305") ||
+ !strcmp(s, "none")) {
+ strncpy(integrity, s, MAX_CIPHER_LEN);
+ ks = 0;
+ } else if (!strcmp(s, "hmac-sha1")) {
+ strncpy(integrity, "hmac(sha1)", MAX_CIPHER_LEN);
+ ks = 20;
+ } else if (!strcmp(s, "hmac-sha256")) {
+ strncpy(integrity, "hmac(sha256)", MAX_CIPHER_LEN);
+ ks = 32;
+ } else if (!strcmp(s, "hmac-sha512")) {
+ ks = 64;
+ strncpy(integrity, "hmac(sha512)", MAX_CIPHER_LEN);
+ } else if (!strcmp(s, "cmac-aes")) {
+ ks = 16;
+ strncpy(integrity, "cmac(aes)", MAX_CIPHER_LEN);
+ } else
+ r = -EINVAL;
+
+ if (integrity_key_size)
+ *integrity_key_size = ks;
+
+ return r;
+}
+
+int crypt_parse_pbkdf(const char *s, const char **pbkdf)
+{
+ const char *tmp = NULL;
+
+ if (!s)
+ return -EINVAL;
+
+ if (!strcasecmp(s, CRYPT_KDF_PBKDF2))
+ tmp = CRYPT_KDF_PBKDF2;
+ else if (!strcasecmp(s, CRYPT_KDF_ARGON2I))
+ tmp = CRYPT_KDF_ARGON2I;
+ else if (!strcasecmp(s, CRYPT_KDF_ARGON2ID))
+ tmp = CRYPT_KDF_ARGON2ID;
+
+ if (!tmp)
+ return -EINVAL;
+
+ if (pbkdf)
+ *pbkdf = tmp;
+
+ return 0;
+}
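+
+/*
+ * Illustrative example: crypt_parse_pbkdf("Argon2id", &p) matches
+ * case-insensitively and sets p to the canonical CRYPT_KDF_ARGON2ID
+ * constant string (no allocation, nothing to free).
+ */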
+
+/*
+ * Thanks to Mikulas Patocka for these two character-conversion functions.
+ *
+ * This function is used to load cryptographic keys, so it is coded in such a
+ * way that there are no conditions or memory accesses that depend on data.
+ *
+ * Explanation of the logic:
+ * (ch - '9' - 1) is negative if ch <= '9'
+ * ('0' - 1 - ch) is negative if ch >= '0'
+ * we "and" these two values, so the result is negative if ch is in the range
+ * '0' ... '9'
+ * we are only interested in the sign, so we do a shift ">> 8"; note that right
+ * shift of a negative value is implementation-defined, so we cast the
+ * value to (unsigned) before the shift --- we have 0xffffff if ch is in
+ * the range '0' ... '9', 0 otherwise
+ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
+ * in the range '0' ... '9', 0 otherwise
+ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
+ * ... '9', -1 otherwise
+ * the next line is similar to the previous one, but we need to decode both
+ * uppercase and lowercase letters, so we use (ch & 0xdf), which converts
+ * lowercase to uppercase
+ */
+static int hex_to_bin(unsigned char ch)
+{
+ unsigned char cu = ch & 0xdf;
+ return -1 +
+ ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
+ ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
+}
+
+static char hex2asc(unsigned char c)
+{
+ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+}
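+
+/*
+ * Worked example (illustrative): for c = 3, (9 - 3) >> 4 is 0, giving
+ * '0' + 3 = '3'; for c = 12, (unsigned)(9 - 12) >> 4 ends in all-ones
+ * low bits, so the mask yields 0x27 and the result is
+ * 12 + '0' + 0x27 = 'c'. Branch-free, matching the constant-time
+ * intent of hex_to_bin() above.
+ */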
+
+ssize_t crypt_hex_to_bytes(const char *hex, char **result, int safe_alloc)
+{
+ char *bytes;
+ size_t i, len;
+ int bl, bh;
+
+ if (!hex || !result)
+ return -EINVAL;
+
+ len = strlen(hex);
+ if (len % 2)
+ return -EINVAL;
+ len /= 2;
+
+ bytes = safe_alloc ? crypt_safe_alloc(len) : malloc(len);
+ if (!bytes)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ bh = hex_to_bin(hex[i * 2]);
+ bl = hex_to_bin(hex[i * 2 + 1]);
+ if (bh == -1 || bl == -1) {
+ safe_alloc ? crypt_safe_free(bytes) : free(bytes);
+ return -EINVAL;
+ }
+ bytes[i] = (bh << 4) | bl;
+ }
+ *result = bytes;
+ return i;
+}
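+
+/*
+ * Illustrative example: crypt_hex_to_bytes("deadbeef", &buf, 0)
+ * returns 4 and fills buf with { 0xde, 0xad, 0xbe, 0xef }; odd-length
+ * or non-hex input returns -EINVAL.
+ */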
+
+char *crypt_bytes_to_hex(size_t size, const char *bytes)
+{
+ unsigned i;
+ char *hex;
+
+ if (size && !bytes)
+ return NULL;
+
+ /* Alloc adds trailing \0 */
+ if (size == 0)
+ hex = crypt_safe_alloc(2);
+ else
+ hex = crypt_safe_alloc(size * 2 + 1);
+ if (!hex)
+ return NULL;
+
+ if (size == 0)
+ hex[0] = '-';
+ else for (i = 0; i < size; i++) {
+ hex[i * 2] = hex2asc((const unsigned char)bytes[i] >> 4);
+ hex[i * 2 + 1] = hex2asc((const unsigned char)bytes[i] & 0xf);
+ }
+
+ return hex;
+}
+
+void crypt_log_hex(struct crypt_device *cd,
+ const char *bytes, size_t size,
+ const char *sep, int numwrap, const char *wrapsep)
+{
+ unsigned i;
+
+ for (i = 0; i < size; i++) {
+ if (wrapsep && numwrap && i && !(i % numwrap))
+ crypt_logf(cd, CRYPT_LOG_NORMAL, wrapsep);
+ crypt_logf(cd, CRYPT_LOG_NORMAL, "%c%c%s",
+ hex2asc((const unsigned char)bytes[i] >> 4),
+ hex2asc((const unsigned char)bytes[i] & 0xf), sep);
+ }
+}
+
+bool crypt_is_cipher_null(const char *cipher_spec)
+{
+ if (!cipher_spec)
+ return false;
+ return (strstr(cipher_spec, "cipher_null") || !strcmp(cipher_spec, "null"));
+}
+
+int crypt_capi_to_cipher(char **org_c, char **org_i, const char *c_dm, const char *i_dm)
+{
+ char cipher[MAX_CAPI_ONE_LEN], mode[MAX_CAPI_ONE_LEN], iv[MAX_CAPI_ONE_LEN],
+ auth[MAX_CAPI_ONE_LEN], tmp[MAX_CAPI_LEN], dmcrypt_tmp[MAX_CAPI_LEN*2],
+ capi[MAX_CAPI_LEN+1];
+ size_t len;
+ int i;
+
+ if (!c_dm)
+ return -EINVAL;
+
+ /* legacy mode (no "capi:" prefix; compare all 5 prefix characters incl. the colon) */
+ if (strncmp(c_dm, "capi:", 5)) {
+ if (!(*org_c = strdup(c_dm)))
+ return -ENOMEM;
+ if (i_dm) {
+ if (!(*org_i = strdup(i_dm))) {
+ free(*org_c);
+ *org_c = NULL;
+ return -ENOMEM;
+ }
+ } else
+ *org_i = NULL;
+ return 0;
+ }
+
+ /* modes with capi: prefix */
+ i = sscanf(c_dm, "capi:%" MAX_CAPI_LEN_STR "[^-]-%" MAX_CAPI_ONE_LEN_STR "s", tmp, iv);
+ if (i != 2)
+ return -EINVAL;
+
+ len = strlen(tmp);
+ if (len < 2)
+ return -EINVAL;
+
+ if (tmp[len-1] == ')')
+ tmp[len-1] = '\0';
+
+ if (sscanf(tmp, "rfc4309(%" MAX_CAPI_LEN_STR "s", capi) == 1) {
+ if (!(*org_i = strdup("aead")))
+ return -ENOMEM;
+ } else if (sscanf(tmp, "rfc7539(%" MAX_CAPI_LEN_STR "[^,],%" MAX_CAPI_ONE_LEN_STR "s", capi, auth) == 2) {
+ if (!(*org_i = strdup(auth)))
+ return -ENOMEM;
+ } else if (sscanf(tmp, "authenc(%" MAX_CAPI_ONE_LEN_STR "[^,],%" MAX_CAPI_LEN_STR "s", auth, capi) == 2) {
+ if (!(*org_i = strdup(auth)))
+ return -ENOMEM;
+ } else {
+ if (i_dm) {
+ if (!(*org_i = strdup(i_dm)))
+ return -ENOMEM;
+ } else
+ *org_i = NULL;
+ memset(capi, 0, sizeof(capi));
+ strncpy(capi, tmp, sizeof(capi)-1);
+ }
+
+ i = sscanf(capi, "%" MAX_CAPI_ONE_LEN_STR "[^(](%" MAX_CAPI_ONE_LEN_STR "[^)])", mode, cipher);
+ if (i == 2)
+ i = snprintf(dmcrypt_tmp, sizeof(dmcrypt_tmp), "%s-%s-%s", cipher, mode, iv);
+ else
+ i = snprintf(dmcrypt_tmp, sizeof(dmcrypt_tmp), "%s-%s", capi, iv);
+ if (i < 0 || (size_t)i >= sizeof(dmcrypt_tmp)) {
+ free(*org_i);
+ *org_i = NULL;
+ return -EINVAL;
+ }
+
+ if (!(*org_c = strdup(dmcrypt_tmp))) {
+ free(*org_i);
+ *org_i = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
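+
+/*
+ * Worked examples for crypt_capi_to_cipher() (illustrative):
+ * "aes-xts-plain64"       -> *org_c = "aes-xts-plain64" (legacy passthrough)
+ * "capi:xts(aes)-plain64" -> *org_c = "aes-xts-plain64"
+ * "capi:authenc(hmac(sha256),xts(aes))-plain64"
+ *                         -> *org_c = "aes-xts-plain64", *org_i = "hmac(sha256)"
+ */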
diff --git a/lib/utils_crypt.h b/lib/utils_crypt.h
new file mode 100644
index 0000000..92e0705
--- /dev/null
+++ b/lib/utils_crypt.h
@@ -0,0 +1,54 @@
+/*
+ * utils_crypt - cipher utilities for cryptsetup
+ *
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _UTILS_CRYPT_H
+#define _UTILS_CRYPT_H
+
+#include <stdbool.h>
+
+struct crypt_device;
+
+#define MAX_CIPHER_LEN 32
+#define MAX_CIPHER_LEN_STR "31"
+#define MAX_KEYFILES 32
+#define MAX_CAPI_ONE_LEN (2 * MAX_CIPHER_LEN)
+#define MAX_CAPI_ONE_LEN_STR "63" /* sscanf width: MAX_CAPI_ONE_LEN - 1, leaving room for '\0' */
+#define MAX_CAPI_LEN 144 /* should be enough to fit the whole crypto API string */
+
+int crypt_parse_name_and_mode(const char *s, char *cipher,
+ int *key_nums, char *cipher_mode);
+int crypt_parse_hash_integrity_mode(const char *s, char *integrity);
+int crypt_parse_integrity_mode(const char *s, char *integrity,
+ int *integrity_key_size);
+int crypt_parse_pbkdf(const char *s, const char **pbkdf);
+
+ssize_t crypt_hex_to_bytes(const char *hex, char **result, int safe_alloc);
+char *crypt_bytes_to_hex(size_t size, const char *bytes);
+void crypt_log_hex(struct crypt_device *cd,
+ const char *bytes, size_t size,
+ const char *sep, int numwrap, const char *wrapsep);
+
+bool crypt_is_cipher_null(const char *cipher_spec);
+
+int crypt_capi_to_cipher(char **org_c, char **org_i, const char *c_dm, const char *i_dm);
+
+#endif /* _UTILS_CRYPT_H */
diff --git a/lib/utils_device.c b/lib/utils_device.c
new file mode 100644
index 0000000..d80ea62
--- /dev/null
+++ b/lib/utils_device.c
@@ -0,0 +1,1089 @@
+/*
+ * device backend utilities
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <linux/fs.h>
+#include <unistd.h>
+#ifdef HAVE_SYS_SYSMACROS_H
+# include <sys/sysmacros.h> /* for major, minor */
+#endif
+#ifdef HAVE_SYS_STATVFS_H
+# include <sys/statvfs.h>
+#endif
+#include "internal.h"
+#include "utils_device_locking.h"
+
+struct device {
+ char *path;
+
+ char *file_path;
+ int loop_fd;
+
+ int ro_dev_fd;
+ int dev_fd;
+ int dev_fd_excl;
+
+ struct crypt_lock_handle *lh;
+
+ unsigned int o_direct:1;
+ unsigned int init_done:1; /* path is bdev or loop already initialized */
+
+ /* cached values */
+ size_t alignment;
+ size_t block_size;
+ size_t loop_block_size;
+};
+
+static size_t device_fs_block_size_fd(int fd)
+{
+ size_t page_size = crypt_getpagesize();
+
+#ifdef HAVE_SYS_STATVFS_H
+ struct statvfs buf;
+
+ /*
+ * NOTE: some filesystems (NFS) return a bogus block size (1MB).
+ * Page-size I/O should always work and avoids increasing I/O beyond the aligned LUKS header.
+ */
+ if (!fstatvfs(fd, &buf) && buf.f_bsize && buf.f_bsize <= page_size)
+ return (size_t)buf.f_bsize;
+#endif
+ return page_size;
+}
+
+static size_t device_block_size_fd(int fd, size_t *min_size)
+{
+ struct stat st;
+ size_t bsize;
+ int arg;
+
+ if (fstat(fd, &st) < 0)
+ return 0;
+
+ if (S_ISREG(st.st_mode))
+ bsize = device_fs_block_size_fd(fd);
+ else {
+ if (ioctl(fd, BLKSSZGET, &arg) < 0)
+ bsize = crypt_getpagesize();
+ else
+ bsize = (size_t)arg;
+ }
+
+ if (!min_size)
+ return bsize;
+
+ if (S_ISREG(st.st_mode)) {
+ /* file can be empty as well */
+ if (st.st_size > (ssize_t)bsize)
+ *min_size = bsize;
+ else
+ *min_size = st.st_size;
+ } else {
+ /* block device must have at least one block */
+ *min_size = bsize;
+ }
+
+ return bsize;
+}
+
+static size_t device_block_phys_size_fd(int fd)
+{
+ struct stat st;
+ int arg;
+ size_t bsize = SECTOR_SIZE;
+
+ if (fstat(fd, &st) < 0)
+ return bsize;
+
+ if (S_ISREG(st.st_mode))
+ bsize = MAX_SECTOR_SIZE;
+ else if (ioctl(fd, BLKPBSZGET, &arg) >= 0)
+ bsize = (size_t)arg;
+
+ return bsize;
+}
+
+static size_t device_alignment_fd(int devfd)
+{
+ long alignment = DEFAULT_MEM_ALIGNMENT;
+
+#ifdef _PC_REC_XFER_ALIGN
+ alignment = fpathconf(devfd, _PC_REC_XFER_ALIGN);
+ if (alignment < 0)
+ alignment = DEFAULT_MEM_ALIGNMENT;
+#endif
+ return (size_t)alignment;
+}
+
+static int device_read_test(int devfd)
+{
+ char buffer[512];
+ int r = -EIO;
+ size_t minsize = 0, blocksize, alignment;
+
+ blocksize = device_block_size_fd(devfd, &minsize);
+ alignment = device_alignment_fd(devfd);
+
+ if (!blocksize || !alignment)
+ return -EINVAL;
+
+ if (minsize == 0)
+ return 0;
+
+ if (minsize > sizeof(buffer))
+ minsize = sizeof(buffer);
+
+ if (read_blockwise(devfd, blocksize, alignment, buffer, minsize) == (ssize_t)minsize)
+ r = 0;
+
+ crypt_safe_memzero(buffer, sizeof(buffer));
+ return r;
+}
+
+/*
+ * Direct-io is always preferred. The header is usually mapped to the same
+ * device and can be accessed while the rest of the device is mapped to the data device.
+ * Using direct-io ensures that we do not mess with data in the cache.
+ * (But proper alignment should prevent this in the first place.)
+ * The read test is needed to detect broken configurations (seen with remote
+ * block devices) that allow open with direct-io but then fail on read.
+ */
+static int device_ready(struct crypt_device *cd, struct device *device)
+{
+ int devfd = -1, r = 0;
+ struct stat st;
+ size_t tmp_size;
+
+ if (!device)
+ return -EINVAL;
+
+ if (device->o_direct) {
+ log_dbg(cd, "Trying to open and read device %s with direct-io.",
+ device_path(device));
+ device->o_direct = 0;
+ devfd = open(device_path(device), O_RDONLY | O_DIRECT);
+ if (devfd >= 0) {
+ if (device_read_test(devfd) == 0) {
+ device->o_direct = 1;
+ } else {
+ close(devfd);
+ devfd = -1;
+ }
+ }
+ }
+
+ if (devfd < 0) {
+ log_dbg(cd, "Trying to open device %s without direct-io.",
+ device_path(device));
+ devfd = open(device_path(device), O_RDONLY);
+ }
+
+ if (devfd < 0) {
+ log_err(cd, _("Device %s does not exist or access denied."),
+ device_path(device));
+ return -EINVAL;
+ }
+
+ if (fstat(devfd, &st) < 0)
+ r = -EINVAL;
+ else if (!S_ISBLK(st.st_mode))
+ r = S_ISREG(st.st_mode) ? -ENOTBLK : -EINVAL;
+ if (r == -EINVAL) {
+ log_err(cd, _("Device %s is not compatible."),
+ device_path(device));
+ close(devfd);
+ return r;
+ }
+
+ /* Allow only increase (loop device) */
+ tmp_size = device_alignment_fd(devfd);
+ if (tmp_size > device->alignment)
+ device->alignment = tmp_size;
+
+ tmp_size = device_block_size_fd(devfd, NULL);
+ if (tmp_size > device->block_size)
+ device->block_size = tmp_size;
+
+ close(devfd);
+ return r;
+}
+
+static int _open_locked(struct crypt_device *cd, struct device *device, int flags)
+{
+ int fd;
+
+ if (!device)
+ return -EINVAL;
+
+ log_dbg(cd, "Opening locked device %s", device_path(device));
+
+ if ((flags & O_ACCMODE) != O_RDONLY && device_locked_readonly(device->lh)) {
+ log_dbg(cd, "Cannot open locked device %s in write mode. Read lock held.", device_path(device));
+ return -EAGAIN;
+ }
+
+ fd = open(device_path(device), flags);
+ if (fd < 0)
+ return -errno;
+
+ if (device_locked_verify(cd, fd, device->lh)) {
+ /* fd doesn't correspond to a locked resource */
+ close(fd);
+ log_dbg(cd, "Failed to verify lock resource for device %s.", device_path(device));
+ return -EINVAL;
+ }
+
+ return fd;
+}
+
+/*
+ * Common wrapper for device sync.
+ */
+void device_sync(struct crypt_device *cd, struct device *device)
+{
+ if (!device || device->dev_fd < 0)
+ return;
+
+ if (fsync(device->dev_fd) == -1)
+ log_dbg(cd, "Cannot sync device %s.", device_path(device));
+}
+
+/*
+ * In non-locked mode this always returns an fd or -1.
+ *
+ * In locked mode it returns an opened fd or one of:
+ * -EAGAIN : write mode requested while the device is locked via a shared (read) lock
+ * -EINVAL : invalid lock fd state
+ * -1      : all other errors
+ */
+static int device_open_internal(struct crypt_device *cd, struct device *device, int flags)
+{
+ int access, devfd;
+
+ if (device->o_direct)
+ flags |= O_DIRECT;
+
+ access = flags & O_ACCMODE;
+ if (access == O_WRONLY)
+ access = O_RDWR;
+
+ if (access == O_RDONLY && device->ro_dev_fd >= 0) {
+ log_dbg(cd, "Reusing open r%c fd on device %s", 'o', device_path(device));
+ return device->ro_dev_fd;
+ } else if (access == O_RDWR && device->dev_fd >= 0) {
+ log_dbg(cd, "Reusing open r%c fd on device %s", 'w', device_path(device));
+ return device->dev_fd;
+ }
+
+ if (device_locked(device->lh))
+ devfd = _open_locked(cd, device, flags);
+ else
+ devfd = open(device_path(device), flags);
+
+ if (devfd < 0) {
+ log_dbg(cd, "Cannot open device %s%s.",
+ device_path(device),
+ access != O_RDONLY ? " for write" : "");
+ return devfd;
+ }
+
+ if (access == O_RDONLY)
+ device->ro_dev_fd = devfd;
+ else
+ device->dev_fd = devfd;
+
+ return devfd;
+}
+
+int device_open(struct crypt_device *cd, struct device *device, int flags)
+{
+ if (!device)
+ return -EINVAL;
+
+ assert(!device_locked(device->lh));
+ return device_open_internal(cd, device, flags);
+}
+
+int device_open_excl(struct crypt_device *cd, struct device *device, int flags)
+{
+ const char *path;
+ struct stat st;
+
+ if (!device)
+ return -EINVAL;
+
+ assert(!device_locked(device->lh));
+
+ if (device->dev_fd_excl < 0) {
+ path = device_path(device);
+ if (stat(path, &st))
+ return -EINVAL;
+ if (!S_ISBLK(st.st_mode))
+ log_dbg(cd, "%s is not a block device. Can't open in exclusive mode.",
+ path);
+ else {
+ /* open(2) with O_EXCL (w/o O_CREAT) on a regular file is undefined behaviour according to the man page */
+ /* coverity[toctou] */
+ device->dev_fd_excl = open(path, O_RDONLY | O_EXCL); /* lgtm[cpp/toctou-race-condition] */
+ if (device->dev_fd_excl < 0)
+ return errno == EBUSY ? -EBUSY : device->dev_fd_excl;
+ if (fstat(device->dev_fd_excl, &st) || !S_ISBLK(st.st_mode)) {
+ log_dbg(cd, "%s is not a block device. Can't open in exclusive mode.",
+ path);
+ close(device->dev_fd_excl);
+ device->dev_fd_excl = -1;
+ } else
+ log_dbg(cd, "Device %s is blocked for exclusive open.", path);
+ }
+ }
+
+ return device_open_internal(cd, device, flags);
+}
+
+void device_release_excl(struct crypt_device *cd, struct device *device)
+{
+ if (device && device->dev_fd_excl >= 0) {
+ if (close(device->dev_fd_excl))
+ log_dbg(cd, "Failed to release exclusive handle on device %s.",
+ device_path(device));
+ else
+ log_dbg(cd, "Closed exclusive fd for %s.", device_path(device));
+ device->dev_fd_excl = -1;
+ }
+}
+
+int device_open_locked(struct crypt_device *cd, struct device *device, int flags)
+{
+ if (!device)
+ return -EINVAL;
+
+ assert(!crypt_metadata_locking_enabled() || device_locked(device->lh));
+ return device_open_internal(cd, device, flags);
+}
+
+/* Avoid any read from device, expects direct-io to work. */
+int device_alloc_no_check(struct device **device, const char *path)
+{
+ struct device *dev;
+
+ if (!path) {
+ *device = NULL;
+ return 0;
+ }
+
+ dev = malloc(sizeof(struct device));
+ if (!dev)
+ return -ENOMEM;
+
+ memset(dev, 0, sizeof(struct device));
+ dev->path = strdup(path);
+ if (!dev->path) {
+ free(dev);
+ return -ENOMEM;
+ }
+ dev->loop_fd = -1;
+ dev->ro_dev_fd = -1;
+ dev->dev_fd = -1;
+ dev->dev_fd_excl = -1;
+ dev->o_direct = 1;
+
+ *device = dev;
+ return 0;
+}
+
+int device_alloc(struct crypt_device *cd, struct device **device, const char *path)
+{
+ struct device *dev;
+ int r;
+
+ r = device_alloc_no_check(&dev, path);
+ if (r < 0)
+ return r;
+
+ if (dev) {
+ r = device_ready(cd, dev);
+ if (!r) {
+ dev->init_done = 1;
+ } else if (r == -ENOTBLK) {
+ /* alloc loop later */
+ } else if (r < 0) {
+ free(dev->path);
+ free(dev);
+ return -ENOTBLK;
+ }
+ }
+
+ *device = dev;
+ return 0;
+}
+
+void device_free(struct crypt_device *cd, struct device *device)
+{
+ if (!device)
+ return;
+
+ device_close(cd, device);
+
+ if (device->dev_fd_excl != -1) {
+ log_dbg(cd, "Closed exclusive fd for %s.", device_path(device));
+ close(device->dev_fd_excl);
+ }
+
+ if (device->loop_fd != -1) {
+ log_dbg(cd, "Closed loop %s (%s).", device->path, device->file_path);
+ close(device->loop_fd);
+ }
+
+ assert(!device_locked(device->lh));
+
+ free(device->file_path);
+ free(device->path);
+ free(device);
+}
+
+/* Get block device path */
+const char *device_block_path(const struct device *device)
+{
+ if (!device || !device->init_done)
+ return NULL;
+
+ return device->path;
+}
+
+/* Get device-mapper name of device (if possible) */
+const char *device_dm_name(const struct device *device)
+{
+ const char *dmdir = dm_get_dir();
+ size_t dmdir_len = strlen(dmdir);
+
+ if (!device || !device->init_done)
+ return NULL;
+
+ if (strncmp(device->path, dmdir, dmdir_len))
+ return NULL;
+
+ return &device->path[dmdir_len+1];
+}
+
+/* Get path to device / file */
+const char *device_path(const struct device *device)
+{
+ if (!device)
+ return NULL;
+
+ if (device->file_path)
+ return device->file_path;
+
+ return device->path;
+}
+
+/* block device topology ioctls, introduced in 2.6.32 */
+#ifndef BLKIOMIN
+#define BLKIOMIN _IO(0x12,120)
+#define BLKIOOPT _IO(0x12,121)
+#define BLKALIGNOFF _IO(0x12,122)
+#endif
+
+void device_topology_alignment(struct crypt_device *cd,
+ struct device *device,
+ unsigned long *required_alignment, /* bytes */
+ unsigned long *alignment_offset, /* bytes */
+ unsigned long default_alignment)
+{
+ int dev_alignment_offset = 0;
+ unsigned int min_io_size = 0, opt_io_size = 0;
+ unsigned long temp_alignment = 0;
+ int fd;
+
+ *required_alignment = default_alignment;
+ *alignment_offset = 0;
+
+ if (!device || !device->path) //FIXME
+ return;
+
+ fd = open(device->path, O_RDONLY);
+ if (fd == -1)
+ return;
+
+ /* minimum io size */
+ if (ioctl(fd, BLKIOMIN, &min_io_size) == -1) {
+ log_dbg(cd, "Topology info for %s not supported, using default offset %lu bytes.",
+ device->path, default_alignment);
+ goto out;
+ }
+
+ /* optimal io size */
+ if (ioctl(fd, BLKIOOPT, &opt_io_size) == -1)
+ opt_io_size = min_io_size;
+
+ /* alignment offset, bogus -1 means misaligned/unknown */
+ if (ioctl(fd, BLKALIGNOFF, &dev_alignment_offset) == -1 || dev_alignment_offset < 0)
+ dev_alignment_offset = 0;
+ *alignment_offset = (unsigned long)dev_alignment_offset;
+
+ temp_alignment = (unsigned long)min_io_size;
+
+ /*
+ * Ignore bogus opt-io that could break alignment.
+ * The real opt_io_size should also be aligned to the minimal page size (4k).
+ * Some bogus USB enclosures report wrong data here.
+ */
+ if ((temp_alignment < (unsigned long)opt_io_size) &&
+ !((unsigned long)opt_io_size % temp_alignment) && !MISALIGNED_4K(opt_io_size))
+ temp_alignment = (unsigned long)opt_io_size;
+ else if (opt_io_size && (opt_io_size != min_io_size))
+ log_err(cd, _("Ignoring bogus optimal-io size for data device (%u bytes)."), opt_io_size);
+
+ /* If the default alignment is already a multiple of the calculated one, keep the default */
+ if (temp_alignment && (default_alignment % temp_alignment))
+ *required_alignment = temp_alignment;
+
+ log_dbg(cd, "Topology: IO (%u/%u), offset = %lu; Required alignment is %lu bytes.",
+ min_io_size, opt_io_size, *alignment_offset, *required_alignment);
+out:
+ (void)close(fd);
+}
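+
+/*
+ * Worked example (illustrative, assuming the caller passes the typical
+ * 1 MiB default alignment): with min_io = 512 and opt_io = 65536, opt_io
+ * is accepted but the default is kept, because 1 MiB is already a
+ * multiple of 64 KiB; a bogus opt_io such as 33553920 is not 4k-aligned
+ * and is rejected with an error instead.
+ */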
+
+size_t device_block_size(struct crypt_device *cd, struct device *device)
+{
+ int fd;
+
+ if (!device)
+ return 0;
+
+ if (device->block_size)
+ return device->block_size;
+
+ fd = open(device->file_path ?: device->path, O_RDONLY);
+ if (fd >= 0) {
+ device->block_size = device_block_size_fd(fd, NULL);
+ close(fd);
+ }
+
+ if (!device->block_size)
+ log_dbg(cd, "Cannot get block size for device %s.", device_path(device));
+
+ return device->block_size;
+}
+
+size_t device_optimal_encryption_sector_size(struct crypt_device *cd, struct device *device)
+{
+ int fd;
+ size_t phys_block_size;
+
+ if (!device)
+ return SECTOR_SIZE;
+
+ fd = open(device->file_path ?: device->path, O_RDONLY);
+ if (fd < 0) {
+ log_dbg(cd, "Cannot get optimal encryption sector size for device %s.", device_path(device));
+ return SECTOR_SIZE;
+ }
+
+ /* cache device block size */
+ device->block_size = device_block_size_fd(fd, NULL);
+ if (!device->block_size) {
+ close(fd);
+ log_dbg(cd, "Cannot get block size for device %s.", device_path(device));
+ return SECTOR_SIZE;
+ }
+
+ if (device->block_size >= MAX_SECTOR_SIZE) {
+ close(fd);
+ return MISALIGNED(device->block_size, MAX_SECTOR_SIZE) ? SECTOR_SIZE : MAX_SECTOR_SIZE;
+ }
+
+ phys_block_size = device_block_phys_size_fd(fd);
+ close(fd);
+
+ if (device->block_size >= phys_block_size ||
+ phys_block_size <= SECTOR_SIZE ||
+ phys_block_size > MAX_SECTOR_SIZE ||
+ MISALIGNED(phys_block_size, device->block_size))
+ return device->block_size;
+
+ return phys_block_size;
+}
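+
+/*
+ * Worked example (illustrative): a 512e drive (512-byte logical,
+ * 4096-byte physical sectors) returns 4096; a drive reporting a physical
+ * size of 512, above MAX_SECTOR_SIZE, or misaligned to the logical size
+ * falls back to the logical block size.
+ */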
+
+int device_read_ahead(struct device *device, uint32_t *read_ahead)
+{
+ int fd, r = 0;
+ long read_ahead_long;
+
+ if (!device)
+ return 0;
+
+ if ((fd = open(device->path, O_RDONLY)) < 0)
+ return 0;
+
+ r = ioctl(fd, BLKRAGET, &read_ahead_long) ? 0 : 1;
+ close(fd);
+
+ if (r)
+ *read_ahead = (uint32_t) read_ahead_long;
+
+ return r;
+}
+
+/* Get data size in bytes */
+int device_size(struct device *device, uint64_t *size)
+{
+ struct stat st;
+ int devfd, r = -EINVAL;
+
+ if (!device)
+ return -EINVAL;
+
+ devfd = open(device->path, O_RDONLY);
+ if (devfd == -1)
+ return -EINVAL;
+
+ if (fstat(devfd, &st) < 0)
+ goto out;
+
+ if (S_ISREG(st.st_mode)) {
+ *size = (uint64_t)st.st_size;
+ r = 0;
+ } else if (ioctl(devfd, BLKGETSIZE64, size) >= 0)
+ r = 0;
+out:
+ close(devfd);
+ return r;
+}
+
+/* For a file, allocate the required space */
+int device_fallocate(struct device *device, uint64_t size)
+{
+ struct stat st;
+ int devfd, r = -EINVAL;
+
+ if (!device)
+ return -EINVAL;
+
+ devfd = open(device_path(device), O_RDWR);
+ if (devfd == -1)
+ return -EINVAL;
+
+ if (!fstat(devfd, &st) && S_ISREG(st.st_mode) &&
+ ((uint64_t)st.st_size >= size || !posix_fallocate(devfd, 0, size))) {
+ r = 0;
+ if (device->file_path && crypt_loop_resize(device->path))
+ r = -EINVAL;
+ }
+
+ close(devfd);
+ return r;
+}
+
+int device_check_size(struct crypt_device *cd,
+ struct device *device,
+ uint64_t req_offset, int falloc)
+{
+ uint64_t dev_size;
+
+ if (device_size(device, &dev_size)) {
+ log_dbg(cd, "Cannot get device size for device %s.", device_path(device));
+ return -EIO;
+ }
+
+ log_dbg(cd, "Device size %" PRIu64 ", offset %" PRIu64 ".", dev_size, req_offset);
+
+ if (req_offset > dev_size) {
+ /* If it is header file, increase its size */
+ if (falloc && !device_fallocate(device, req_offset))
+ return 0;
+
+ log_err(cd, _("Device %s is too small. Need at least %" PRIu64 " bytes."),
+ device_path(device), req_offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int device_info(struct crypt_device *cd,
+ struct device *device,
+ enum devcheck device_check,
+ int *readonly, uint64_t *size)
+{
+ struct stat st;
+ int fd = -1, r, flags = 0, real_readonly;
+ uint64_t real_size;
+
+ if (!device)
+ return -ENOTBLK;
+
+ real_readonly = 0;
+ real_size = 0;
+
+ if (stat(device->path, &st) < 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* never wipe header on mounted device */
+ if (device_check == DEV_EXCL && S_ISBLK(st.st_mode))
+ flags |= O_EXCL;
+
+ /* Try to open read-write to check whether it is a read-only device */
+ /* coverity[toctou] */
+ fd = open(device->path, O_RDWR | flags);
+ if (fd == -1 && errno == EROFS) {
+ real_readonly = 1;
+ fd = open(device->path, O_RDONLY | flags);
+ }
+
+ if (fd == -1 && device_check == DEV_EXCL && errno == EBUSY) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ if (fd == -1) {
+ r = errno ? -errno : -EINVAL;
+ goto out;
+ }
+
+ r = 0;
+ if (S_ISREG(st.st_mode)) {
+ //FIXME: add readonly check
+ real_size = (uint64_t)st.st_size;
+ real_size >>= SECTOR_SHIFT;
+ } else {
+ /* If the device can be opened read-write, i.e. readonly is still 0, then
+ * check whether BLKROGET says that it is read-only. E.g. read-only loop
+ * devices may be opened read-write but are read-only according to BLKROGET.
+ */
+ if (real_readonly == 0 && (r = ioctl(fd, BLKROGET, &real_readonly)) < 0)
+ goto out;
+
+ r = ioctl(fd, BLKGETSIZE64, &real_size);
+ if (r >= 0) {
+ real_size >>= SECTOR_SHIFT;
+ goto out;
+ }
+ }
+out:
+ if (fd != -1)
+ close(fd);
+
+ switch (r) {
+ case 0:
+ if (readonly)
+ *readonly = real_readonly;
+ if (size)
+ *size = real_size;
+ break;
+ case -EBUSY:
+ log_err(cd, _("Cannot use device %s which is in use "
+ "(already mapped or mounted)."), device_path(device));
+ break;
+ case -EACCES:
+ log_err(cd, _("Cannot use device %s, permission denied."), device_path(device));
+ break;
+ default:
+ log_err(cd, _("Cannot get info about device %s."), device_path(device));
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int device_check_access(struct crypt_device *cd,
+ struct device *device,
+ enum devcheck device_check)
+{
+ return device_info(cd, device, device_check, NULL, NULL);
+}
+
+static int device_internal_prepare(struct crypt_device *cd, struct device *device)
+{
+ char *loop_device = NULL, *file_path = NULL;
+ int r, loop_fd, readonly = 0;
+
+ if (device->init_done)
+ return 0;
+
+ if (getuid() || geteuid()) {
+ log_err(cd, _("Cannot use a loopback device, "
+ "running as non-root user."));
+ return -ENOTSUP;
+ }
+
+ log_dbg(cd, "Allocating a free loop device (block size: %zu).",
+ device->loop_block_size ?: SECTOR_SIZE);
+
+ /* Keep the loop open, detached on last close. */
+ loop_fd = crypt_loop_attach(&loop_device, device->path, 0, 1, &readonly, device->loop_block_size);
+ if (loop_fd == -1) {
+ log_err(cd, _("Attaching loopback device failed "
+ "(loop device with autoclear flag is required)."));
+ free(loop_device);
+ return -EINVAL;
+ }
+
+ file_path = device->path;
+ device->path = loop_device;
+
+ r = device_ready(cd, device);
+ if (r < 0) {
+ device->path = file_path;
+ crypt_loop_detach(loop_device);
+ free(loop_device);
+ return r;
+ }
+
+ log_dbg(cd, "Attached loop device block size is %zu bytes.", device_block_size_fd(loop_fd, NULL));
+
+ device->loop_fd = loop_fd;
+ device->file_path = file_path;
+ device->init_done = 1;
+
+ return 0;
+}
+
+int device_block_adjust(struct crypt_device *cd,
+ struct device *device,
+ enum devcheck device_check,
+ uint64_t device_offset,
+ uint64_t *size,
+ uint32_t *flags)
+{
+ int r, real_readonly;
+ uint64_t real_size;
+
+ if (!device)
+ return -ENOTBLK;
+
+ r = device_internal_prepare(cd, device);
+ if (r)
+ return r;
+
+ r = device_info(cd, device, device_check, &real_readonly, &real_size);
+ if (r)
+ return r;
+
+ if (device_offset >= real_size) {
+ log_err(cd, _("Requested offset is beyond real size of device %s."),
+ device_path(device));
+ return -EINVAL;
+ }
+
+ if (size && !*size) {
+ *size = real_size;
+ if (!*size) {
+ log_err(cd, _("Device %s has zero size."), device_path(device));
+ return -ENOTBLK;
+ }
+ *size -= device_offset;
+ }
+
+ /* in case the size is set by parameter */
+ if (size && ((real_size - device_offset) < *size)) {
+ log_dbg(cd, "Device %s: offset = %" PRIu64 " requested size = %" PRIu64
+ ", backing device size = %" PRIu64,
+ device->path, device_offset, *size, real_size);
+ log_err(cd, _("Device %s is too small."), device_path(device));
+ return -EINVAL;
+ }
+
+ if (flags && real_readonly)
+ *flags |= CRYPT_ACTIVATE_READONLY;
+
+ if (size)
+ log_dbg(cd, "Calculated device size is %" PRIu64" sectors (%s), offset %" PRIu64 ".",
+ *size, real_readonly ? "RO" : "RW", device_offset);
+ return 0;
+}
+
+size_t size_round_up(size_t size, size_t block)
+{
+ size_t s = (size + (block - 1)) / block;
+ return s * block;
+}
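+
+/* Illustrative example: size_round_up(1, 4096) == 4096, size_round_up(8192, 4096) == 8192. */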
+
+void device_disable_direct_io(struct device *device)
+{
+ if (device)
+ device->o_direct = 0;
+}
+
+int device_direct_io(const struct device *device)
+{
+ return device ? device->o_direct : 0;
+}
+
+static int device_compare_path(const char *path1, const char *path2)
+{
+ struct stat st_path1, st_path2;
+
+ if (stat(path1, &st_path1 ) < 0 || stat(path2, &st_path2 ) < 0)
+ return -EINVAL;
+
+ if (S_ISBLK(st_path1.st_mode) && S_ISBLK(st_path2.st_mode))
+ return (st_path1.st_rdev == st_path2.st_rdev) ? 1 : 0;
+
+ if (S_ISREG(st_path1.st_mode) && S_ISREG(st_path2.st_mode))
+ return (st_path1.st_ino == st_path2.st_ino &&
+ st_path1.st_dev == st_path2.st_dev) ? 1 : 0;
+
+ return 0;
+}
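+
+/*
+ * Illustrative example: "/dev/sda" and a symlink resolving to the same
+ * block device compare equal via st_rdev; two regular files match only
+ * on identical inode and filesystem (st_ino + st_dev).
+ */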
+
+int device_is_identical(struct device *device1, struct device *device2)
+{
+ if (!device1 || !device2)
+ return 0;
+
+ if (device1 == device2)
+ return 1;
+
+ if (!strcmp(device_path(device1), device_path(device2)))
+ return 1;
+
+ return device_compare_path(device_path(device1), device_path(device2));
+}
+
+int device_is_rotational(struct device *device)
+{
+ struct stat st;
+
+ if (!device)
+ return -EINVAL;
+
+ if (stat(device_path(device), &st) < 0)
+ return -EINVAL;
+
+ if (!S_ISBLK(st.st_mode))
+ return 0;
+
+ return crypt_dev_is_rotational(major(st.st_rdev), minor(st.st_rdev));
+}
+
+size_t device_alignment(struct device *device)
+{
+ int devfd;
+
+ if (!device)
+ return -EINVAL;
+
+ if (!device->alignment) {
+ devfd = open(device_path(device), O_RDONLY);
+ if (devfd != -1) {
+ device->alignment = device_alignment_fd(devfd);
+ close(devfd);
+ }
+ }
+
+ return device->alignment;
+}
+
+void device_set_lock_handle(struct device *device, struct crypt_lock_handle *h)
+{
+ if (device)
+ device->lh = h;
+}
+
+struct crypt_lock_handle *device_get_lock_handle(struct device *device)
+{
+ return device ? device->lh : NULL;
+}
+
+int device_read_lock(struct crypt_device *cd, struct device *device)
+{
+ if (!device || !crypt_metadata_locking_enabled())
+ return 0;
+
+ if (device_read_lock_internal(cd, device))
+ return -EBUSY;
+
+ return 0;
+}
+
+int device_write_lock(struct crypt_device *cd, struct device *device)
+{
+ if (!device || !crypt_metadata_locking_enabled())
+ return 0;
+
+ assert(!device_locked(device->lh) || !device_locked_readonly(device->lh));
+
+ return device_write_lock_internal(cd, device);
+}
+
+void device_read_unlock(struct crypt_device *cd, struct device *device)
+{
+ if (!device || !crypt_metadata_locking_enabled())
+ return;
+
+ assert(device_locked(device->lh));
+
+ device_unlock_internal(cd, device);
+}
+
+void device_write_unlock(struct crypt_device *cd, struct device *device)
+{
+ if (!device || !crypt_metadata_locking_enabled())
+ return;
+
+ assert(device_locked(device->lh) && !device_locked_readonly(device->lh));
+
+ device_unlock_internal(cd, device);
+}
+
+bool device_is_locked(struct device *device)
+{
+ return device ? device_locked(device->lh) : 0;
+}
+
+void device_close(struct crypt_device *cd, struct device *device)
+{
+ if (!device)
+ return;
+
+ if (device->ro_dev_fd != -1) {
+ log_dbg(cd, "Closing read only fd for %s.", device_path(device));
+ if (close(device->ro_dev_fd))
+ log_dbg(cd, "Failed to close read only fd for %s.", device_path(device));
+ device->ro_dev_fd = -1;
+ }
+
+ if (device->dev_fd != -1) {
+ log_dbg(cd, "Closing read write fd for %s.", device_path(device));
+ if (close(device->dev_fd))
+ log_dbg(cd, "Failed to close read write fd for %s.", device_path(device));
+ device->dev_fd = -1;
+ }
+}
+
+void device_set_block_size(struct device *device, size_t size)
+{
+ if (!device)
+ return;
+
+ device->loop_block_size = size;
+}
diff --git a/lib/utils_device_locking.c b/lib/utils_device_locking.c
new file mode 100644
index 0000000..e18ea77
--- /dev/null
+++ b/lib/utils_device_locking.c
@@ -0,0 +1,520 @@
+/*
+ * Metadata on-disk locking for processes serialization
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <linux/limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#ifdef HAVE_SYS_SYSMACROS_H
+# include <sys/sysmacros.h> /* for major, minor */
+#endif
+#include <libgen.h>
+
+#include "internal.h"
+#include "utils_device_locking.h"
+
+#define same_inode(buf1, buf2) \
+ ((buf1).st_ino == (buf2).st_ino && \
+ (buf1).st_dev == (buf2).st_dev)
+
+enum lock_type {
+ DEV_LOCK_READ = 0,
+ DEV_LOCK_WRITE
+};
+
+enum lock_mode {
+ DEV_LOCK_FILE = 0,
+ DEV_LOCK_BDEV,
+ DEV_LOCK_NAME
+};
+
+struct crypt_lock_handle {
+ unsigned refcnt;
+ int flock_fd;
+ enum lock_type type;
+ enum lock_mode mode;
+ union {
+ struct {
+ dev_t devno;
+ } bdev;
+ struct {
+ char *name;
+ } name;
+ } u;
+};
+
+static int resource_by_name(char *res, size_t res_size, const char *name, bool fullpath)
+{
+ int r;
+
+ if (fullpath)
+ r = snprintf(res, res_size, "%s/LN_%s", DEFAULT_LUKS2_LOCK_PATH, name);
+ else
+ r = snprintf(res, res_size, "LN_%s", name);
+
+ return (r < 0 || (size_t)r >= res_size) ? -EINVAL : 0;
+}
+
+static int resource_by_devno(char *res, size_t res_size, dev_t devno, unsigned fullpath)
+{
+ int r;
+
+ if (fullpath)
+ r = snprintf(res, res_size, "%s/L_%d:%d", DEFAULT_LUKS2_LOCK_PATH, major(devno), minor(devno));
+ else
+ r = snprintf(res, res_size, "L_%d:%d", major(devno), minor(devno));
+
+ return (r < 0 || (size_t)r >= res_size) ? -EINVAL : 0;
+}
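+
+/*
+ * Illustrative example: devno 253:0 with fullpath set yields
+ * "<compiled-in lock dir>/L_253:0", while name-based locks use the
+ * "LN_" prefix (see resource_by_name() above).
+ */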
+
+static int open_lock_dir(struct crypt_device *cd, const char *dir, const char *base)
+{
+ int dirfd, lockdfd;
+
+ dirfd = open(dir, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+ if (dirfd < 0) {
+ log_dbg(cd, "Failed to open directory %s: (%d: %s).", dir, errno, strerror(errno));
+ if (errno == ENOTDIR || errno == ENOENT)
+ log_err(cd, _("Locking aborted. The locking path %s/%s is unusable (not a directory or missing)."), dir, base);
+ return -EINVAL;
+ }
+
+ lockdfd = openat(dirfd, base, O_RDONLY | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC);
+ if (lockdfd < 0) {
+ if (errno == ENOENT) {
+ log_dbg(cd, "Locking directory %s/%s will be created with default compiled-in permissions.", dir, base);
+
+ /* on success, or on failure with errno == EEXIST, just try to open the 'base' directory again */
+ if (mkdirat(dirfd, base, DEFAULT_LUKS2_LOCK_DIR_PERMS) && errno != EEXIST)
+ log_dbg(cd, "Failed to create directory %s in %s (%d: %s).", base, dir, errno, strerror(errno));
+ else
+ lockdfd = openat(dirfd, base, O_RDONLY | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC);
+ } else {
+ log_dbg(cd, "Failed to open directory %s/%s: (%d: %s)", dir, base, errno, strerror(errno));
+ if (errno == ENOTDIR || errno == ELOOP)
+ log_err(cd, _("Locking aborted. The locking path %s/%s is unusable (%s is not a directory)."), dir, base, base);
+ }
+ }
+
+ close(dirfd);
+ return lockdfd >= 0 ? lockdfd : -EINVAL;
+}
+
+static int open_resource(struct crypt_device *cd, const char *res)
+{
+ int err, lockdir_fd, r;
+ char dir[] = DEFAULT_LUKS2_LOCK_PATH,
+ base[] = DEFAULT_LUKS2_LOCK_PATH;
+
+ lockdir_fd = open_lock_dir(cd, dirname(dir), basename(base));
+ if (lockdir_fd < 0)
+ return -EINVAL;
+
+ log_dbg(cd, "Opening lock resource file %s/%s", DEFAULT_LUKS2_LOCK_PATH, res);
+ r = openat(lockdir_fd, res, O_CREAT | O_NOFOLLOW | O_RDWR | O_CLOEXEC, 0777);
+ err = errno;
+
+ close(lockdir_fd);
+
+ return r < 0 ? -err : r;
+}
+
+static int acquire_lock_handle(struct crypt_device *cd, struct device *device, struct crypt_lock_handle *h)
+{
+ char res[PATH_MAX];
+ int dev_fd, fd;
+ struct stat st;
+
+ dev_fd = open(device_path(device), O_RDONLY | O_NONBLOCK | O_CLOEXEC);
+ if (dev_fd < 0)
+ return -EINVAL;
+
+ if (fstat(dev_fd, &st)) {
+ close(dev_fd);
+ return -EINVAL;
+ }
+
+ if (S_ISBLK(st.st_mode)) {
+ if (resource_by_devno(res, sizeof(res), st.st_rdev, 0)) {
+ close(dev_fd);
+ return -EINVAL;
+ }
+
+ fd = open_resource(cd, res);
+ close(dev_fd);
+ if (fd < 0)
+ return fd;
+
+ h->flock_fd = fd;
+ h->u.bdev.devno = st.st_rdev;
+ h->mode = DEV_LOCK_BDEV;
+ } else if (S_ISREG(st.st_mode)) {
+ /* workaround for NFSv4 */
+ fd = open(device_path(device), O_RDWR | O_NONBLOCK | O_CLOEXEC);
+ if (fd < 0)
+ h->flock_fd = dev_fd;
+ else {
+ h->flock_fd = fd;
+ close(dev_fd);
+ }
+ h->mode = DEV_LOCK_FILE;
+ } else {
+ /* Wrong device type */
+ close(dev_fd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int acquire_lock_handle_by_name(struct crypt_device *cd, const char *name, struct crypt_lock_handle *h)
+{
+ char res[PATH_MAX];
+ int fd;
+
+ h->u.name.name = strdup(name);
+ if (!h->u.name.name)
+ return -ENOMEM;
+
+ if (resource_by_name(res, sizeof(res), name, false)) {
+ free(h->u.name.name);
+ return -EINVAL;
+ }
+
+ fd = open_resource(cd, res);
+ if (fd < 0) {
+ free(h->u.name.name);
+ return fd;
+ }
+
+ h->flock_fd = fd;
+ h->mode = DEV_LOCK_NAME;
+
+ return 0;
+}
+
+static void release_lock_handle(struct crypt_device *cd, struct crypt_lock_handle *h)
+{
+ char res[PATH_MAX];
+ struct stat buf_a, buf_b;
+
+ if ((h->mode == DEV_LOCK_NAME) && /* was it name lock */
+ !flock(h->flock_fd, LOCK_EX | LOCK_NB) && /* lock to drop the file */
+ !resource_by_name(res, sizeof(res), h->u.name.name, true) && /* acquire lock resource name */
+ !fstat(h->flock_fd, &buf_a) && /* read inode id referred by fd */
+ !stat(res, &buf_b) && /* does path file still exist? */
+ same_inode(buf_a, buf_b)) { /* is it same id as the one referenced by fd? */
+ /* coverity[toctou] */
+ if (unlink(res)) /* yes? unlink the file. lgtm[cpp/toctou-race-condition] */
+ log_dbg(cd, "Failed to unlink resource file: %s", res);
+ }
+
+ if ((h->mode == DEV_LOCK_BDEV) && /* was it block device */
+ !flock(h->flock_fd, LOCK_EX | LOCK_NB) && /* lock to drop the file */
+ !resource_by_devno(res, sizeof(res), h->u.bdev.devno, 1) && /* acquire lock resource name */
+ !fstat(h->flock_fd, &buf_a) && /* read inode id referred by fd */
+ !stat(res, &buf_b) && /* does path file still exist? */
+ same_inode(buf_a, buf_b)) { /* is it same id as the one referenced by fd? */
+ /* coverity[toctou] */
+ if (unlink(res)) /* yes? unlink the file. lgtm[cpp/toctou-race-condition] */
+ log_dbg(cd, "Failed to unlink resource file: %s", res);
+ }
+
+ if (h->mode == DEV_LOCK_NAME)
+ free(h->u.name.name);
+
+ if (close(h->flock_fd))
+ log_dbg(cd, "Failed to close lock resource fd (%d).", h->flock_fd);
+}
+
+int device_locked(struct crypt_lock_handle *h)
+{
+ return (h && (h->type == DEV_LOCK_READ || h->type == DEV_LOCK_WRITE));
+}
+
+int device_locked_readonly(struct crypt_lock_handle *h)
+{
+ return (h && h->type == DEV_LOCK_READ);
+}
+
+static int verify_lock_handle(struct crypt_lock_handle *h)
+{
+ char res[PATH_MAX];
+ struct stat lck_st, res_st;
+
+ /* we locked a regular file; it is verified during device_open() instead, no reason to check now */
+ if (h->mode == DEV_LOCK_FILE)
+ return 0;
+
+ if (h->mode == DEV_LOCK_NAME) {
+ if (resource_by_name(res, sizeof(res), h->u.name.name, true))
+ return -EINVAL;
+ } else if (h->mode == DEV_LOCK_BDEV) {
+ if (resource_by_devno(res, sizeof(res), h->u.bdev.devno, true))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (fstat(h->flock_fd, &lck_st))
+ return -EINVAL;
+
+ return (stat(res, &res_st) || !same_inode(lck_st, res_st)) ? -EAGAIN : 0;
+}
+
+static unsigned device_lock_inc(struct crypt_lock_handle *h)
+{
+ return ++h->refcnt;
+}
+
+static unsigned device_lock_dec(struct crypt_lock_handle *h)
+{
+ assert(h->refcnt);
+
+ return --h->refcnt;
+}
+
+static int acquire_and_verify(struct crypt_device *cd, struct device *device, const char *resource, int flock_op, struct crypt_lock_handle **lock)
+{
+ int r;
+ struct crypt_lock_handle *h;
+
+ if (device && resource)
+ return -EINVAL;
+
+ if (!(h = malloc(sizeof(*h))))
+ return -ENOMEM;
+
+ do {
+ r = device ? acquire_lock_handle(cd, device, h) : acquire_lock_handle_by_name(cd, resource, h);
+ if (r < 0)
+ break;
+
+ if (flock(h->flock_fd, flock_op)) {
+ log_dbg(cd, "Flock on fd %d failed with errno %d.", h->flock_fd, errno);
+ r = (errno == EWOULDBLOCK) ? -EBUSY : -EINVAL;
+ release_lock_handle(cd, h);
+ break;
+ }
+
+ log_dbg(cd, "Verifying lock handle for %s.", device ? device_path(device) : resource);
+
+ /*
+ * check whether another libcryptsetup process removed the resource file before
+ * this one managed to flock() it; see release_lock_handle() for details
+ */
+ r = verify_lock_handle(h);
+ if (r < 0) {
+ if (flock(h->flock_fd, LOCK_UN))
+ log_dbg(cd, "flock on fd %d failed.", h->flock_fd);
+ release_lock_handle(cd, h);
+ log_dbg(cd, "Lock handle verification failed.");
+ }
+ } while (r == -EAGAIN);
+
+ if (r < 0) {
+ free(h);
+ return r;
+ }
+
+ *lock = h;
+
+ return 0;
+}
+
+int device_read_lock_internal(struct crypt_device *cd, struct device *device)
+{
+ int r;
+ struct crypt_lock_handle *h;
+
+ if (!device)
+ return -EINVAL;
+
+ h = device_get_lock_handle(device);
+
+ if (device_locked(h)) {
+ device_lock_inc(h);
+ log_dbg(cd, "Device %s READ lock (or higher) already held.", device_path(device));
+ return 0;
+ }
+
+ log_dbg(cd, "Acquiring read lock for device %s.", device_path(device));
+
+ r = acquire_and_verify(cd, device, NULL, LOCK_SH, &h);
+ if (r < 0)
+ return r;
+
+ h->type = DEV_LOCK_READ;
+ h->refcnt = 1;
+ device_set_lock_handle(device, h);
+
+ log_dbg(cd, "Device %s READ lock taken.", device_path(device));
+
+ return 0;
+}
+
+int device_write_lock_internal(struct crypt_device *cd, struct device *device)
+{
+ int r;
+ struct crypt_lock_handle *h;
+
+ if (!device)
+ return -EINVAL;
+
+ h = device_get_lock_handle(device);
+
+ if (device_locked(h)) {
+ log_dbg(cd, "Device %s WRITE lock already held.", device_path(device));
+ return device_lock_inc(h);
+ }
+
+ log_dbg(cd, "Acquiring write lock for device %s.", device_path(device));
+
+ r = acquire_and_verify(cd, device, NULL, LOCK_EX, &h);
+ if (r < 0)
+ return r;
+
+ h->type = DEV_LOCK_WRITE;
+ h->refcnt = 1;
+ device_set_lock_handle(device, h);
+
+ log_dbg(cd, "Device %s WRITE lock taken.", device_path(device));
+
+ return 1;
+}
+
+int crypt_read_lock(struct crypt_device *cd, const char *resource, bool blocking, struct crypt_lock_handle **lock)
+{
+ int r;
+ struct crypt_lock_handle *h;
+
+ if (!resource)
+ return -EINVAL;
+
+ log_dbg(cd, "Acquiring %sblocking read lock for resource %s.", blocking ? "" : "non", resource);
+
+ r = acquire_and_verify(cd, NULL, resource, LOCK_SH | (blocking ? 0 : LOCK_NB), &h);
+ if (r < 0)
+ return r;
+
+ h->type = DEV_LOCK_READ;
+ h->refcnt = 1;
+
+ log_dbg(cd, "READ lock for resource %s taken.", resource);
+
+ *lock = h;
+
+ return 0;
+}
+
+int crypt_write_lock(struct crypt_device *cd, const char *resource, bool blocking, struct crypt_lock_handle **lock)
+{
+ int r;
+ struct crypt_lock_handle *h;
+
+ if (!resource)
+ return -EINVAL;
+
+ log_dbg(cd, "Acquiring %sblocking write lock for resource %s.", blocking ? "" : "non", resource);
+
+ r = acquire_and_verify(cd, NULL, resource, LOCK_EX | (blocking ? 0 : LOCK_NB), &h);
+ if (r < 0)
+ return r;
+
+ h->type = DEV_LOCK_WRITE;
+ h->refcnt = 1;
+
+ log_dbg(cd, "WRITE lock for resource %s taken.", resource);
+
+ *lock = h;
+
+ return 0;
+}
+
+static void unlock_internal(struct crypt_device *cd, struct crypt_lock_handle *h)
+{
+ if (flock(h->flock_fd, LOCK_UN))
+ log_dbg(cd, "flock on fd %d failed.", h->flock_fd);
+ release_lock_handle(cd, h);
+ free(h);
+}
+
+void crypt_unlock_internal(struct crypt_device *cd, struct crypt_lock_handle *h)
+{
+ if (!h)
+ return;
+
+ /* nested locks are illegal */
+ assert(!device_lock_dec(h));
+
+ log_dbg(cd, "Unlocking %s lock for resource %s.",
+ device_locked_readonly(h) ? "READ" : "WRITE", h->u.name.name);
+
+ unlock_internal(cd, h);
+}
+
+void device_unlock_internal(struct crypt_device *cd, struct device *device)
+{
+ bool readonly;
+ struct crypt_lock_handle *h = device_get_lock_handle(device);
+ unsigned u = device_lock_dec(h);
+
+ if (u)
+ return;
+
+ readonly = device_locked_readonly(h);
+
+ unlock_internal(cd, h);
+
+ log_dbg(cd, "Device %s %s lock released.", device_path(device),
+ readonly ? "READ" : "WRITE");
+
+ device_set_lock_handle(device, NULL);
+}
+
+int device_locked_verify(struct crypt_device *cd, int dev_fd, struct crypt_lock_handle *h)
+{
+ char res[PATH_MAX];
+ struct stat dev_st, lck_st, st;
+
+ if (fstat(dev_fd, &dev_st) || fstat(h->flock_fd, &lck_st))
+ return 1;
+
+ /* if device handle is regular file the handle must match the lock handle */
+ if (S_ISREG(dev_st.st_mode)) {
+ log_dbg(cd, "Verifying locked device handle (regular file)");
+ if (!same_inode(dev_st, lck_st))
+ return 1;
+ } else if (S_ISBLK(dev_st.st_mode)) {
+ log_dbg(cd, "Verifying locked device handle (bdev)");
+ if (resource_by_devno(res, sizeof(res), dev_st.st_rdev, 1) ||
+ stat(res, &st) ||
+ !same_inode(lck_st, st))
+ return 1;
+ } else
+ return 1;
+
+ return 0;
+}
diff --git a/lib/utils_device_locking.h b/lib/utils_device_locking.h
new file mode 100644
index 0000000..b73f15d
--- /dev/null
+++ b/lib/utils_device_locking.h
@@ -0,0 +1,49 @@
+/*
+ * Metadata on-disk locking for processes serialization
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_UTILS_LOCKING_H
+#define _CRYPTSETUP_UTILS_LOCKING_H
+
+#include <stdbool.h>
+
+struct crypt_device;
+struct crypt_lock_handle;
+struct device;
+
+int device_locked_readonly(struct crypt_lock_handle *h);
+int device_locked(struct crypt_lock_handle *h);
+
+int device_read_lock_internal(struct crypt_device *cd, struct device *device);
+int device_write_lock_internal(struct crypt_device *cd, struct device *device);
+void device_unlock_internal(struct crypt_device *cd, struct device *device);
+
+int device_locked_verify(struct crypt_device *cd, int fd, struct crypt_lock_handle *h);
+
+int crypt_read_lock(struct crypt_device *cd, const char *name, bool blocking, struct crypt_lock_handle **lock);
+int crypt_write_lock(struct crypt_device *cd, const char *name, bool blocking, struct crypt_lock_handle **lock);
+void crypt_unlock_internal(struct crypt_device *cd, struct crypt_lock_handle *h);
+
+
+/* Used only in device internal allocation */
+void device_set_lock_handle(struct device *device, struct crypt_lock_handle *h);
+struct crypt_lock_handle *device_get_lock_handle(struct device *device);
+
+#endif
diff --git a/lib/utils_devpath.c b/lib/utils_devpath.c
new file mode 100644
index 0000000..dc5a5bb
--- /dev/null
+++ b/lib/utils_devpath.c
@@ -0,0 +1,459 @@
+/*
+ * devname - search for device name
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#ifdef HAVE_SYS_SYSMACROS_H
+# include <sys/sysmacros.h> /* for major, minor */
+#endif
+#include "internal.h"
+
+static char *__lookup_dev(char *path, dev_t dev, int dir_level, const int max_level)
+{
+ struct dirent *entry;
+ struct stat st;
+ char *ptr;
+ char *result = NULL;
+ DIR *dir;
+ int space;
+
+ /* Ignore strange nested directories */
+ if (dir_level > max_level)
+ return NULL;
+
+ path[PATH_MAX - 1] = '\0';
+ ptr = path + strlen(path);
+ *ptr++ = '/';
+ *ptr = '\0';
+ space = PATH_MAX - (ptr - path);
+
+ dir = opendir(path);
+ if (!dir)
+ return NULL;
+
+ while((entry = readdir(dir))) {
+ if (entry->d_name[0] == '.' ||
+ !strncmp(entry->d_name, "..", 2))
+ continue;
+
+ if (dir_level == 0 &&
+ (!strcmp(entry->d_name, "shm") ||
+ !strcmp(entry->d_name, "fd") ||
+ !strcmp(entry->d_name, "char") ||
+ !strcmp(entry->d_name, "pts")))
+ continue;
+
+ strncpy(ptr, entry->d_name, space);
+ if (stat(path, &st) < 0)
+ continue;
+
+ if (S_ISDIR(st.st_mode)) {
+ result = __lookup_dev(path, dev, dir_level + 1, max_level);
+ if (result)
+ break;
+ } else if (S_ISBLK(st.st_mode)) {
+ /* workaround: ignore dm-X devices, these are internal kernel names */
+ if (dir_level == 0 && dm_is_dm_kernel_name(entry->d_name))
+ continue;
+ if (st.st_rdev == dev) {
+ result = strdup(path);
+ break;
+ }
+ }
+ }
+
+ closedir(dir);
+ return result;
+}
+
+/*
+ * Non-udev systems need to scan for the device here.
+ */
+static char *lookup_dev_old(int major, int minor)
+{
+ dev_t dev;
+ char *result = NULL, buf[PATH_MAX + 1];
+
+ dev = makedev(major, minor);
+ strncpy(buf, "/dev", PATH_MAX);
+ buf[PATH_MAX] = '\0';
+
+ /* First try low level device */
+ if ((result = __lookup_dev(buf, dev, 0, 0)))
+ return result;
+
+ /* If it is dm, try DM dir */
+ if (dm_is_dm_device(major)) {
+ strncpy(buf, dm_get_dir(), PATH_MAX);
+ if ((result = __lookup_dev(buf, dev, 0, 0)))
+ return result;
+ }
+
+ strncpy(buf, "/dev", PATH_MAX);
+ return __lookup_dev(buf, dev, 0, 4);
+}
+
+/*
+ * Returns an allocated string with the device path in /dev for the given "major:minor" dev_id
+ */
+char *crypt_lookup_dev(const char *dev_id)
+{
+ int major, minor;
+ char link[PATH_MAX], path[PATH_MAX], *devname, *devpath = NULL;
+ struct stat st;
+ ssize_t len;
+
+ if (sscanf(dev_id, "%d:%d", &major, &minor) != 2)
+ return NULL;
+
+ if (snprintf(path, sizeof(path), "/sys/dev/block/%s", dev_id) < 0)
+ return NULL;
+
+ len = readlink(path, link, sizeof(link) - 1);
+ if (len < 0) {
+ /* Without /sys use old scan */
+ if (stat("/sys/dev/block", &st) < 0)
+ return lookup_dev_old(major, minor);
+ return NULL;
+ }
+
+ link[len] = '\0';
+ devname = strrchr(link, '/');
+ if (!devname)
+ return NULL;
+ devname++;
+
+ if (dm_is_dm_kernel_name(devname))
+ devpath = dm_device_path("/dev/mapper/", major, minor);
+ else if (snprintf(path, sizeof(path), "/dev/%s", devname) > 0)
+ devpath = strdup(path);
+
+ /*
+ * Check that path is correct.
+ */
+ if (devpath && ((stat(devpath, &st) < 0) ||
+ !S_ISBLK(st.st_mode) ||
+ (st.st_rdev != makedev(major, minor)))) {
+ free(devpath);
+	/* Should never happen unless the user tampers with device nodes. */
+ return lookup_dev_old(major, minor);
+ }
+
+ return devpath;
+}
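+
+/*
+ * Illustrative usage (hypothetical device numbers; the returned string
+ * is allocated and must be freed by the caller):
+ *
+ *   char *path = crypt_lookup_dev("253:0");
+ *   if (path) {
+ *       printf("device node: %s\n", path);   // e.g. /dev/mapper/...
+ *       free(path);
+ *   }
+ */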
+
+static int _read_uint64(const char *sysfs_path, uint64_t *value)
+{
+ char tmp[64] = {0};
+ int fd, r;
+
+ if ((fd = open(sysfs_path, O_RDONLY)) < 0)
+ return 0;
+ r = read(fd, tmp, sizeof(tmp));
+ close(fd);
+
+ if (r <= 0)
+ return 0;
+
+ if (sscanf(tmp, "%" PRIu64, value) != 1)
+ return 0;
+
+ return 1;
+}
+
+static int _sysfs_get_uint64(int major, int minor, uint64_t *value, const char *attr)
+{
+ char path[PATH_MAX];
+
+ if (snprintf(path, sizeof(path), "/sys/dev/block/%d:%d/%s",
+ major, minor, attr) < 0)
+ return 0;
+
+ return _read_uint64(path, value);
+}
+
+static int _path_get_uint64(const char *sysfs_path, uint64_t *value, const char *attr)
+{
+ char path[PATH_MAX];
+
+ if (snprintf(path, sizeof(path), "%s/%s",
+ sysfs_path, attr) < 0)
+ return 0;
+
+ return _read_uint64(path, value);
+}
+
+int crypt_dev_is_rotational(int major, int minor)
+{
+ uint64_t val;
+
+ if (!_sysfs_get_uint64(major, minor, &val, "queue/rotational"))
+ return 1; /* if failed, expect rotational disk */
+
+ return val ? 1 : 0;
+}
+
+int crypt_dev_is_partition(const char *dev_path)
+{
+ uint64_t val;
+ struct stat st;
+
+ if (stat(dev_path, &st) < 0)
+ return 0;
+
+ if (!S_ISBLK(st.st_mode))
+ return 0;
+
+ if (!_sysfs_get_uint64(major(st.st_rdev), minor(st.st_rdev),
+ &val, "partition"))
+ return 0;
+
+ return val ? 1 : 0;
+}
+
+uint64_t crypt_dev_partition_offset(const char *dev_path)
+{
+ uint64_t val;
+ struct stat st;
+
+ if (!crypt_dev_is_partition(dev_path))
+ return 0;
+
+ if (stat(dev_path, &st) < 0)
+ return 0;
+
+ if (!_sysfs_get_uint64(major(st.st_rdev), minor(st.st_rdev),
+ &val, "start"))
+ return 0;
+
+ return val;
+}
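+
+/*
+ * Illustrative usage (hypothetical device): both helpers above read one
+ * sysfs attribute, so for /dev/sda1 the answers come from
+ * /sys/dev/block/8:1/partition and /sys/dev/block/8:1/start.
+ *
+ *   if (crypt_dev_is_partition("/dev/sda1"))
+ *       printf("starts at sector %" PRIu64 "\n",
+ *              crypt_dev_partition_offset("/dev/sda1"));
+ */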
+
+/* Try to find a partition matching the offset and size on the top-level device */
+char *crypt_get_partition_device(const char *dev_path, uint64_t offset, uint64_t size)
+{
+ char link[PATH_MAX], path[PATH_MAX], part_path[PATH_MAX], *devname;
+ char *result = NULL;
+ struct stat st;
+ size_t devname_len;
+ ssize_t len;
+ struct dirent *entry;
+ DIR *dir;
+ uint64_t part_offset, part_size;
+
+ if (stat(dev_path, &st) < 0)
+ return NULL;
+
+ if (!S_ISBLK(st.st_mode))
+ return NULL;
+
+ if (snprintf(path, sizeof(path), "/sys/dev/block/%d:%d",
+ major(st.st_rdev), minor(st.st_rdev)) < 0)
+ return NULL;
+
+ dir = opendir(path);
+ if (!dir)
+ return NULL;
+
+ len = readlink(path, link, sizeof(link) - 1);
+ if (len < 0) {
+ closedir(dir);
+ return NULL;
+ }
+
+ /* Get top level disk name for sysfs search */
+ link[len] = '\0';
+ devname = strrchr(link, '/');
+ if (!devname) {
+ closedir(dir);
+ return NULL;
+ }
+ devname++;
+
+ /* DM devices do not use kernel partitions. */
+ if (dm_is_dm_kernel_name(devname)) {
+ closedir(dir);
+ return NULL;
+ }
+
+ devname_len = strlen(devname);
+ while((entry = readdir(dir))) {
+ if (strncmp(entry->d_name, devname, devname_len))
+ continue;
+
+ if (snprintf(part_path, sizeof(part_path), "%s/%s",
+ path, entry->d_name) < 0)
+ continue;
+
+ if (stat(part_path, &st) < 0)
+ continue;
+
+ if (S_ISDIR(st.st_mode)) {
+ if (!_path_get_uint64(part_path, &part_offset, "start") ||
+ !_path_get_uint64(part_path, &part_size, "size"))
+ continue;
+ if (part_offset == offset && part_size == size &&
+ snprintf(part_path, sizeof(part_path), "/dev/%s",
+ entry->d_name) > 0) {
+ result = strdup(part_path);
+ break;
+ }
+ }
+ }
+ closedir(dir);
+
+ return result;
+}
+
+/* Try to find the base device for a partition device */
+char *crypt_get_base_device(const char *dev_path)
+{
+ char link[PATH_MAX], path[PATH_MAX], part_path[PATH_MAX], *devname;
+ struct stat st;
+ ssize_t len;
+
+ if (!crypt_dev_is_partition(dev_path))
+ return NULL;
+
+ if (stat(dev_path, &st) < 0)
+ return NULL;
+
+ if (snprintf(path, sizeof(path), "/sys/dev/block/%d:%d",
+ major(st.st_rdev), minor(st.st_rdev)) < 0)
+ return NULL;
+
+ len = readlink(path, link, sizeof(link) - 1);
+ if (len < 0)
+ return NULL;
+
+ /* Get top level disk name for sysfs search */
+ link[len] = '\0';
+ devname = strrchr(link, '/');
+ if (!devname)
+ return NULL;
+ *devname = '\0';
+ devname = strrchr(link, '/');
+ if (!devname)
+ return NULL;
+ devname++;
+
+ if (dm_is_dm_kernel_name(devname))
+ return NULL;
+
+ if (snprintf(part_path, sizeof(part_path), "/dev/%s", devname) < 0)
+ return NULL;
+
+ return strdup(part_path);
+}
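+
+/*
+ * Worked example of the link parsing above (hypothetical sysfs layout):
+ * for /dev/sda1 the link /sys/dev/block/8:1 resolves to something like
+ * ../../block/sda/sda1; stripping the last path component twice leaves
+ * "sda", so the function returns strdup("/dev/sda").
+ */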
+
+int lookup_by_disk_id(const char *dm_uuid)
+{
+ struct dirent *entry;
+ struct stat st;
+ int r = 0; /* not found */
+ DIR *dir = opendir("/dev/disk/by-id");
+
+ if (!dir)
+		/* map ENOTDIR to ENOENT; we handle both errors the same way */
+ return errno == ENOTDIR ? -ENOENT : -errno;
+
+ while ((entry = readdir(dir))) {
+ if (entry->d_name[0] == '.' ||
+ !strncmp(entry->d_name, "..", 2))
+ continue;
+
+ if (fstatat(dirfd(dir), entry->d_name, &st, AT_SYMLINK_NOFOLLOW)) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (!S_ISREG(st.st_mode) && !S_ISLNK(st.st_mode))
+ continue;
+
+ if (!strncmp(entry->d_name, dm_uuid, strlen(dm_uuid))) {
+ r = 1;
+ break;
+ }
+ }
+
+ closedir(dir);
+
+ return r;
+}
+
+int lookup_by_sysfs_uuid_field(const char *dm_uuid)
+{
+ struct dirent *entry;
+ char subpath[PATH_MAX], uuid[DM_UUID_LEN];
+ ssize_t s;
+ struct stat st;
+ int fd, len, r = 0; /* not found */
+ DIR *dir = opendir("/sys/block/");
+
+ if (!dir)
+		/* map ENOTDIR to ENOENT; we handle both errors the same way */
+ return errno == ENOTDIR ? -ENOENT : -errno;
+
+ while (r != 1 && (entry = readdir(dir))) {
+ if (entry->d_name[0] == '.' ||
+ !strncmp(entry->d_name, "..", 2))
+ continue;
+
+ len = snprintf(subpath, PATH_MAX, "%s/%s", entry->d_name, "dm/uuid");
+ if (len < 0 || len >= PATH_MAX) {
+ r = -EINVAL;
+ break;
+ }
+
+ /* looking for dm-X/dm/uuid file, symlinks are fine */
+ fd = openat(dirfd(dir), subpath, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ continue;
+
+ if (fstat(fd, &st) || !S_ISREG(st.st_mode)) {
+ close(fd);
+ continue;
+ }
+
+ /* reads binary data */
+ s = read_buffer(fd, uuid, sizeof(uuid) - 1);
+ if (s > 0) {
+ uuid[s] = '\0';
+ if (!strncmp(uuid, dm_uuid, strlen(dm_uuid)))
+ r = 1;
+ }
+
+ close(fd);
+ }
+
+ closedir(dir);
+
+ return r;
+}
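+
+/*
+ * Illustrative note on the two lookups above: both return 1 when a name
+ * starting with the DM UUID prefix exists, 0 when nothing matches, and a
+ * negative errno when the scan itself fails (hypothetical UUID below):
+ *
+ *   if (lookup_by_sysfs_uuid_field("CRYPT-TEMP-name") == 1)
+ *       ; // an active dm device still carries this UUID
+ */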
diff --git a/lib/utils_dm.h b/lib/utils_dm.h
new file mode 100644
index 0000000..79212a2
--- /dev/null
+++ b/lib/utils_dm.h
@@ -0,0 +1,246 @@
+/*
+ * libdevmapper - device-mapper backend for cryptsetup
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _UTILS_DM_H
+#define _UTILS_DM_H
+
+/* device-mapper library helpers */
+#include <stddef.h>
+#include <stdint.h>
+
+struct crypt_device;
+struct volume_key;
+struct crypt_params_verity;
+struct device;
+struct crypt_params_integrity;
+
+/* Device mapper internal flags */
+#define DM_RESUME_PRIVATE (1 << 4) /* CRYPT_ACTIVATE_PRIVATE */
+#define DM_SUSPEND_SKIP_LOCKFS (1 << 5)
+#define DM_SUSPEND_WIPE_KEY (1 << 6)
+#define DM_SUSPEND_NOFLUSH (1 << 7)
+
+static inline uint32_t act2dmflags(uint32_t act_flags)
+{
+ return (act_flags & DM_RESUME_PRIVATE);
+}
+
+/* Device mapper backend - kernel support flags */
+#define DM_KEY_WIPE_SUPPORTED (1 << 0) /* key wipe message */
+#define DM_LMK_SUPPORTED (1 << 1) /* lmk mode */
+#define DM_SECURE_SUPPORTED (1 << 2) /* wipe (secure) buffer flag */
+#define DM_PLAIN64_SUPPORTED (1 << 3) /* plain64 IV */
+#define DM_DISCARDS_SUPPORTED (1 << 4) /* discards/TRIM option is supported */
+#define DM_VERITY_SUPPORTED (1 << 5) /* dm-verity target supported */
+#define DM_TCW_SUPPORTED (1 << 6) /* tcw (TCRYPT CBC with whitening) */
+#define DM_SAME_CPU_CRYPT_SUPPORTED (1 << 7) /* same_cpu_crypt */
+#define DM_SUBMIT_FROM_CRYPT_CPUS_SUPPORTED (1 << 8) /* submit_from_crypt_cpus */
+#define DM_VERITY_ON_CORRUPTION_SUPPORTED (1 << 9) /* ignore/restart_on_corruption, ignore_zero_block */
+#define DM_VERITY_FEC_SUPPORTED (1 << 10) /* Forward Error Correction (FEC) */
+#define DM_KERNEL_KEYRING_SUPPORTED (1 << 11) /* dm-crypt allows loading kernel keyring keys */
+#define DM_INTEGRITY_SUPPORTED (1 << 12) /* dm-integrity target supported */
+#define DM_SECTOR_SIZE_SUPPORTED (1 << 13) /* support for sector size setting in dm-crypt/dm-integrity */
+#define DM_CAPI_STRING_SUPPORTED (1 << 14) /* support for cryptoapi format cipher definition */
+#define DM_DEFERRED_SUPPORTED (1 << 15) /* deferred removal of device */
+#define DM_INTEGRITY_RECALC_SUPPORTED (1 << 16) /* dm-integrity automatic recalculation supported */
+#define DM_INTEGRITY_BITMAP_SUPPORTED (1 << 17) /* dm-integrity bitmap mode supported */
+#define DM_GET_TARGET_VERSION_SUPPORTED (1 << 18) /* dm DM_GET_TARGET version ioctl supported */
+#define DM_INTEGRITY_FIX_PADDING_SUPPORTED (1 << 19) /* supports the parameter fix_padding that fixes a bug that caused excessive padding */
+#define DM_BITLK_EBOIV_SUPPORTED (1 << 20) /* EBOIV for BITLK supported */
+#define DM_BITLK_ELEPHANT_SUPPORTED (1 << 21) /* Elephant diffuser for BITLK supported */
+#define DM_VERITY_SIGNATURE_SUPPORTED (1 << 22) /* Verity option root_hash_sig_key_desc supported */
+#define DM_INTEGRITY_DISCARDS_SUPPORTED (1 << 23) /* dm-integrity discards/TRIM option is supported */
+#define DM_INTEGRITY_RESIZE_SUPPORTED (1 << 23) /* dm-integrity resize of the integrity device supported (introduced in the same version as discards) */
+#define DM_VERITY_PANIC_CORRUPTION_SUPPORTED (1 << 24) /* dm-verity panic on corruption */
+#define DM_CRYPT_NO_WORKQUEUE_SUPPORTED (1 << 25) /* dm-crypt support for bypassing workqueues */
+#define DM_INTEGRITY_FIX_HMAC_SUPPORTED (1 << 26) /* hmac covers also superblock */
+#define DM_INTEGRITY_RESET_RECALC_SUPPORTED (1 << 27) /* dm-integrity automatic recalculation supported */
+#define DM_VERITY_TASKLETS_SUPPORTED (1 << 28) /* dm-verity tasklets supported */
+
+typedef enum { DM_CRYPT = 0, DM_VERITY, DM_INTEGRITY, DM_LINEAR, DM_ERROR, DM_ZERO, DM_UNKNOWN } dm_target_type;
+enum tdirection { TARGET_EMPTY = 0, TARGET_SET, TARGET_QUERY };
+
+int dm_flags(struct crypt_device *cd, dm_target_type target, uint32_t *flags);
+
+#define DM_ACTIVE_DEVICE (1 << 0)
+#define DM_ACTIVE_UUID (1 << 1)
+#define DM_ACTIVE_HOLDERS (1 << 2)
+
+#define DM_ACTIVE_CRYPT_CIPHER (1 << 3)
+#define DM_ACTIVE_CRYPT_KEYSIZE (1 << 4)
+#define DM_ACTIVE_CRYPT_KEY (1 << 5)
+
+#define DM_ACTIVE_VERITY_ROOT_HASH (1 << 6)
+#define DM_ACTIVE_VERITY_HASH_DEVICE (1 << 7)
+#define DM_ACTIVE_VERITY_PARAMS (1 << 8)
+
+#define DM_ACTIVE_INTEGRITY_PARAMS (1 << 9)
+
+#define DM_ACTIVE_JOURNAL_CRYPT_KEY (1 << 10)
+#define DM_ACTIVE_JOURNAL_CRYPT_KEYSIZE (1 << 11)
+
+#define DM_ACTIVE_JOURNAL_MAC_KEY (1 << 12)
+#define DM_ACTIVE_JOURNAL_MAC_KEYSIZE (1 << 13)
+
+struct dm_target {
+ dm_target_type type;
+ enum tdirection direction;
+ uint64_t offset;
+ uint64_t size;
+ struct device *data_device;
+ union {
+ struct {
+ const char *cipher;
+ const char *integrity;
+
+ /* Active key for device */
+ struct volume_key *vk;
+
+ /* struct crypt_active_device */
+ uint64_t offset; /* offset in sectors */
+ uint64_t iv_offset; /* IV initialisation sector */
+ uint32_t tag_size; /* additional on-disk tag size */
+ uint32_t sector_size; /* encryption sector size */
+ } crypt;
+ struct {
+ struct device *hash_device;
+ struct device *fec_device;
+
+ const char *root_hash;
+ uint32_t root_hash_size;
+ const char *root_hash_sig_key_desc;
+
+ uint64_t hash_offset; /* hash offset in blocks (not header) */
+ uint64_t fec_offset; /* FEC offset in blocks (not header) */
+ uint64_t fec_blocks; /* FEC blocks covering data + hash + padding (foreign metadata)*/
+ struct crypt_params_verity *vp;
+ } verity;
+ struct {
+ uint64_t journal_size;
+ uint32_t journal_watermark;
+ uint32_t journal_commit_time;
+ uint32_t interleave_sectors;
+ uint32_t tag_size;
+ uint64_t offset; /* offset in sectors */
+ uint32_t sector_size; /* integrity sector size */
+ uint32_t buffer_sectors;
+
+ const char *integrity;
+ /* Active key for device */
+ struct volume_key *vk;
+
+ const char *journal_integrity;
+ struct volume_key *journal_integrity_key;
+
+ const char *journal_crypt;
+ struct volume_key *journal_crypt_key;
+
+ struct device *meta_device;
+
+ bool fix_padding;
+ bool fix_hmac;
+ bool legacy_recalc;
+ } integrity;
+ struct {
+ uint64_t offset;
+ } linear;
+ struct {
+ } zero;
+ } u;
+
+ char *params;
+ struct dm_target *next;
+};
+
+struct crypt_dm_active_device {
+ uint64_t size; /* active device size */
+ uint32_t flags; /* activation flags */
+ const char *uuid;
+
+ unsigned holders:1; /* device holders detected (on query only) */
+
+ struct dm_target segment;
+};
+
+static inline bool single_segment(const struct crypt_dm_active_device *dmd)
+{
+ return dmd && !dmd->segment.next;
+}
+
+void dm_backend_init(struct crypt_device *cd);
+void dm_backend_exit(struct crypt_device *cd);
+
+int dm_targets_allocate(struct dm_target *first, unsigned count);
+void dm_targets_free(struct crypt_device *cd, struct crypt_dm_active_device *dmd);
+
+int dm_crypt_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *data_device, struct volume_key *vk, const char *cipher,
+ uint64_t iv_offset, uint64_t data_offset, const char *integrity,
+ uint32_t tag_size, uint32_t sector_size);
+int dm_verity_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *data_device, struct device *hash_device, struct device *fec_device,
+ const char *root_hash, uint32_t root_hash_size, const char* root_hash_sig_key_desc,
+ uint64_t hash_offset_block, uint64_t fec_blocks, struct crypt_params_verity *vp);
+int dm_integrity_target_set(struct crypt_device *cd,
+ struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *meta_device,
+ struct device *data_device, uint64_t tag_size, uint64_t offset, uint32_t sector_size,
+ struct volume_key *vk,
+ struct volume_key *journal_crypt_key, struct volume_key *journal_mac_key,
+ const struct crypt_params_integrity *ip);
+int dm_linear_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size,
+ struct device *data_device, uint64_t data_offset);
+int dm_zero_target_set(struct dm_target *tgt, uint64_t seg_offset, uint64_t seg_size);
+
+int dm_remove_device(struct crypt_device *cd, const char *name, uint32_t flags);
+int dm_status_device(struct crypt_device *cd, const char *name);
+int dm_status_suspended(struct crypt_device *cd, const char *name);
+int dm_status_verity_ok(struct crypt_device *cd, const char *name);
+int dm_status_integrity_failures(struct crypt_device *cd, const char *name, uint64_t *count);
+int dm_query_device(struct crypt_device *cd, const char *name,
+ uint32_t get_flags, struct crypt_dm_active_device *dmd);
+int dm_device_deps(struct crypt_device *cd, const char *name, const char *prefix,
+ char **names, size_t names_length);
+int dm_create_device(struct crypt_device *cd, const char *name,
+ const char *type, struct crypt_dm_active_device *dmd);
+int dm_reload_device(struct crypt_device *cd, const char *name,
+ struct crypt_dm_active_device *dmd, uint32_t dmflags, unsigned resume);
+int dm_suspend_device(struct crypt_device *cd, const char *name, uint32_t dmflags);
+int dm_resume_device(struct crypt_device *cd, const char *name, uint32_t dmflags);
+int dm_resume_and_reinstate_key(struct crypt_device *cd, const char *name,
+ const struct volume_key *vk);
+int dm_error_device(struct crypt_device *cd, const char *name);
+int dm_clear_device(struct crypt_device *cd, const char *name);
+int dm_cancel_deferred_removal(const char *name);
+
+const char *dm_get_dir(void);
+
+int lookup_dm_dev_by_uuid(struct crypt_device *cd, const char *uuid, const char *type);
+
+/* These are DM helpers used only by utils_devpath file */
+int dm_is_dm_device(int major);
+int dm_is_dm_kernel_name(const char *name);
+char *dm_device_path(const char *prefix, int major, int minor);
+char *dm_device_name(const char *path);
+
+#endif /* _UTILS_DM_H */
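+
+/*
+ * Illustrative sketch (abbreviated; names and values are hypothetical):
+ * building a single dm-crypt segment and creating the device with the
+ * helpers declared above.
+ *
+ *   struct crypt_dm_active_device dmd = { .size = dev_sectors };
+ *   r = dm_crypt_target_set(&dmd.segment, 0, dev_sectors, data_device,
+ *                           vk, "aes-xts-plain64", 0, data_offset,
+ *                           NULL, 0, SECTOR_SIZE);
+ *   if (!r)
+ *       r = dm_create_device(cd, "name", CRYPT_LUKS2, &dmd);
+ *   dm_targets_free(cd, &dmd);
+ */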
diff --git a/lib/utils_io.c b/lib/utils_io.c
new file mode 100644
index 0000000..a5bc501
--- /dev/null
+++ b/lib/utils_io.c
@@ -0,0 +1,299 @@
+/*
+ * utils - miscellaneous I/O utilities for cryptsetup
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include "utils_io.h"
+
+/* coverity[ -taint_source : arg-1 ] */
+static ssize_t _read_buffer(int fd, void *buf, size_t length, volatile int *quit)
+{
+ size_t read_size = 0;
+ ssize_t r;
+
+ if (fd < 0 || !buf)
+ return -EINVAL;
+
+ do {
+ r = read(fd, buf, length - read_size);
+ if (r == -1 && errno != EINTR)
+ return r;
+ if (r > 0) {
+ read_size += (size_t)r;
+ buf = (uint8_t*)buf + r;
+ }
+ if (r == 0 || (quit && *quit))
+ return (ssize_t)read_size;
+ } while (read_size != length);
+
+ return (ssize_t)length;
+}
+
+ssize_t read_buffer(int fd, void *buf, size_t length)
+{
+ return _read_buffer(fd, buf, length, NULL);
+}
+
+ssize_t read_buffer_intr(int fd, void *buf, size_t length, volatile int *quit)
+{
+ return _read_buffer(fd, buf, length, quit);
+}
+
+static ssize_t _write_buffer(int fd, const void *buf, size_t length, volatile int *quit)
+{
+ size_t write_size = 0;
+ ssize_t w;
+
+ if (fd < 0 || !buf || !length)
+ return -EINVAL;
+
+ do {
+ w = write(fd, buf, length - write_size);
+ if (w < 0 && errno != EINTR)
+ return w;
+ if (w > 0) {
+ write_size += (size_t) w;
+ buf = (const uint8_t*)buf + w;
+ }
+ if (w == 0 || (quit && *quit))
+ return (ssize_t)write_size;
+ } while (write_size != length);
+
+ return (ssize_t)write_size;
+}
+
+ssize_t write_buffer(int fd, const void *buf, size_t length)
+{
+ return _write_buffer(fd, buf, length, NULL);
+}
+
+ssize_t write_buffer_intr(int fd, const void *buf, size_t length, volatile int *quit)
+{
+ return _write_buffer(fd, buf, length, quit);
+}
+
+ssize_t write_blockwise(int fd, size_t bsize, size_t alignment,
+ void *orig_buf, size_t length)
+{
+ void *hangover_buf = NULL, *buf = NULL;
+ size_t hangover, solid;
+ ssize_t r, ret = -1;
+
+ if (fd == -1 || !orig_buf || !bsize || !alignment)
+ return -1;
+
+ hangover = length % bsize;
+ solid = length - hangover;
+
+ if ((size_t)orig_buf & (alignment - 1)) {
+ if (posix_memalign(&buf, alignment, length))
+ return -1;
+ memcpy(buf, orig_buf, length);
+ } else
+ buf = orig_buf;
+
+ if (solid) {
+ r = write_buffer(fd, buf, solid);
+ if (r < 0 || r != (ssize_t)solid)
+ goto out;
+ }
+
+ if (hangover) {
+ if (posix_memalign(&hangover_buf, alignment, bsize))
+ goto out;
+ memset(hangover_buf, 0, bsize);
+
+ r = read_buffer(fd, hangover_buf, bsize);
+ if (r < 0)
+ goto out;
+
+ if (lseek(fd, -(off_t)r, SEEK_CUR) < 0)
+ goto out;
+
+ memcpy(hangover_buf, (char*)buf + solid, hangover);
+
+ r = write_buffer(fd, hangover_buf, bsize);
+ if (r < 0 || r < (ssize_t)hangover)
+ goto out;
+ }
+ ret = length;
+out:
+ free(hangover_buf);
+ if (buf != orig_buf)
+ free(buf);
+ return ret;
+}
+
+ssize_t read_blockwise(int fd, size_t bsize, size_t alignment,
+ void *orig_buf, size_t length)
+{
+ void *hangover_buf = NULL, *buf = NULL;
+ size_t hangover, solid;
+ ssize_t r, ret = -1;
+
+ if (fd == -1 || !orig_buf || !bsize || !alignment)
+ return -1;
+
+ hangover = length % bsize;
+ solid = length - hangover;
+
+ if ((size_t)orig_buf & (alignment - 1)) {
+ if (posix_memalign(&buf, alignment, length))
+ return -1;
+ } else
+ buf = orig_buf;
+
+ r = read_buffer(fd, buf, solid);
+ if (r < 0 || r != (ssize_t)solid)
+ goto out;
+
+ if (hangover) {
+ if (posix_memalign(&hangover_buf, alignment, bsize))
+ goto out;
+ r = read_buffer(fd, hangover_buf, bsize);
+ if (r < 0 || r < (ssize_t)hangover)
+ goto out;
+
+ memcpy((char *)buf + solid, hangover_buf, hangover);
+ }
+ ret = length;
+out:
+ free(hangover_buf);
+ if (buf != orig_buf) {
+ if (ret != -1)
+ memcpy(orig_buf, buf, length);
+ free(buf);
+ }
+ return ret;
+}
+
+/*
+ * Combines lseek with a blockwise write. write_blockwise() already copes
+ * with a short unaligned tail, but we also need to handle a short unaligned
+ * head. That head is implied by the read/write offset, which cannot itself
+ * be set to a non-aligned boundary, so the seek is combined with the write.
+ */
+ssize_t write_lseek_blockwise(int fd, size_t bsize, size_t alignment,
+ void *buf, size_t length, off_t offset)
+{
+ void *frontPadBuf = NULL;
+ size_t frontHang, innerCount = 0;
+ ssize_t r, ret = -1;
+
+ if (fd == -1 || !buf || !bsize || !alignment)
+ return -1;
+
+ if (offset < 0)
+ offset = lseek(fd, offset, SEEK_END);
+
+ if (offset < 0)
+ return -1;
+
+ frontHang = offset % bsize;
+
+ if (lseek(fd, offset - frontHang, SEEK_SET) < 0)
+ return -1;
+
+ if (frontHang && length) {
+ if (posix_memalign(&frontPadBuf, alignment, bsize))
+ return -1;
+
+ innerCount = bsize - frontHang;
+ if (innerCount > length)
+ innerCount = length;
+
+ r = read_buffer(fd, frontPadBuf, bsize);
+ if (r < 0 || r < (ssize_t)(frontHang + innerCount))
+ goto out;
+
+ memcpy((char*)frontPadBuf + frontHang, buf, innerCount);
+
+ if (lseek(fd, offset - frontHang, SEEK_SET) < 0)
+ goto out;
+
+ r = write_buffer(fd, frontPadBuf, bsize);
+ if (r < 0 || r != (ssize_t)bsize)
+ goto out;
+
+ buf = (char*)buf + innerCount;
+ length -= innerCount;
+ }
+
+ ret = length ? write_blockwise(fd, bsize, alignment, buf, length) : 0;
+ if (ret >= 0)
+ ret += innerCount;
+out:
+ free(frontPadBuf);
+ return ret;
+}
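+
+/*
+ * Worked example of the head alignment above (hypothetical values):
+ * with offset = 1234 and bsize = 512, frontHang = 1234 % 512 = 210, so
+ * the seek rewinds to 1024, innerCount = 512 - 210 = 302 caller bytes
+ * are merged into one read-modify-write block, and the remaining bytes
+ * go through write_blockwise().
+ */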
+
+ssize_t read_lseek_blockwise(int fd, size_t bsize, size_t alignment,
+ void *buf, size_t length, off_t offset)
+{
+ void *frontPadBuf = NULL;
+ size_t frontHang, innerCount = 0;
+ ssize_t r, ret = -1;
+
+ if (fd == -1 || !buf || bsize <= 0)
+ return -1;
+
+ if (offset < 0)
+ offset = lseek(fd, offset, SEEK_END);
+
+ if (offset < 0)
+ return -1;
+
+ frontHang = offset % bsize;
+
+ if (lseek(fd, offset - frontHang, SEEK_SET) < 0)
+ return -1;
+
+ if (frontHang && length) {
+ if (posix_memalign(&frontPadBuf, alignment, bsize))
+ return -1;
+
+ innerCount = bsize - frontHang;
+ if (innerCount > length)
+ innerCount = length;
+
+ r = read_buffer(fd, frontPadBuf, bsize);
+ if (r < 0 || r < (ssize_t)(frontHang + innerCount))
+ goto out;
+
+ memcpy(buf, (char*)frontPadBuf + frontHang, innerCount);
+
+ buf = (char*)buf + innerCount;
+ length -= innerCount;
+ }
+
+ ret = read_blockwise(fd, bsize, alignment, buf, length);
+ if (ret >= 0)
+ ret += innerCount;
+out:
+ free(frontPadBuf);
+ return ret;
+}
diff --git a/lib/utils_io.h b/lib/utils_io.h
new file mode 100644
index 0000000..f8b3f00
--- /dev/null
+++ b/lib/utils_io.h
@@ -0,0 +1,43 @@
+/*
+ * utils - miscellaneous I/O utilities for cryptsetup
+ *
+ * Copyright (C) 2004 Jana Saout <jana@saout.de>
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_UTILS_IO_H
+#define _CRYPTSETUP_UTILS_IO_H
+
+#include <stddef.h>
+#include <sys/types.h>
+
+ssize_t read_buffer(int fd, void *buf, size_t length);
+ssize_t read_buffer_intr(int fd, void *buf, size_t length, volatile int *quit);
+ssize_t write_buffer(int fd, const void *buf, size_t length);
+ssize_t write_buffer_intr(int fd, const void *buf, size_t length, volatile int *quit);
+ssize_t write_blockwise(int fd, size_t bsize, size_t alignment,
+ void *orig_buf, size_t length);
+ssize_t read_blockwise(int fd, size_t bsize, size_t alignment,
+ void *orig_buf, size_t length);
+ssize_t write_lseek_blockwise(int fd, size_t bsize, size_t alignment,
+ void *buf, size_t length, off_t offset);
+ssize_t read_lseek_blockwise(int fd, size_t bsize, size_t alignment,
+ void *buf, size_t length, off_t offset);
+
+#endif
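+
+/*
+ * Illustrative sketch (hypothetical values; a device opened with
+ * O_DIRECT is why bsize and memory alignment matter): reading 4000
+ * bytes from unaligned offset 777.
+ *
+ *   char *buf = aligned_alloc(4096, 4096);
+ *   if (buf && read_lseek_blockwise(fd, 512, 4096, buf, 4000, 777) == 4000)
+ *       ; // buf now holds device bytes 777..4776
+ */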
diff --git a/lib/utils_keyring.c b/lib/utils_keyring.c
new file mode 100644
index 0000000..a0c4db1
--- /dev/null
+++ b/lib/utils_keyring.c
@@ -0,0 +1,237 @@
+/*
+ * kernel keyring utilities
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
+#include "libcryptsetup.h"
+#include "libcryptsetup_macros.h"
+#include "utils_keyring.h"
+
+#ifndef HAVE_KEY_SERIAL_T
+#define HAVE_KEY_SERIAL_T
+typedef int32_t key_serial_t;
+#endif
+
+#ifdef KERNEL_KEYRING
+
+static const struct {
+ key_type_t type;
+ const char *type_name;
+} key_types[] = {
+ { LOGON_KEY, "logon" },
+ { USER_KEY, "user" },
+};
+
+#include <linux/keyctl.h>
+
+/* request_key */
+static key_serial_t request_key(const char *type,
+ const char *description,
+ const char *callout_info,
+ key_serial_t keyring)
+{
+ return syscall(__NR_request_key, type, description, callout_info, keyring);
+}
+
+/* add_key */
+static key_serial_t add_key(const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_serial_t keyring)
+{
+ return syscall(__NR_add_key, type, description, payload, plen, keyring);
+}
+
+/* keyctl_read */
+static long keyctl_read(key_serial_t key, char *buffer, size_t buflen)
+{
+ return syscall(__NR_keyctl, KEYCTL_READ, key, buffer, buflen);
+}
+
+/* keyctl_revoke */
+static long keyctl_revoke(key_serial_t key)
+{
+ return syscall(__NR_keyctl, KEYCTL_REVOKE, key);
+}
+
+/* keyctl_unlink */
+static long keyctl_unlink(key_serial_t key, key_serial_t keyring)
+{
+ return syscall(__NR_keyctl, KEYCTL_UNLINK, key, keyring);
+}
+#endif
+
+int keyring_check(void)
+{
+#ifdef KERNEL_KEYRING
+ /* logon type key descriptions must be in format "prefix:description" */
+ return syscall(__NR_request_key, "logon", "dummy", NULL, 0) == -1l && errno != ENOSYS;
+#else
+ return 0;
+#endif
+}
+
+int keyring_add_key_in_thread_keyring(key_type_t ktype, const char *key_desc, const void *key, size_t key_size)
+{
+#ifdef KERNEL_KEYRING
+ key_serial_t kid;
+ const char *type_name = key_type_name(ktype);
+
+ if (!type_name || !key_desc)
+ return -EINVAL;
+
+ kid = add_key(type_name, key_desc, key, key_size, KEY_SPEC_THREAD_KEYRING);
+ if (kid < 0)
+ return -errno;
+
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+/* currently used in client utilities only */
+int keyring_add_key_in_user_keyring(key_type_t ktype, const char *key_desc, const void *key, size_t key_size)
+{
+#ifdef KERNEL_KEYRING
+ const char *type_name = key_type_name(ktype);
+ key_serial_t kid;
+
+ if (!type_name || !key_desc)
+ return -EINVAL;
+
+ kid = add_key(type_name, key_desc, key, key_size, KEY_SPEC_USER_KEYRING);
+ if (kid < 0)
+ return -errno;
+
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+/* alias for the same code */
+int keyring_get_key(const char *key_desc,
+ char **key,
+ size_t *key_size)
+{
+ return keyring_get_passphrase(key_desc, key, key_size);
+}
+
+int keyring_get_passphrase(const char *key_desc,
+ char **passphrase,
+ size_t *passphrase_len)
+{
+#ifdef KERNEL_KEYRING
+ int err;
+ key_serial_t kid;
+ long ret;
+ char *buf = NULL;
+ size_t len = 0;
+
+ do
+ kid = request_key(key_type_name(USER_KEY), key_desc, NULL, 0);
+ while (kid < 0 && errno == EINTR);
+
+ if (kid < 0)
+ return -errno;
+
+ /* just get payload size */
+ ret = keyctl_read(kid, NULL, 0);
+ if (ret > 0) {
+ len = ret;
+ buf = crypt_safe_alloc(len);
+ if (!buf)
+ return -ENOMEM;
+
+ /* retrieve actual payload data */
+ ret = keyctl_read(kid, buf, len);
+ }
+
+ if (ret < 0) {
+ err = errno;
+ crypt_safe_free(buf);
+ return -err;
+ }
+
+ *passphrase = buf;
+ *passphrase_len = len;
+
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
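+
+/*
+ * Illustrative usage (hypothetical key description; the buffer comes
+ * from crypt_safe_alloc() and must be released with crypt_safe_free()):
+ *
+ *   char *pass; size_t len;
+ *   if (!keyring_get_passphrase("cryptsetup:demo", &pass, &len)) {
+ *       // ... use pass/len ...
+ *       crypt_safe_free(pass);
+ *   }
+ */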
+
+static int keyring_revoke_and_unlink_key_type(const char *type_name, const char *key_desc)
+{
+#ifdef KERNEL_KEYRING
+ key_serial_t kid;
+
+ if (!type_name || !key_desc)
+ return -EINVAL;
+
+ do
+ kid = request_key(type_name, key_desc, NULL, 0);
+ while (kid < 0 && errno == EINTR);
+
+ if (kid < 0)
+ return 0;
+
+ if (keyctl_revoke(kid))
+ return -errno;
+
+	/*
+	 * Best effort only: the key could also have been linked
+	 * into some other keyring, but its payload is revoked
+	 * now anyway.
+	 */
+ keyctl_unlink(kid, KEY_SPEC_THREAD_KEYRING);
+ keyctl_unlink(kid, KEY_SPEC_PROCESS_KEYRING);
+ keyctl_unlink(kid, KEY_SPEC_USER_KEYRING);
+
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+const char *key_type_name(key_type_t type)
+{
+#ifdef KERNEL_KEYRING
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(key_types); i++)
+ if (type == key_types[i].type)
+ return key_types[i].type_name;
+#endif
+ return NULL;
+}
+
+int keyring_revoke_and_unlink_key(key_type_t ktype, const char *key_desc)
+{
+ return keyring_revoke_and_unlink_key_type(key_type_name(ktype), key_desc);
+}
diff --git a/lib/utils_keyring.h b/lib/utils_keyring.h
new file mode 100644
index 0000000..0248862
--- /dev/null
+++ b/lib/utils_keyring.h
@@ -0,0 +1,55 @@
+/*
+ * kernel keyring syscall wrappers
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _UTILS_KEYRING
+#define _UTILS_KEYRING
+
+#include <stddef.h>
+
+typedef enum { LOGON_KEY = 0, USER_KEY } key_type_t;
+
+const char *key_type_name(key_type_t ktype);
+
+int keyring_check(void);
+
+int keyring_get_key(const char *key_desc,
+ char **key,
+ size_t *key_size);
+
+int keyring_get_passphrase(const char *key_desc,
+ char **passphrase,
+ size_t *passphrase_len);
+
+int keyring_add_key_in_thread_keyring(
+ key_type_t ktype,
+ const char *key_desc,
+ const void *key,
+ size_t key_size);
+
+int keyring_add_key_in_user_keyring(
+ key_type_t ktype,
+ const char *key_desc,
+ const void *key,
+ size_t key_size);
+
+int keyring_revoke_and_unlink_key(key_type_t ktype, const char *key_desc);
+
+#endif
diff --git a/lib/utils_loop.c b/lib/utils_loop.c
new file mode 100644
index 0000000..9b31603
--- /dev/null
+++ b/lib/utils_loop.c
@@ -0,0 +1,331 @@
+/*
+ * loopback block device utilities
+ *
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_SYSMACROS_H
+# include <sys/sysmacros.h> /* for major, minor */
+#endif
+#include <linux/types.h>
+#include <linux/loop.h>
+
+#include "utils_loop.h"
+#include "libcryptsetup_macros.h"
+
+#define LOOP_DEV_MAJOR 7
+
+#ifndef LO_FLAGS_AUTOCLEAR
+#define LO_FLAGS_AUTOCLEAR 4
+#endif
+
+#ifndef LOOP_CTL_GET_FREE
+#define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LOOP_SET_CAPACITY
+#define LOOP_SET_CAPACITY 0x4C07
+#endif
+
+#ifndef LOOP_SET_BLOCK_SIZE
+#define LOOP_SET_BLOCK_SIZE 0x4C09
+#endif
+
+#ifndef LOOP_CONFIGURE
+#define LOOP_CONFIGURE 0x4C0A
+struct loop_config {
+ __u32 fd;
+ __u32 block_size;
+ struct loop_info64 info;
+ __u64 __reserved[8];
+};
+#endif
+
+static char *crypt_loop_get_device_old(void)
+{
+ char dev[64];
+ int i, loop_fd;
+ struct loop_info64 lo64 = {0};
+
+ for (i = 0; i < 256; i++) {
+ sprintf(dev, "/dev/loop%d", i);
+
+ loop_fd = open(dev, O_RDONLY);
+ if (loop_fd < 0)
+ return NULL;
+
+ if (ioctl(loop_fd, LOOP_GET_STATUS64, &lo64) &&
+ errno == ENXIO) {
+ close(loop_fd);
+ return strdup(dev);
+ }
+ close(loop_fd);
+ }
+
+ return NULL;
+}
+
+static char *crypt_loop_get_device(void)
+{
+ char dev[64];
+ int i, loop_fd;
+ struct stat st;
+
+ loop_fd = open("/dev/loop-control", O_RDONLY);
+ if (loop_fd < 0)
+ return crypt_loop_get_device_old();
+
+ i = ioctl(loop_fd, LOOP_CTL_GET_FREE);
+ if (i < 0) {
+ close(loop_fd);
+ return NULL;
+ }
+ close(loop_fd);
+
+ if (sprintf(dev, "/dev/loop%d", i) < 0)
+ return NULL;
+
+ if (stat(dev, &st) || !S_ISBLK(st.st_mode))
+ return NULL;
+
+ return strdup(dev);
+}
+
+int crypt_loop_attach(char **loop, const char *file, int offset,
+ int autoclear, int *readonly, size_t blocksize)
+{
+ struct loop_config config = {0};
+ char *lo_file_name;
+ int loop_fd = -1, file_fd = -1, r = 1;
+ int fallback = 0;
+
+ *loop = NULL;
+
+ file_fd = open(file, (*readonly ? O_RDONLY : O_RDWR) | O_EXCL);
+ if (file_fd < 0 && (errno == EROFS || errno == EACCES) && !*readonly) {
+ *readonly = 1;
+ file_fd = open(file, O_RDONLY | O_EXCL);
+ }
+ if (file_fd < 0)
+ goto out;
+
+ config.fd = file_fd;
+
+ lo_file_name = (char*)config.info.lo_file_name;
+ lo_file_name[LO_NAME_SIZE-1] = '\0';
+ strncpy(lo_file_name, file, LO_NAME_SIZE-1);
+ config.info.lo_offset = offset;
+ if (autoclear)
+ config.info.lo_flags |= LO_FLAGS_AUTOCLEAR;
+ if (blocksize > SECTOR_SIZE)
+ config.block_size = blocksize;
+
+ while (loop_fd < 0) {
+ *loop = crypt_loop_get_device();
+ if (!*loop)
+ goto out;
+
+ loop_fd = open(*loop, *readonly ? O_RDONLY : O_RDWR);
+ if (loop_fd < 0)
+ goto out;
+ if (ioctl(loop_fd, LOOP_CONFIGURE, &config) < 0) {
+ if (errno == EINVAL || errno == ENOTTY) {
+ free(*loop);
+ *loop = NULL;
+
+ close(loop_fd);
+ loop_fd = -1;
+
+ /* kernel doesn't support LOOP_CONFIGURE */
+ fallback = 1;
+ break;
+ }
+ if (errno != EBUSY)
+ goto out;
+ free(*loop);
+ *loop = NULL;
+
+ close(loop_fd);
+ loop_fd = -1;
+ }
+ }
+
+ if (fallback) {
+ while (loop_fd < 0) {
+ *loop = crypt_loop_get_device();
+ if (!*loop)
+ goto out;
+
+ loop_fd = open(*loop, *readonly ? O_RDONLY : O_RDWR);
+ if (loop_fd < 0)
+ goto out;
+ if (ioctl(loop_fd, LOOP_SET_FD, file_fd) < 0) {
+ if (errno != EBUSY)
+ goto out;
+ free(*loop);
+ *loop = NULL;
+
+ close(loop_fd);
+ loop_fd = -1;
+ }
+ }
+
+ if (blocksize > SECTOR_SIZE)
+ (void)ioctl(loop_fd, LOOP_SET_BLOCK_SIZE, (unsigned long)blocksize);
+
+ if (ioctl(loop_fd, LOOP_SET_STATUS64, &config.info) < 0) {
+ (void)ioctl(loop_fd, LOOP_CLR_FD, 0);
+ goto out;
+ }
+ }
+
+ /* Verify that autoclear is really set */
+ if (autoclear) {
+ memset(&config.info, 0, sizeof(config.info));
+ if (ioctl(loop_fd, LOOP_GET_STATUS64, &config.info) < 0 ||
+ !(config.info.lo_flags & LO_FLAGS_AUTOCLEAR)) {
+ (void)ioctl(loop_fd, LOOP_CLR_FD, 0);
+ goto out;
+ }
+ }
+
+ r = 0;
+out:
+ if (r && loop_fd >= 0)
+ close(loop_fd);
+ if (file_fd >= 0)
+ close(file_fd);
+ if (r && *loop) {
+ free(*loop);
+ *loop = NULL;
+ }
+ return r ? -1 : loop_fd;
+}
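+
+/*
+ * Illustrative usage (hypothetical image file; error handling
+ * abbreviated). On success the returned fd keeps the loop device busy
+ * until closed.
+ *
+ *   char *loop = NULL; int ro = 0;
+ *   int fd = crypt_loop_attach(&loop, "/tmp/disk.img", 0, 0, &ro, 0);
+ *   if (fd >= 0) {
+ *       // ... use the loop device ...
+ *       close(fd);
+ *       crypt_loop_detach(loop);
+ *   }
+ *   free(loop);
+ */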
+
+int crypt_loop_detach(const char *loop)
+{
+ int loop_fd = -1, r = 1;
+
+ loop_fd = open(loop, O_RDONLY);
+ if (loop_fd < 0)
+ return 1;
+
+ if (!ioctl(loop_fd, LOOP_CLR_FD, 0))
+ r = 0;
+
+ close(loop_fd);
+ return r;
+}
+
+int crypt_loop_resize(const char *loop)
+{
+ int loop_fd = -1, r = 1;
+
+ loop_fd = open(loop, O_RDONLY);
+ if (loop_fd < 0)
+ return 1;
+
+ if (!ioctl(loop_fd, LOOP_SET_CAPACITY, 0))
+ r = 0;
+
+ close(loop_fd);
+ return r;
+}
+
+static char *_ioctl_backing_file(const char *loop)
+{
+ struct loop_info64 lo64 = {0};
+ int loop_fd;
+
+ loop_fd = open(loop, O_RDONLY);
+ if (loop_fd < 0)
+ return NULL;
+
+ if (ioctl(loop_fd, LOOP_GET_STATUS64, &lo64) < 0) {
+ close(loop_fd);
+ return NULL;
+ }
+
+ lo64.lo_file_name[LO_NAME_SIZE-2] = '*';
+ lo64.lo_file_name[LO_NAME_SIZE-1] = 0;
+
+ close(loop_fd);
+
+ return strdup((char*)lo64.lo_file_name);
+}
+
+static char *_sysfs_backing_file(const char *loop)
+{
+ struct stat st;
+ char buf[PATH_MAX];
+	ssize_t len;	/* read() may return -1 */
+ int fd;
+
+ if (stat(loop, &st) || !S_ISBLK(st.st_mode))
+ return NULL;
+
+ if (snprintf(buf, sizeof(buf), "/sys/dev/block/%d:%d/loop/backing_file",
+ major(st.st_rdev), minor(st.st_rdev)) < 0)
+ return NULL;
+
+ fd = open(buf, O_RDONLY);
+ if (fd < 0)
+ return NULL;
+
+ len = read(fd, buf, PATH_MAX);
+ close(fd);
+ if (len < 2)
+ return NULL;
+
+ buf[len - 1] = '\0';
+ return strdup(buf);
+}
+
+char *crypt_loop_backing_file(const char *loop)
+{
+ char *bf;
+
+ if (!crypt_loop_device(loop))
+ return NULL;
+
+ bf = _sysfs_backing_file(loop);
+ return bf ?: _ioctl_backing_file(loop);
+}
+
+int crypt_loop_device(const char *loop)
+{
+ struct stat st;
+
+ if (!loop)
+ return 0;
+
+ if (stat(loop, &st) || !S_ISBLK(st.st_mode) ||
+ major(st.st_rdev) != LOOP_DEV_MAJOR)
+ return 0;
+
+ return 1;
+}
diff --git a/lib/utils_loop.h b/lib/utils_loop.h
new file mode 100644
index 0000000..c1f6356
--- /dev/null
+++ b/lib/utils_loop.h
@@ -0,0 +1,34 @@
+/*
+ * loopback block device utilities
+ *
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _UTILS_LOOP_H
+#define _UTILS_LOOP_H
+
+/* loopback device helpers */
+
+char *crypt_loop_backing_file(const char *loop);
+int crypt_loop_device(const char *loop);
+int crypt_loop_attach(char **loop, const char *file, int offset,
+ int autoclear, int *readonly, size_t blocksize);
+int crypt_loop_detach(const char *loop);
+int crypt_loop_resize(const char *loop);
+
+#endif /* _UTILS_LOOP_H */
diff --git a/lib/utils_pbkdf.c b/lib/utils_pbkdf.c
new file mode 100644
index 0000000..4d7e18d
--- /dev/null
+++ b/lib/utils_pbkdf.c
@@ -0,0 +1,333 @@
+/*
+ * utils_pbkdf - PBKDF settings for libcryptsetup
+ *
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+
+#include "internal.h"
+
+const struct crypt_pbkdf_type default_pbkdf2 = {
+ .type = CRYPT_KDF_PBKDF2,
+ .hash = DEFAULT_LUKS1_HASH,
+ .time_ms = DEFAULT_LUKS1_ITER_TIME
+};
+
+const struct crypt_pbkdf_type default_argon2i = {
+ .type = CRYPT_KDF_ARGON2I,
+ .hash = DEFAULT_LUKS1_HASH,
+ .time_ms = DEFAULT_LUKS2_ITER_TIME,
+ .max_memory_kb = DEFAULT_LUKS2_MEMORY_KB,
+ .parallel_threads = DEFAULT_LUKS2_PARALLEL_THREADS
+};
+
+const struct crypt_pbkdf_type default_argon2id = {
+ .type = CRYPT_KDF_ARGON2ID,
+ .hash = DEFAULT_LUKS1_HASH,
+ .time_ms = DEFAULT_LUKS2_ITER_TIME,
+ .max_memory_kb = DEFAULT_LUKS2_MEMORY_KB,
+ .parallel_threads = DEFAULT_LUKS2_PARALLEL_THREADS
+};
+
+const struct crypt_pbkdf_type *crypt_get_pbkdf_type_params(const char *pbkdf_type)
+{
+ if (!pbkdf_type)
+ return NULL;
+
+ if (!strcmp(pbkdf_type, CRYPT_KDF_PBKDF2))
+ return &default_pbkdf2;
+ else if (!strcmp(pbkdf_type, CRYPT_KDF_ARGON2I))
+ return &default_argon2i;
+ else if (!strcmp(pbkdf_type, CRYPT_KDF_ARGON2ID))
+ return &default_argon2id;
+
+ return NULL;
+}
+
+static uint32_t adjusted_phys_memory(void)
+{
+ uint64_t memory_kb = crypt_getphysmemory_kb();
+
+ /* Ignore bogus value */
+ if (memory_kb < (128 * 1024) || memory_kb > UINT32_MAX)
+ return DEFAULT_LUKS2_MEMORY_KB;
+
+ /*
+ * Never use more than half of physical memory.
+ * OOM killer is too clever...
+ */
+ memory_kb /= 2;
+
+ return memory_kb;
+}
+
+/*
+ * PBKDF configuration interface
+ */
+int verify_pbkdf_params(struct crypt_device *cd,
+ const struct crypt_pbkdf_type *pbkdf)
+{
+ struct crypt_pbkdf_limits pbkdf_limits;
+ const char *pbkdf_type;
+ int r;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ if (!pbkdf || !pbkdf->type ||
+ (!pbkdf->hash && !strcmp(pbkdf->type, "pbkdf2")))
+ return -EINVAL;
+
+ if (!pbkdf->time_ms && !(pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK)) {
+ log_err(cd, _("Requested PBKDF target time cannot be zero."));
+ return -EINVAL;
+ }
+
+ r = crypt_parse_pbkdf(pbkdf->type, &pbkdf_type);
+ if (r < 0) {
+ log_err(cd, _("Unknown PBKDF type %s."), pbkdf->type);
+ return r;
+ }
+
+ if (pbkdf->hash && crypt_hash_size(pbkdf->hash) < 0) {
+ log_err(cd, _("Requested hash %s is not supported."), pbkdf->hash);
+ return -EINVAL;
+ }
+
+ r = crypt_pbkdf_get_limits(pbkdf->type, &pbkdf_limits);
+ if (r < 0)
+ return r;
+
+ if (crypt_get_type(cd) &&
+ !strcmp(crypt_get_type(cd), CRYPT_LUKS1) &&
+ strcmp(pbkdf_type, CRYPT_KDF_PBKDF2)) {
+ log_err(cd, _("Requested PBKDF type is not supported for LUKS1."));
+ return -EINVAL;
+ }
+
+ if (!strcmp(pbkdf_type, CRYPT_KDF_PBKDF2)) {
+ if (pbkdf->max_memory_kb || pbkdf->parallel_threads) {
+ log_err(cd, _("PBKDF max memory or parallel threads must not be set with pbkdf2."));
+ return -EINVAL;
+ }
+ if (pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK &&
+ pbkdf->iterations < pbkdf_limits.min_iterations) {
+ log_err(cd, _("Forced iteration count is too low for %s (minimum is %u)."),
+ pbkdf_type, pbkdf_limits.min_iterations);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* TODO: properly define minimal iterations and also minimal memory values */
+ if (pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK) {
+ if (pbkdf->iterations < pbkdf_limits.min_iterations) {
+ log_err(cd, _("Forced iteration count is too low for %s (minimum is %u)."),
+ pbkdf_type, pbkdf_limits.min_iterations);
+ r = -EINVAL;
+ }
+ if (pbkdf->max_memory_kb < pbkdf_limits.min_memory) {
+ log_err(cd, _("Forced memory cost is too low for %s (minimum is %u kilobytes)."),
+ pbkdf_type, pbkdf_limits.min_memory);
+ r = -EINVAL;
+ }
+ }
+
+ if (pbkdf->max_memory_kb > pbkdf_limits.max_memory) {
+ log_err(cd, _("Requested maximum PBKDF memory cost is too high (maximum is %d kilobytes)."),
+ pbkdf_limits.max_memory);
+ r = -EINVAL;
+ }
+ if (!pbkdf->max_memory_kb) {
+ log_err(cd, _("Requested maximum PBKDF memory cannot be zero."));
+ r = -EINVAL;
+ }
+ if (!pbkdf->parallel_threads) {
+ log_err(cd, _("Requested PBKDF parallel threads cannot be zero."));
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int init_pbkdf_type(struct crypt_device *cd,
+ const struct crypt_pbkdf_type *pbkdf,
+ const char *dev_type)
+{
+ struct crypt_pbkdf_type *cd_pbkdf = crypt_get_pbkdf(cd);
+ struct crypt_pbkdf_limits pbkdf_limits;
+ const char *hash, *type;
+ unsigned cpus;
+ uint32_t old_flags, memory_kb;
+ int r;
+
+ if (crypt_fips_mode()) {
+ if (pbkdf && strcmp(pbkdf->type, CRYPT_KDF_PBKDF2)) {
+ log_err(cd, _("Only PBKDF2 is supported in FIPS mode."));
+ return -EINVAL;
+ }
+ if (!pbkdf)
+ pbkdf = crypt_get_pbkdf_type_params(CRYPT_KDF_PBKDF2);
+ }
+
+ if (!pbkdf && dev_type && !strcmp(dev_type, CRYPT_LUKS2))
+ pbkdf = crypt_get_pbkdf_type_params(DEFAULT_LUKS2_PBKDF);
+ else if (!pbkdf)
+ pbkdf = crypt_get_pbkdf_type_params(CRYPT_KDF_PBKDF2);
+
+ r = verify_pbkdf_params(cd, pbkdf);
+ if (r)
+ return r;
+
+ r = crypt_pbkdf_get_limits(pbkdf->type, &pbkdf_limits);
+ if (r < 0)
+ return r;
+
+ type = strdup(pbkdf->type);
+ hash = pbkdf->hash ? strdup(pbkdf->hash) : NULL;
+
+ if (!type || (!hash && pbkdf->hash)) {
+ free(CONST_CAST(void*)type);
+ free(CONST_CAST(void*)hash);
+ return -ENOMEM;
+ }
+
+ free(CONST_CAST(void*)cd_pbkdf->type);
+ free(CONST_CAST(void*)cd_pbkdf->hash);
+ cd_pbkdf->type = type;
+ cd_pbkdf->hash = hash;
+
+ old_flags = cd_pbkdf->flags;
+ cd_pbkdf->flags = pbkdf->flags;
+
+ /* Reset iteration count so benchmark must run again. */
+ if (cd_pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK)
+ cd_pbkdf->iterations = pbkdf->iterations;
+ else
+ cd_pbkdf->iterations = 0;
+
+ if (old_flags & CRYPT_PBKDF_ITER_TIME_SET)
+ cd_pbkdf->flags |= CRYPT_PBKDF_ITER_TIME_SET;
+ else
+ cd_pbkdf->time_ms = pbkdf->time_ms;
+
+ cd_pbkdf->max_memory_kb = pbkdf->max_memory_kb;
+ cd_pbkdf->parallel_threads = pbkdf->parallel_threads;
+
+ if (cd_pbkdf->parallel_threads > pbkdf_limits.max_parallel) {
+ log_dbg(cd, "Maximum PBKDF threads is %d (requested %d).",
+ pbkdf_limits.max_parallel, cd_pbkdf->parallel_threads);
+ cd_pbkdf->parallel_threads = pbkdf_limits.max_parallel;
+ }
+
+ if (cd_pbkdf->parallel_threads) {
+ cpus = crypt_cpusonline();
+ if (cd_pbkdf->parallel_threads > cpus) {
+ log_dbg(cd, "Only %u active CPUs detected, "
+ "PBKDF threads decreased from %d to %d.",
+ cpus, cd_pbkdf->parallel_threads, cpus);
+ cd_pbkdf->parallel_threads = cpus;
+ }
+ }
+
+ if (cd_pbkdf->max_memory_kb) {
+ memory_kb = adjusted_phys_memory();
+ if (cd_pbkdf->max_memory_kb > memory_kb) {
+ log_dbg(cd, "Not enough physical memory detected, "
+ "PBKDF max memory decreased from %dkB to %dkB.",
+ cd_pbkdf->max_memory_kb, memory_kb);
+ cd_pbkdf->max_memory_kb = memory_kb;
+ }
+ }
+
+ if (!strcmp(pbkdf->type, CRYPT_KDF_PBKDF2))
+ log_dbg(cd, "PBKDF %s-%s, time_ms %u (iterations %u).",
+ cd_pbkdf->type, cd_pbkdf->hash, cd_pbkdf->time_ms, cd_pbkdf->iterations);
+ else
+ log_dbg(cd, "PBKDF %s, time_ms %u (iterations %u), max_memory_kb %u, parallel_threads %u.",
+ cd_pbkdf->type, cd_pbkdf->time_ms, cd_pbkdf->iterations,
+ cd_pbkdf->max_memory_kb, cd_pbkdf->parallel_threads);
+
+ return 0;
+}
+
+/* Libcryptsetup API */
+
+int crypt_set_pbkdf_type(struct crypt_device *cd, const struct crypt_pbkdf_type *pbkdf)
+{
+ if (!cd)
+ return -EINVAL;
+
+ if (!pbkdf)
+ log_dbg(cd, "Resetting pbkdf type to default");
+
+ crypt_get_pbkdf(cd)->flags = 0;
+
+ return init_pbkdf_type(cd, pbkdf, crypt_get_type(cd));
+}
+
+const struct crypt_pbkdf_type *crypt_get_pbkdf_type(struct crypt_device *cd)
+{
+ if (!cd)
+ return NULL;
+
+ return crypt_get_pbkdf(cd)->type ? crypt_get_pbkdf(cd) : NULL;
+}
+
+const struct crypt_pbkdf_type *crypt_get_pbkdf_default(const char *type)
+{
+ if (!type)
+ return NULL;
+
+ if (!strcmp(type, CRYPT_LUKS1) || crypt_fips_mode())
+ return crypt_get_pbkdf_type_params(CRYPT_KDF_PBKDF2);
+ else if (!strcmp(type, CRYPT_LUKS2))
+ return crypt_get_pbkdf_type_params(DEFAULT_LUKS2_PBKDF);
+
+ return NULL;
+}
+
+void crypt_set_iteration_time(struct crypt_device *cd, uint64_t iteration_time_ms)
+{
+ struct crypt_pbkdf_type *pbkdf;
+ uint32_t old_time_ms;
+
+ if (!cd || iteration_time_ms > UINT32_MAX)
+ return;
+
+ pbkdf = crypt_get_pbkdf(cd);
+ old_time_ms = pbkdf->time_ms;
+ pbkdf->time_ms = (uint32_t)iteration_time_ms;
+
+ if (pbkdf->type && verify_pbkdf_params(cd, pbkdf)) {
+ pbkdf->time_ms = old_time_ms;
+ log_dbg(cd, "Invalid iteration time.");
+ return;
+ }
+
+ pbkdf->flags |= CRYPT_PBKDF_ITER_TIME_SET;
+
+ /* iterations must be benchmarked now */
+ pbkdf->flags &= ~(CRYPT_PBKDF_NO_BENCHMARK);
+ pbkdf->iterations = 0;
+
+ log_dbg(cd, "Iteration time set to %" PRIu64 " milliseconds.", iteration_time_ms);
+}
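+
+/*
+ * Illustrative sketch of the public API above (values are examples
+ * only): forcing Argon2id with explicit cost limits before formatting.
+ *
+ *   struct crypt_pbkdf_type p = {
+ *       .type = CRYPT_KDF_ARGON2ID,
+ *       .hash = "sha256",
+ *       .time_ms = 2000,
+ *       .max_memory_kb = 1024 * 1024,
+ *       .parallel_threads = 4,
+ *   };
+ *   if (crypt_set_pbkdf_type(cd, &p) < 0)
+ *       ; // parameters rejected, previous settings remain in effect
+ */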
diff --git a/lib/utils_safe_memory.c b/lib/utils_safe_memory.c
new file mode 100644
index 0000000..b161369
--- /dev/null
+++ b/lib/utils_safe_memory.c
@@ -0,0 +1,122 @@
+/*
+ * utils_safe_memory - safe memory helpers
+ *
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/mman.h>
+#include "libcryptsetup.h"
+
+struct safe_allocation {
+ size_t size;
+ bool locked;
+ char data[0] __attribute__((aligned(8)));
+};
+#define OVERHEAD offsetof(struct safe_allocation, data)
+
+/*
+ * Replacement for memset(s, 0, n) that the compiler cannot optimize out,
+ * e.g. when wiping secrets on the stack.
+ * Also used in safe allocations for explicit memory wipe.
+ */
+void crypt_safe_memzero(void *data, size_t size)
+{
+ if (!data)
+ return;
+
+#ifdef HAVE_EXPLICIT_BZERO
+ explicit_bzero(data, size);
+#else
+ volatile uint8_t *p = (volatile uint8_t *)data;
+
+ while (size--)
+ *p++ = 0;
+#endif
+}
+
+/* safe allocations */
+void *crypt_safe_alloc(size_t size)
+{
+ struct safe_allocation *alloc;
+
+ if (!size || size > (SIZE_MAX - OVERHEAD))
+ return NULL;
+
+ alloc = malloc(size + OVERHEAD);
+ if (!alloc)
+ return NULL;
+
+ crypt_safe_memzero(alloc, size + OVERHEAD);
+ alloc->size = size;
+
+ /* Ignore mlock failure (e.g. memlock resource limit exceeded); memory is usable, just not locked. */
+ if (!mlock(alloc, size + OVERHEAD))
+ alloc->locked = true;
+
+ /* coverity[leaked_storage] */
+ return &alloc->data;
+}
+
+void crypt_safe_free(void *data)
+{
+ struct safe_allocation *alloc;
+ volatile size_t *s;
+ void *p;
+
+ if (!data)
+ return;
+
+ p = (char *)data - OVERHEAD;
+ alloc = (struct safe_allocation *)p;
+
+ crypt_safe_memzero(data, alloc->size);
+
+ if (alloc->locked) {
+ munlock(alloc, alloc->size + OVERHEAD);
+ alloc->locked = false;
+ }
+
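+ /* Poison the size field through a volatile pointer so the compiler cannot elide the store. */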
+ s = (volatile size_t *)&alloc->size;
+ *s = 0x55aa55aa;
+ free(alloc);
+}
+
+void *crypt_safe_realloc(void *data, size_t size)
+{
+ struct safe_allocation *alloc;
+ void *new_data;
+ void *p;
+
+ new_data = crypt_safe_alloc(size);
+
+ if (new_data && data) {
+
+ p = (char *)data - OVERHEAD;
+ alloc = (struct safe_allocation *)p;
+
+ if (size > alloc->size)
+ size = alloc->size;
+
+ memcpy(new_data, data, size);
+ }
+
+ crypt_safe_free(data);
+ return new_data;
+}
diff --git a/lib/utils_storage_wrappers.c b/lib/utils_storage_wrappers.c
new file mode 100644
index 0000000..6ff5afa
--- /dev/null
+++ b/lib/utils_storage_wrappers.c
@@ -0,0 +1,394 @@
+/*
+ * Generic wrapper for storage functions
+ * (experimental only)
+ *
+ * Copyright (C) 2018-2023 Ondrej Kozina
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "utils_storage_wrappers.h"
+#include "internal.h"
+
+struct crypt_storage_wrapper {
+ crypt_storage_wrapper_type type;
+ int dev_fd;
+ int block_size;
+ size_t mem_alignment;
+ uint64_t data_offset;
+ union {
+ struct {
+ struct crypt_storage *s;
+ uint64_t iv_start;
+ } cb;
+ struct {
+ int dmcrypt_fd;
+ char name[PATH_MAX];
+ } dm;
+ } u;
+};
+
+static int crypt_storage_backend_init(struct crypt_device *cd,
+ struct crypt_storage_wrapper *w,
+ uint64_t iv_start,
+ int sector_size,
+ const char *cipher,
+ const char *cipher_mode,
+ const struct volume_key *vk,
+ uint32_t flags)
+{
+ int r;
+ struct crypt_storage *s;
+
+ /* iv_start, sector_size */
+ r = crypt_storage_init(&s, sector_size, cipher, cipher_mode, vk->key, vk->keylength, flags & LARGE_IV);
+ if (r)
+ return r;
+
+ if ((flags & DISABLE_KCAPI) && crypt_storage_kernel_only(s)) {
+ log_dbg(cd, "Could not initialize userspace block cipher and kernel fallback is disabled.");
+ crypt_storage_destroy(s);
+ return -ENOTSUP;
+ }
+
+ w->type = USPACE;
+ w->u.cb.s = s;
+ w->u.cb.iv_start = iv_start;
+
+ return 0;
+}
+
+static int crypt_storage_dmcrypt_init(
+ struct crypt_device *cd,
+ struct crypt_storage_wrapper *cw,
+ struct device *device,
+ uint64_t device_offset,
+ uint64_t iv_start,
+ int sector_size,
+ const char *cipher_spec,
+ struct volume_key *vk,
+ int open_flags)
+{
+ static int counter = 0;
+ char path[PATH_MAX];
+ struct crypt_dm_active_device dmd = {
+ .flags = CRYPT_ACTIVATE_PRIVATE,
+ };
+ int mode, r, fd = -1;
+
+ log_dbg(cd, "Using temporary dmcrypt to access data.");
+
+ if (snprintf(cw->u.dm.name, sizeof(cw->u.dm.name), "temporary-cryptsetup-%d-%d", getpid(), counter++) < 0)
+ return -ENOMEM;
+ if (snprintf(path, sizeof(path), "%s/%s", dm_get_dir(), cw->u.dm.name) < 0)
+ return -ENOMEM;
+
+ r = device_block_adjust(cd, device, DEV_OK,
+ device_offset, &dmd.size, &dmd.flags);
+ if (r < 0) {
+ log_err(cd, _("Device %s does not exist or access denied."),
+ device_path(device));
+ return -EIO;
+ }
+
+ mode = open_flags | O_DIRECT;
+ if (dmd.flags & CRYPT_ACTIVATE_READONLY)
+ mode = (open_flags & ~O_ACCMODE) | O_RDONLY;
+
+ if (vk->key_description)
+ dmd.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+
+ r = dm_crypt_target_set(&dmd.segment, 0, dmd.size,
+ device,
+ vk,
+ cipher_spec,
+ iv_start,
+ device_offset,
+ NULL,
+ 0,
+ sector_size);
+ if (r)
+ return r;
+
+ r = dm_create_device(cd, cw->u.dm.name, "TEMP", &dmd);
+ if (r < 0) {
+ if (r != -EACCES && r != -ENOTSUP)
+ log_dbg(cd, "error hint would be nice");
+ r = -EIO;
+ }
+
+ dm_targets_free(cd, &dmd);
+
+ if (r)
+ return r;
+
+ fd = open(path, mode);
+ if (fd < 0) {
+ log_dbg(cd, "Failed to open %s", path);
+ dm_remove_device(cd, cw->u.dm.name, CRYPT_DEACTIVATE_FORCE);
+ return -EINVAL;
+ }
+
+ cw->type = DMCRYPT;
+ cw->u.dm.dmcrypt_fd = fd;
+
+ return 0;
+}
+
+int crypt_storage_wrapper_init(struct crypt_device *cd,
+ struct crypt_storage_wrapper **cw,
+ struct device *device,
+ uint64_t data_offset,
+ uint64_t iv_start,
+ int sector_size,
+ const char *cipher,
+ struct volume_key *vk,
+ uint32_t flags)
+{
+ int open_flags, r;
+ char _cipher[MAX_CIPHER_LEN], mode[MAX_CIPHER_LEN];
+ struct crypt_storage_wrapper *w;
+
+ /* device-mapper restrictions */
+ if (data_offset & ((1 << SECTOR_SHIFT) - 1))
+ return -EINVAL;
+
+ if (crypt_parse_name_and_mode(cipher, _cipher, NULL, mode))
+ return -EINVAL;
+
+ open_flags = O_CLOEXEC | ((flags & OPEN_READONLY) ? O_RDONLY : O_RDWR);
+
+ w = malloc(sizeof(*w));
+ if (!w)
+ return -ENOMEM;
+
+ memset(w, 0, sizeof(*w));
+ w->data_offset = data_offset;
+ w->mem_alignment = device_alignment(device);
+ w->block_size = device_block_size(cd, device);
+ if (!w->block_size || !w->mem_alignment) {
+ log_dbg(cd, "block size or alignment error.");
+ r = -EINVAL;
+ goto err;
+ }
+
+ w->dev_fd = device_open(cd, device, open_flags);
+ if (w->dev_fd < 0) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ if (crypt_is_cipher_null(_cipher)) {
+ log_dbg(cd, "Requested cipher_null, switching to noop wrapper.");
+ w->type = NONE;
+ *cw = w;
+ return 0;
+ }
+
+ if (!vk) {
+ log_dbg(cd, "no key passed.");
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = crypt_storage_backend_init(cd, w, iv_start, sector_size, _cipher, mode, vk, flags);
+ if (!r) {
+ *cw = w;
+ return 0;
+ }
+
+ log_dbg(cd, "Failed to initialize userspace block cipher.");
+
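+ /* Fall back to a temporary dm-crypt device only when the userspace backend is unsupported. */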
+ if ((r != -ENOTSUP && r != -ENOENT) || (flags & DISABLE_DMCRYPT))
+ goto err;
+
+ r = crypt_storage_dmcrypt_init(cd, w, device, data_offset >> SECTOR_SHIFT, iv_start,
+ sector_size, cipher, vk, open_flags);
+ if (r) {
+ log_dbg(cd, "Dm-crypt backend failed to initialize.");
+ goto err;
+ }
+ *cw = w;
+ return 0;
+err:
+ crypt_storage_wrapper_destroy(w);
+ return r;
+}
+
+/* offset is relative to data_offset */
+ssize_t crypt_storage_wrapper_read(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length)
+{
+ return read_lseek_blockwise(cw->dev_fd,
+ cw->block_size,
+ cw->mem_alignment,
+ buffer,
+ buffer_length,
+ cw->data_offset + offset);
+}
+
+ssize_t crypt_storage_wrapper_read_decrypt(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length)
+{
+ int r;
+ ssize_t read;
+
+ if (cw->type == DMCRYPT)
+ return read_lseek_blockwise(cw->u.dm.dmcrypt_fd,
+ cw->block_size,
+ cw->mem_alignment,
+ buffer,
+ buffer_length,
+ offset);
+
+ read = read_lseek_blockwise(cw->dev_fd,
+ cw->block_size,
+ cw->mem_alignment,
+ buffer,
+ buffer_length,
+ cw->data_offset + offset);
+ if (cw->type == NONE || read < 0)
+ return read;
+
+ r = crypt_storage_decrypt(cw->u.cb.s,
+ cw->u.cb.iv_start + (offset >> SECTOR_SHIFT),
+ read,
+ buffer);
+ if (r)
+ return -EINVAL;
+
+ return read;
+}
+
+ssize_t crypt_storage_wrapper_decrypt(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length)
+{
+ int r;
+ ssize_t read;
+
+ if (cw->type == NONE)
+ return 0;
+
+ if (cw->type == DMCRYPT) {
+ /* there's nothing we can do, just read/decrypt via dm-crypt */
+ read = crypt_storage_wrapper_read_decrypt(cw, offset, buffer, buffer_length);
+ if (read < 0 || (size_t)read != buffer_length)
+ return -EINVAL;
+ return 0;
+ }
+
+ r = crypt_storage_decrypt(cw->u.cb.s,
+ cw->u.cb.iv_start + (offset >> SECTOR_SHIFT),
+ buffer_length,
+ buffer);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+ssize_t crypt_storage_wrapper_write(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length)
+{
+ return write_lseek_blockwise(cw->dev_fd,
+ cw->block_size,
+ cw->mem_alignment,
+ buffer,
+ buffer_length,
+ cw->data_offset + offset);
+}
+
+ssize_t crypt_storage_wrapper_encrypt_write(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length)
+{
+ if (cw->type == DMCRYPT)
+ return write_lseek_blockwise(cw->u.dm.dmcrypt_fd,
+ cw->block_size,
+ cw->mem_alignment,
+ buffer,
+ buffer_length,
+ offset);
+
+ if (cw->type == USPACE &&
+ crypt_storage_encrypt(cw->u.cb.s,
+ cw->u.cb.iv_start + (offset >> SECTOR_SHIFT),
+ buffer_length, buffer))
+ return -EINVAL;
+
+ return write_lseek_blockwise(cw->dev_fd,
+ cw->block_size,
+ cw->mem_alignment,
+ buffer,
+ buffer_length,
+ cw->data_offset + offset);
+}
+
+ssize_t crypt_storage_wrapper_encrypt(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length)
+{
+ if (cw->type == NONE)
+ return 0;
+
+ if (cw->type == DMCRYPT)
+ return -ENOTSUP;
+
+ if (crypt_storage_encrypt(cw->u.cb.s,
+ cw->u.cb.iv_start + (offset >> SECTOR_SHIFT),
+ buffer_length,
+ buffer))
+ return -EINVAL;
+
+ return 0;
+}
+
+void crypt_storage_wrapper_destroy(struct crypt_storage_wrapper *cw)
+{
+ if (!cw)
+ return;
+
+ if (cw->type == USPACE)
+ crypt_storage_destroy(cw->u.cb.s);
+ if (cw->type == DMCRYPT) {
+ close(cw->u.dm.dmcrypt_fd);
+ dm_remove_device(NULL, cw->u.dm.name, CRYPT_DEACTIVATE_FORCE);
+ }
+
+ free(cw);
+}
+
+int crypt_storage_wrapper_datasync(const struct crypt_storage_wrapper *cw)
+{
+ if (!cw)
+ return -EINVAL;
+ if (cw->type == DMCRYPT)
+ return fdatasync(cw->u.dm.dmcrypt_fd);
+ else
+ return fdatasync(cw->dev_fd);
+}
+
+crypt_storage_wrapper_type crypt_storage_wrapper_get_type(const struct crypt_storage_wrapper *cw)
+{
+ return cw ? cw->type : NONE;
+}
diff --git a/lib/utils_storage_wrappers.h b/lib/utils_storage_wrappers.h
new file mode 100644
index 0000000..f7781e8
--- /dev/null
+++ b/lib/utils_storage_wrappers.h
@@ -0,0 +1,75 @@
+/*
+ * Generic wrapper for storage functions
+ * (experimental only)
+ *
+ * Copyright (C) 2018-2023 Ondrej Kozina
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _UTILS_STORAGE_WRAPPERS_H
+#define _UTILS_STORAGE_WRAPPERS_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+struct crypt_storage_wrapper;
+struct device;
+struct volume_key;
+struct crypt_device;
+
+#define DISABLE_USPACE (1 << 0)
+#define DISABLE_KCAPI (1 << 1)
+#define DISABLE_DMCRYPT (1 << 2)
+#define OPEN_READONLY (1 << 3)
+#define LARGE_IV (1 << 4)
+
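+/* Backend selected by the wrapper: NONE passes I/O through unchanged
+ * (cipher_null), USPACE encrypts via the userspace crypto backend, and
+ * DMCRYPT falls back to a temporary dm-crypt mapping. */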
+typedef enum {
+ NONE = 0,
+ USPACE,
+ DMCRYPT
+} crypt_storage_wrapper_type;
+
+int crypt_storage_wrapper_init(struct crypt_device *cd,
+ struct crypt_storage_wrapper **cw,
+ struct device *device,
+ uint64_t data_offset,
+ uint64_t iv_start,
+ int sector_size,
+ const char *cipher,
+ struct volume_key *vk,
+ uint32_t flags);
+
+void crypt_storage_wrapper_destroy(struct crypt_storage_wrapper *cw);
+
+/* !!! when doing 'read' or 'write' all offset values are RELATIVE to data_offset !!! */
+ssize_t crypt_storage_wrapper_read(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length);
+ssize_t crypt_storage_wrapper_read_decrypt(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length);
+ssize_t crypt_storage_wrapper_decrypt(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length);
+
+ssize_t crypt_storage_wrapper_write(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length);
+ssize_t crypt_storage_wrapper_encrypt_write(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length);
+ssize_t crypt_storage_wrapper_encrypt(struct crypt_storage_wrapper *cw,
+ off_t offset, void *buffer, size_t buffer_length);
+
+int crypt_storage_wrapper_datasync(const struct crypt_storage_wrapper *cw);
+
+crypt_storage_wrapper_type crypt_storage_wrapper_get_type(const struct crypt_storage_wrapper *cw);
+#endif
diff --git a/lib/utils_wipe.c b/lib/utils_wipe.c
new file mode 100644
index 0000000..1df46c1
--- /dev/null
+++ b/lib/utils_wipe.c
@@ -0,0 +1,311 @@
+/*
+ * utils_wipe - wipe a device
+ *
+ * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2009-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <linux/fs.h>
+#include "internal.h"
+
+/* block device zeroout ioctls, introduced in Linux kernel 3.7 */
+#ifndef BLKZEROOUT
+#define BLKZEROOUT _IO(0x12,127)
+#endif
+
+static int wipe_zeroout(struct crypt_device *cd, int devfd,
+ uint64_t offset, uint64_t length)
+{
+ static bool zeroout_available = true;
+ uint64_t range[2] = { offset, length };
+ int r;
+
+ if (!zeroout_available)
+ return -ENOTSUP;
+
+ r = ioctl(devfd, BLKZEROOUT, &range);
+ if (r < 0) {
+ log_dbg(cd, "BLKZEROOUT ioctl not available (error %i), disabling.", r);
+ zeroout_available = false;
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/*
+ * Wipe using the Peter Gutmann method described in
+ * https://www.cs.auckland.ac.nz/~pgut001/pubs/secure_del.html
+ * Note: used only for rotational devices (and even there it is not needed today).
+ */
+static void wipeSpecial(char *buffer, size_t buffer_size, unsigned int turn)
+{
+ unsigned int i;
+
+ unsigned char write_modes[][3] = {
+ {"\x55\x55\x55"}, {"\xaa\xaa\xaa"}, {"\x92\x49\x24"},
+ {"\x49\x24\x92"}, {"\x24\x92\x49"}, {"\x00\x00\x00"},
+ {"\x11\x11\x11"}, {"\x22\x22\x22"}, {"\x33\x33\x33"},
+ {"\x44\x44\x44"}, {"\x55\x55\x55"}, {"\x66\x66\x66"},
+ {"\x77\x77\x77"}, {"\x88\x88\x88"}, {"\x99\x99\x99"},
+ {"\xaa\xaa\xaa"}, {"\xbb\xbb\xbb"}, {"\xcc\xcc\xcc"},
+ {"\xdd\xdd\xdd"}, {"\xee\xee\xee"}, {"\xff\xff\xff"},
+ {"\x92\x49\x24"}, {"\x49\x24\x92"}, {"\x24\x92\x49"},
+ {"\x6d\xb6\xdb"}, {"\xb6\xdb\x6d"}, {"\xdb\x6d\xb6"}
+ };
+
+ for (i = 0; i < buffer_size / 3; ++i) {
+ memcpy(buffer, write_modes[turn], 3);
+ buffer += 3;
+ }
+}
+
+static int crypt_wipe_special(struct crypt_device *cd, int fd, size_t bsize,
+ size_t alignment, char *buffer,
+ uint64_t offset, size_t size)
+{
+ int r = 0;
+ unsigned int i;
+ ssize_t written;
+
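+ /* 39 passes: 5 random, 27 Gutmann patterns, 6 random, one 0xFF fill; a final random pass follows the loop. */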
+ for (i = 0; i < 39; ++i) {
+ if (i < 5) {
+ r = crypt_random_get(cd, buffer, size, CRYPT_RND_NORMAL);
+ } else if (i >= 5 && i < 32) {
+ wipeSpecial(buffer, size, i - 5);
+ r = 0;
+ } else if (i >= 32 && i < 38) {
+ r = crypt_random_get(cd, buffer, size, CRYPT_RND_NORMAL);
+ } else if (i >= 38 && i < 39) {
+ memset(buffer, 0xFF, size);
+ r = 0;
+ }
+ if (r < 0)
+ return -EIO;
+
+ written = write_lseek_blockwise(fd, bsize, alignment,
+ buffer, size, offset);
+ if (written < 0 || written != (ssize_t)size)
+ return -EIO;
+ }
+
+ /* Rewrite it finally with random */
+ if (crypt_random_get(cd, buffer, size, CRYPT_RND_NORMAL) < 0)
+ return -EIO;
+
+ written = write_lseek_blockwise(fd, bsize, alignment, buffer, size, offset);
+ if (written < 0 || written != (ssize_t)size)
+ return -EIO;
+
+ return 0;
+}
+
+static int wipe_block(struct crypt_device *cd, int devfd, crypt_wipe_pattern pattern,
+ char *sf, size_t device_block_size, size_t alignment,
+ size_t wipe_block_size, uint64_t offset, bool *need_block_init,
+ bool blockdev)
+{
+ int r;
+
+ if (pattern == CRYPT_WIPE_SPECIAL)
+ return crypt_wipe_special(cd, devfd, device_block_size, alignment,
+ sf, offset, wipe_block_size);
+
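+ /* CRYPT_WIPE_ZERO fills the buffer once; random patterns are regenerated for every block. */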
+ if (*need_block_init) {
+ if (pattern == CRYPT_WIPE_ZERO) {
+ memset(sf, 0, wipe_block_size);
+ *need_block_init = false;
+ r = 0;
+ } else if (pattern == CRYPT_WIPE_RANDOM ||
+ pattern == CRYPT_WIPE_ENCRYPTED_ZERO) {
+ r = crypt_random_get(cd, sf, wipe_block_size,
+ CRYPT_RND_NORMAL) ? -EIO : 0;
+ *need_block_init = true;
+ } else
+ r = -EINVAL;
+
+ if (r)
+ return r;
+ }
+
+ if (blockdev && pattern == CRYPT_WIPE_ZERO &&
+ !wipe_zeroout(cd, devfd, offset, wipe_block_size)) {
+ /* zeroout ioctl does not move offset */
+ if (lseek(devfd, offset + wipe_block_size, SEEK_SET) < 0) {
+ log_err(cd, _("Cannot seek to device offset."));
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (write_blockwise(devfd, device_block_size, alignment, sf,
+ wipe_block_size) == (ssize_t)wipe_block_size)
+ return 0;
+
+ return -EIO;
+}
+
+int crypt_wipe_device(struct crypt_device *cd,
+ struct device *device,
+ crypt_wipe_pattern pattern,
+ uint64_t offset,
+ uint64_t length,
+ size_t wipe_block_size,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr)
+{
+ int r, devfd;
+ struct stat st;
+ size_t bsize, alignment;
+ char *sf = NULL;
+ uint64_t dev_size;
+ bool need_block_init = true;
+
+ /* Note: LUKS1 calls it with wipe_block not aligned to a multiple of bsize */
+ bsize = device_block_size(cd, device);
+ alignment = device_alignment(device);
+ if (!bsize || !alignment || !wipe_block_size)
+ return -EINVAL;
+
+ /* if wipe_block_size < bsize, then a wipe is highly ineffective */
+
+ /* Everything must be aligned to SECTOR_SIZE */
+ if (MISALIGNED_512(offset) || MISALIGNED_512(length) || MISALIGNED_512(wipe_block_size))
+ return -EINVAL;
+
+ if (device_is_locked(device))
+ devfd = device_open_locked(cd, device, O_RDWR);
+ else
+ devfd = device_open(cd, device, O_RDWR);
+ if (devfd < 0)
+ return errno ? -errno : -EINVAL;
+
+ if (fstat(devfd, &st) < 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (length)
+ dev_size = offset + length;
+ else {
+ r = device_size(device, &dev_size);
+ if (r)
+ goto out;
+
+ if (dev_size <= offset) {
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ r = posix_memalign((void **)&sf, alignment, wipe_block_size);
+ if (r)
+ goto out;
+
+ if (lseek(devfd, offset, SEEK_SET) < 0) {
+ log_err(cd, _("Cannot seek to device offset."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (progress && progress(dev_size, offset, usrptr)) {
+ r = -EINVAL; /* No change yet, treat this as a parameter error */
+ goto out;
+ }
+
+ if (pattern == CRYPT_WIPE_SPECIAL && !device_is_rotational(device)) {
+ log_dbg(cd, "Non-rotational device, using random data wipe mode.");
+ pattern = CRYPT_WIPE_RANDOM;
+ }
+
+ while (offset < dev_size) {
+ if ((offset + wipe_block_size) > dev_size)
+ wipe_block_size = dev_size - offset;
+
+ r = wipe_block(cd, devfd, pattern, sf, bsize, alignment,
+ wipe_block_size, offset, &need_block_init, S_ISBLK(st.st_mode));
+ if (r) {
+ log_err(cd,_("Device wipe error, offset %" PRIu64 "."), offset);
+ break;
+ }
+
+ offset += wipe_block_size;
+
+ if (progress && progress(dev_size, offset, usrptr)) {
+ r = -EINTR;
+ break;
+ }
+ }
+
+ device_sync(cd, device);
+out:
+ free(sf);
+ return r;
+}
+
+int crypt_wipe(struct crypt_device *cd,
+ const char *dev_path,
+ crypt_wipe_pattern pattern,
+ uint64_t offset,
+ uint64_t length,
+ size_t wipe_block_size,
+ uint32_t flags,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr)
+{
+ struct device *device;
+ int r;
+
+ if (!cd)
+ return -EINVAL;
+
+ r = init_crypto(cd);
+ if (r < 0)
+ return r;
+
+ if (!dev_path)
+ device = crypt_data_device(cd);
+ else {
+ r = device_alloc_no_check(&device, dev_path);
+ if (r < 0)
+ return r;
+
+ if (flags & CRYPT_WIPE_NO_DIRECT_IO)
+ device_disable_direct_io(device);
+ }
+ if (!device)
+ return -EINVAL;
+
+ if (!wipe_block_size)
+ wipe_block_size = 1024*1024;
+
+ log_dbg(cd, "Wipe [%u] device %s, offset %" PRIu64 ", length %" PRIu64 ", block %zu.",
+ (unsigned)pattern, device_path(device), offset, length, wipe_block_size);
+
+ r = crypt_wipe_device(cd, device, pattern, offset, length,
+ wipe_block_size, progress, usrptr);
+
+ if (dev_path)
+ device_free(cd, device);
+
+ return r;
+}
diff --git a/lib/verity/rs.h b/lib/verity/rs.h
new file mode 100644
index 0000000..7638924
--- /dev/null
+++ b/lib/verity/rs.h
@@ -0,0 +1,63 @@
+/*
+ * Reed-Solomon codecs, based on libfec
+ *
+ * Copyright (C) 2004 Phil Karn, KA9Q
+ * libcryptsetup modifications
+ * Copyright (C) 2017-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _LIBFEC_RS_H
+#define _LIBFEC_RS_H
+
+/* Special reserved value encoding zero in index form. */
+#define A0 (rs->nn)
+
+#define RS_MIN(a, b) ((a) < (b) ? (a) : (b))
+
+typedef unsigned char data_t;
+
+/* Reed-Solomon codec control block */
+struct rs {
+ int mm; /* Bits per symbol */
+ int nn; /* Symbols per block (= (1<<mm)-1) */
+ data_t *alpha_to;/* Antilog lookup table (index form to polynomial form) */
+ data_t *index_of;/* Log lookup table (polynomial form to index form) */
+ data_t *genpoly; /* Generator polynomial */
+ int nroots; /* Number of generator roots = number of parity symbols */
+ int fcr; /* First consecutive root, index form */
+ int prim; /* Primitive element, index form */
+ int iprim; /* prim-th root of 1, index form */
+ int pad; /* Padding bytes in shortened block */
+};
+
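+/*
+ * Reduce x modulo nn = 2^mm - 1 without division: because the modulus is
+ * all ones in binary, folding the high bits back into the low bits is
+ * equivalent to taking the remainder.
+ */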
+static inline int modnn(struct rs *rs, int x)
+{
+ while (x >= rs->nn) {
+ x -= rs->nn;
+ x = (x >> rs->mm) + (x & rs->nn);
+ }
+ return x;
+}
+
+struct rs *init_rs_char(int symsize, int gfpoly, int fcr, int prim, int nroots, int pad);
+void free_rs_char(struct rs *rs);
+
+/* General purpose RS codec, 8-bit symbols */
+void encode_rs_char(struct rs *rs, data_t *data, data_t *parity);
+int decode_rs_char(struct rs *rs, data_t *data);
+
+#endif
diff --git a/lib/verity/rs_decode_char.c b/lib/verity/rs_decode_char.c
new file mode 100644
index 0000000..4473202
--- /dev/null
+++ b/lib/verity/rs_decode_char.c
@@ -0,0 +1,201 @@
+/*
+ * Reed-Solomon decoder, based on libfec
+ *
+ * Copyright (C) 2002, Phil Karn, KA9Q
+ * libcryptsetup modifications
+ * Copyright (C) 2017-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "rs.h"
+
+#define MAX_NR_BUF 256
+
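+/* Returns the number of corrected symbol errors, or -1 if the block is uncorrectable. */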
+int decode_rs_char(struct rs* rs, data_t* data)
+{
+ int deg_lambda, el, deg_omega, syn_error, count;
+ int i, j, r, k;
+ data_t q, tmp, num1, num2, den, discr_r;
+ data_t lambda[MAX_NR_BUF], s[MAX_NR_BUF]; /* Err+Eras Locator poly and syndrome poly */
+ data_t b[MAX_NR_BUF], t[MAX_NR_BUF], omega[MAX_NR_BUF];
+ data_t root[MAX_NR_BUF], reg[MAX_NR_BUF], loc[MAX_NR_BUF];
+
+ if (rs->nroots >= MAX_NR_BUF)
+ return -1;
+
+ memset(s, 0, rs->nroots * sizeof(data_t));
+ memset(b, 0, (rs->nroots + 1) * sizeof(data_t));
+
+ /* form the syndromes; i.e., evaluate data(x) at roots of g(x) */
+ for (i = 0; i < rs->nroots; i++)
+ s[i] = data[0];
+
+ for (j = 1; j < rs->nn - rs->pad; j++) {
+ for (i = 0; i < rs->nroots; i++) {
+ if (s[i] == 0) {
+ s[i] = data[j];
+ } else {
+ s[i] = data[j] ^ rs->alpha_to[modnn(rs, rs->index_of[s[i]] + (rs->fcr + i) * rs->prim)];
+ }
+ }
+ }
+
+ /* Convert syndromes to index form, checking for nonzero condition */
+ syn_error = 0;
+ for (i = 0; i < rs->nroots; i++) {
+ syn_error |= s[i];
+ s[i] = rs->index_of[s[i]];
+ }
+
+ /*
+ * if syndrome is zero, data[] is a codeword and there are no
+ * errors to correct. So return data[] unmodified
+ */
+ if (!syn_error)
+ return 0;
+
+ memset(&lambda[1], 0, rs->nroots * sizeof(lambda[0]));
+ lambda[0] = 1;
+
+ for (i = 0; i < rs->nroots + 1; i++)
+ b[i] = rs->index_of[lambda[i]];
+
+ /*
+ * Begin Berlekamp-Massey algorithm to determine error+erasure
+ * locator polynomial
+ */
+ r = 0;
+ el = 0;
+ while (++r <= rs->nroots) { /* r is the step number */
+ /* Compute discrepancy at the r-th step in poly-form */
+ discr_r = 0;
+ for (i = 0; i < r; i++) {
+ if ((lambda[i] != 0) && (s[r - i - 1] != A0)) {
+ discr_r ^= rs->alpha_to[modnn(rs, rs->index_of[lambda[i]] + s[r - i - 1])];
+ }
+ }
+ discr_r = rs->index_of[discr_r]; /* Index form */
+ if (discr_r == A0) {
+ /* 2 lines below: B(x) <-- x*B(x) */
+ memmove(&b[1], b, rs->nroots * sizeof(b[0]));
+ b[0] = A0;
+ } else {
+ /* 7 lines below: T(x) <-- lambda(x) - discr_r*x*b(x) */
+ t[0] = lambda[0];
+ for (i = 0; i < rs->nroots; i++) {
+ if (b[i] != A0)
+ t[i + 1] = lambda[i + 1] ^ rs->alpha_to[modnn(rs, discr_r + b[i])];
+ else
+ t[i + 1] = lambda[i + 1];
+ }
+ if (2 * el <= r - 1) {
+ el = r - el;
+ /*
+ * 2 lines below: B(x) <-- inv(discr_r) *
+ * lambda(x)
+ */
+ for (i = 0; i <= rs->nroots; i++)
+ b[i] = (lambda[i] == 0) ? A0 : modnn(rs, rs->index_of[lambda[i]] - discr_r + rs->nn);
+ } else {
+ /* 2 lines below: B(x) <-- x*B(x) */
+ memmove(&b[1], b, rs->nroots * sizeof(b[0]));
+ b[0] = A0;
+ }
+ memcpy(lambda, t, (rs->nroots + 1) * sizeof(t[0]));
+ }
+ }
+
+ /* Convert lambda to index form and compute deg(lambda(x)) */
+ deg_lambda = 0;
+ for (i = 0; i < rs->nroots + 1; i++) {
+ lambda[i] = rs->index_of[lambda[i]];
+ if (lambda[i] != A0)
+ deg_lambda = i;
+ }
+ /* Find roots of the error+erasure locator polynomial by Chien search */
+ memcpy(&reg[1], &lambda[1], rs->nroots * sizeof(reg[0]));
+ count = 0; /* Number of roots of lambda(x) */
+ for (i = 1, k = rs->iprim - 1; i <= rs->nn; i++, k = modnn(rs, k + rs->iprim)) {
+ q = 1; /* lambda[0] is always 1 in poly form (index form 0) */
+ for (j = deg_lambda; j > 0; j--) {
+ if (reg[j] != A0) {
+ reg[j] = modnn(rs, reg[j] + j);
+ q ^= rs->alpha_to[reg[j]];
+ }
+ }
+ if (q != 0)
+ continue; /* Not a root */
+
+ /* store root (index-form) and error location number */
+ root[count] = i;
+ loc[count] = k;
+ /* If we've already found max possible roots, abort the search to save time */
+ if (++count == deg_lambda)
+ break;
+ }
+
+ /*
+ * deg(lambda) unequal to number of roots => uncorrectable
+ * error detected
+ */
+ if (deg_lambda != count)
+ return -1;
+
+ /*
+ * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
+ * x**rs->nroots). in index form. Also find deg(omega).
+ */
+ deg_omega = deg_lambda - 1;
+ for (i = 0; i <= deg_omega; i++) {
+ tmp = 0;
+ for (j = i; j >= 0; j--) {
+ if ((s[i - j] != A0) && (lambda[j] != A0))
+ tmp ^= rs->alpha_to[modnn(rs, s[i - j] + lambda[j])];
+ }
+ omega[i] = rs->index_of[tmp];
+ }
+
+ /*
+ * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
+ * inv(X(l))**(rs->fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
+ */
+ for (j = count - 1; j >= 0; j--) {
+ num1 = 0;
+ for (i = deg_omega; i >= 0; i--) {
+ if (omega[i] != A0)
+ num1 ^= rs->alpha_to[modnn(rs, omega[i] + i * root[j])];
+ }
+ num2 = rs->alpha_to[modnn(rs, root[j] * (rs->fcr - 1) + rs->nn)];
+ den = 0;
+
+ /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
+ for (i = RS_MIN(deg_lambda, rs->nroots - 1) & ~1; i >= 0; i -= 2) {
+ if (lambda[i + 1] != A0)
+ den ^= rs->alpha_to[modnn(rs, lambda[i + 1] + i * root[j])];
+ }
+
+ /* Apply error to data */
+ if (num1 != 0 && loc[j] >= rs->pad) {
+ data[loc[j] - rs->pad] ^= rs->alpha_to[modnn(rs, rs->index_of[num1] +
+ rs->index_of[num2] + rs->nn - rs->index_of[den])];
+ }
+ }
+
+ return count;
+}
diff --git a/lib/verity/rs_encode_char.c b/lib/verity/rs_encode_char.c
new file mode 100644
index 0000000..55b502a
--- /dev/null
+++ b/lib/verity/rs_encode_char.c
@@ -0,0 +1,173 @@
+/*
+ * Reed-Solomon encoder, based on libfec
+ *
+ * Copyright (C) 2002, Phil Karn, KA9Q
+ * libcryptsetup modifications
+ * Copyright (C) 2017-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "rs.h"
+
+/* Initialize a Reed-Solomon codec
+ * symsize = symbol size, bits
+ * gfpoly = Field generator polynomial coefficients
+ * fcr = first root of RS code generator polynomial, index form
+ * prim = primitive element to generate polynomial roots
+ * nroots = RS code generator polynomial degree (number of roots)
+ * pad = padding bytes at front of shortened block
+ */
+struct rs *init_rs_char(int symsize, int gfpoly, int fcr, int prim, int nroots, int pad)
+{
+ struct rs *rs;
+ int i, j, sr, root, iprim;
+
+ /* Check parameter ranges */
+ if (symsize < 0 || symsize > 8 * (int)sizeof(data_t))
+ return NULL;
+ if (fcr < 0 || fcr >= (1<<symsize))
+ return NULL;
+ if (prim <= 0 || prim >= (1<<symsize))
+ return NULL;
+ if (nroots < 0 || nroots >= (1<<symsize))
+ return NULL; /* Can't have more roots than symbol values! */
+
+ if (pad < 0 || pad >= ((1<<symsize) - 1 - nroots))
+ return NULL; /* Too much padding */
+
+ rs = calloc(1, sizeof(struct rs));
+ if (rs == NULL)
+ return NULL;
+
+ rs->mm = symsize;
+ rs->nn = (1<<symsize) - 1;
+ rs->pad = pad;
+
+ rs->alpha_to = malloc(sizeof(data_t) * (rs->nn + 1));
+ if (rs->alpha_to == NULL) {
+ free(rs);
+ return NULL;
+ }
+ rs->index_of = malloc(sizeof(data_t) * (rs->nn + 1));
+ if (rs->index_of == NULL) {
+ free(rs->alpha_to);
+ free(rs);
+ return NULL;
+ }
+ memset(rs->index_of, 0, sizeof(data_t) * (rs->nn + 1));
+
+ /* Generate Galois field lookup tables */
+ rs->index_of[0] = A0; /* log(zero) = -inf */
+ rs->alpha_to[A0] = 0; /* alpha**-inf = 0 */
+ sr = 1;
+ for (i = 0; i < rs->nn; i++) {
+ rs->index_of[sr] = i;
+ rs->alpha_to[i] = sr;
+ sr <<= 1;
+ if(sr & (1<<symsize))
+ sr ^= gfpoly;
+ sr &= rs->nn;
+ }
+ if (sr != 1) {
+ /* field generator polynomial is not primitive! */
+ free(rs->alpha_to);
+ free(rs->index_of);
+ free(rs);
+ return NULL;
+ }
+
+ /* Form RS code generator polynomial from its roots */
+ rs->genpoly = malloc(sizeof(data_t) * (nroots + 1));
+ if (rs->genpoly == NULL) {
+ free(rs->alpha_to);
+ free(rs->index_of);
+ free(rs);
+ return NULL;
+ }
+
+ rs->fcr = fcr;
+ rs->prim = prim;
+ rs->nroots = nroots;
+
+ /* Find prim-th root of 1, used in decoding */
+ for (iprim = 1; (iprim % prim) != 0; iprim += rs->nn)
+ ;
+ rs->iprim = iprim / prim;
+
+ rs->genpoly[0] = 1;
+ for (i = 0, root = fcr * prim; i < nroots; i++, root += prim) {
+ rs->genpoly[i + 1] = 1;
+
+ /* Multiply rs->genpoly[] by @**(root + x) */
+ for (j = i; j > 0; j--) {
+ if (rs->genpoly[j] != 0)
+ rs->genpoly[j] = rs->genpoly[j - 1] ^ rs->alpha_to[modnn(rs, rs->index_of[rs->genpoly[j]] + root)];
+ else
+ rs->genpoly[j] = rs->genpoly[j - 1];
+ }
+ /* rs->genpoly[0] can never be zero */
+ rs->genpoly[0] = rs->alpha_to[modnn(rs, rs->index_of[rs->genpoly[0]] + root)];
+ }
+ /* convert rs->genpoly[] to index form for quicker encoding */
+ for (i = 0; i <= nroots; i++)
+ rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
+
+ return rs;
+}
+
+void free_rs_char(struct rs *rs)
+{
+ if (!rs)
+ return;
+
+ free(rs->alpha_to);
+ free(rs->index_of);
+ free(rs->genpoly);
+ free(rs);
+}
+
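+/*
+ * Systematic encoder: the data symbols are divided by the generator
+ * polynomial using an LFSR and the remainder becomes the parity symbols.
+ */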
+void encode_rs_char(struct rs *rs, data_t *data, data_t *parity)
+{
+ int i, j;
+ data_t feedback;
+
+ memset(parity, 0, rs->nroots * sizeof(data_t));
+
+ for (i = 0; i < rs->nn - rs->nroots - rs->pad; i++) {
+ feedback = rs->index_of[data[i] ^ parity[0]];
+ if (feedback != A0) {
+ /* feedback term is non-zero */
+#ifdef UNNORMALIZED
+ /* This line is unnecessary when GENPOLY[NROOTS] is unity, as it must
+ * always be for the polynomials constructed by init_rs() */
+ feedback = modnn(rs, rs->nn - rs->genpoly[rs->nroots] + feedback);
+#endif
+ for (j = 1; j < rs->nroots; j++)
+ parity[j] ^= rs->alpha_to[modnn(rs, feedback + rs->genpoly[rs->nroots - j])];
+ }
+
+ /* Shift */
+ memmove(&parity[0], &parity[1], sizeof(data_t) * (rs->nroots - 1));
+
+ if (feedback != A0)
+ parity[rs->nroots - 1] = rs->alpha_to[modnn(rs, feedback + rs->genpoly[0])];
+ else
+ parity[rs->nroots - 1] = 0;
+ }
+}
diff --git a/lib/verity/verity.c b/lib/verity/verity.c
new file mode 100644
index 0000000..0d7a8f5
--- /dev/null
+++ b/lib/verity/verity.c
@@ -0,0 +1,416 @@
+/*
+ * dm-verity volume handling
+ *
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <uuid/uuid.h>
+
+#include "libcryptsetup.h"
+#include "verity.h"
+#include "internal.h"
+
+#define VERITY_SIGNATURE "verity\0\0"
+
+/* https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity#verity-superblock-format */
+struct verity_sb {
+ uint8_t signature[8]; /* "verity\0\0" */
+ uint32_t version; /* superblock version */
+ uint32_t hash_type; /* 0 - Chrome OS, 1 - normal */
+ uint8_t uuid[16]; /* UUID of hash device */
+ uint8_t algorithm[32];/* hash algorithm name */
+ uint32_t data_block_size; /* data block in bytes */
+ uint32_t hash_block_size; /* hash block in bytes */
+ uint64_t data_blocks; /* number of data blocks */
+ uint16_t salt_size; /* salt size */
+ uint8_t _pad1[6];
+ uint8_t salt[256]; /* salt */
+ uint8_t _pad2[168];
+} __attribute__((packed));
+
+/* Read verity superblock from disk */
+int VERITY_read_sb(struct crypt_device *cd,
+ uint64_t sb_offset,
+ char **uuid_string,
+ struct crypt_params_verity *params)
+{
+ struct device *device = crypt_metadata_device(cd);
+ struct verity_sb sb = {};
+ ssize_t hdr_size = sizeof(struct verity_sb);
+ int devfd, sb_version;
+
+ log_dbg(cd, "Reading VERITY header of size %zu on device %s, offset %" PRIu64 ".",
+ sizeof(struct verity_sb), device_path(device), sb_offset);
+
+ if (params->flags & CRYPT_VERITY_NO_HEADER) {
+ log_err(cd, _("Verity device %s does not use on-disk header."),
+ device_path(device));
+ return -EINVAL;
+ }
+
+ if (MISALIGNED_512(sb_offset)) {
+ log_err(cd, _("Unsupported VERITY hash offset."));
+ return -EINVAL;
+ }
+
+ devfd = device_open(cd, device, O_RDONLY);
+ if (devfd < 0) {
+ log_err(cd, _("Cannot open device %s."), device_path(device));
+ return -EINVAL;
+ }
+
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), &sb, hdr_size,
+ sb_offset) < hdr_size)
+ return -EIO;
+
+ if (memcmp(sb.signature, VERITY_SIGNATURE, sizeof(sb.signature))) {
+ log_dbg(cd, "No VERITY signature detected.");
+ return -EINVAL;
+ }
+
+ sb_version = le32_to_cpu(sb.version);
+ if (sb_version != 1) {
+ log_err(cd, _("Unsupported VERITY version %d."), sb_version);
+ return -EINVAL;
+ }
+ params->hash_type = le32_to_cpu(sb.hash_type);
+ if (params->hash_type > VERITY_MAX_HASH_TYPE) {
+ log_err(cd, _("Unsupported VERITY hash type %d."), params->hash_type);
+ return -EINVAL;
+ }
+
+ params->data_block_size = le32_to_cpu(sb.data_block_size);
+ params->hash_block_size = le32_to_cpu(sb.hash_block_size);
+ if (VERITY_BLOCK_SIZE_OK(params->data_block_size) ||
+ VERITY_BLOCK_SIZE_OK(params->hash_block_size)) {
+ log_err(cd, _("Unsupported VERITY block size."));
+ return -EINVAL;
+ }
+ params->data_size = le64_to_cpu(sb.data_blocks);
+
+ /* Update block size to be used for loop devices */
+ device_set_block_size(crypt_metadata_device(cd), params->hash_block_size);
+ device_set_block_size(crypt_data_device(cd), params->data_block_size);
+
+ params->hash_name = strndup((const char*)sb.algorithm, sizeof(sb.algorithm));
+ if (!params->hash_name)
+ return -ENOMEM;
+ if (crypt_hash_size(params->hash_name) <= 0) {
+ log_err(cd, _("Hash algorithm %s not supported."),
+ params->hash_name);
+ free(CONST_CAST(char*)params->hash_name);
+ params->hash_name = NULL;
+ return -EINVAL;
+ }
+
+ params->salt_size = le16_to_cpu(sb.salt_size);
+ if (params->salt_size > sizeof(sb.salt)) {
+ log_err(cd, _("VERITY header corrupted."));
+ free(CONST_CAST(char*)params->hash_name);
+ params->hash_name = NULL;
+ return -EINVAL;
+ }
+ params->salt = malloc(params->salt_size);
+ if (!params->salt) {
+ free(CONST_CAST(char*)params->hash_name);
+ params->hash_name = NULL;
+ return -ENOMEM;
+ }
+ memcpy(CONST_CAST(char*)params->salt, sb.salt, params->salt_size);
+
+ if ((*uuid_string = malloc(40)))
+ uuid_unparse(sb.uuid, *uuid_string);
+
+ params->hash_area_offset = sb_offset;
+ return 0;
+}
+
+static void _to_lower(char *str)
+{
+ for (; *str; str++)
+ if (isupper(*str))
+ *str = tolower(*str);
+}
+
+/* Write verity superblock to disk */
+int VERITY_write_sb(struct crypt_device *cd,
+ uint64_t sb_offset,
+ const char *uuid_string,
+ struct crypt_params_verity *params)
+{
+ struct device *device = crypt_metadata_device(cd);
+ struct verity_sb sb = {};
+ ssize_t hdr_size = sizeof(struct verity_sb);
+ size_t block_size;
+ char *algorithm;
+ uuid_t uuid;
+ int r, devfd;
+
+ log_dbg(cd, "Updating VERITY header of size %zu on device %s, offset %" PRIu64 ".",
+ sizeof(struct verity_sb), device_path(device), sb_offset);
+
+ if (!uuid_string || uuid_parse(uuid_string, uuid) == -1) {
+ log_err(cd, _("Wrong VERITY UUID format provided on device %s."),
+ device_path(device));
+ return -EINVAL;
+ }
+
+ if (params->flags & CRYPT_VERITY_NO_HEADER) {
+ log_err(cd, _("Verity device %s does not use on-disk header."),
+ device_path(device));
+ return -EINVAL;
+ }
+
+ /* Avoid possibly increasing the image size - FEC could fail later because of it */
+ block_size = device_block_size(cd, device);
+ if (block_size > params->hash_block_size) {
+ device_disable_direct_io(device);
+ block_size = params->hash_block_size;
+ }
+
+ devfd = device_open(cd, device, O_RDWR);
+ if (devfd < 0) {
+ log_err(cd, _("Cannot open device %s."), device_path(device));
+ return -EINVAL;
+ }
+
+ memcpy(&sb.signature, VERITY_SIGNATURE, sizeof(sb.signature));
+ sb.version = cpu_to_le32(1);
+ sb.hash_type = cpu_to_le32(params->hash_type);
+ sb.data_block_size = cpu_to_le32(params->data_block_size);
+ sb.hash_block_size = cpu_to_le32(params->hash_block_size);
+ sb.salt_size = cpu_to_le16(params->salt_size);
+ sb.data_blocks = cpu_to_le64(params->data_size);
+
+ /* Kernel always uses lower-case names */
+ algorithm = (char *)sb.algorithm;
+ strncpy(algorithm, params->hash_name, sizeof(sb.algorithm)-1);
+ algorithm[sizeof(sb.algorithm)-1] = '\0';
+ _to_lower(algorithm);
+
+ memcpy(sb.salt, params->salt, params->salt_size);
+ memcpy(sb.uuid, uuid, sizeof(sb.uuid));
+
+ r = write_lseek_blockwise(devfd, block_size, device_alignment(device),
+ (char*)&sb, hdr_size, sb_offset) < hdr_size ? -EIO : 0;
+ if (r)
+ log_err(cd, _("Error during update of verity header on device %s."),
+ device_path(device));
+
+ device_sync(cd, device);
+
+ return r;
+}
+
+/* Calculate hash offset in hash blocks */
+uint64_t VERITY_hash_offset_block(struct crypt_params_verity *params)
+{
+ uint64_t hash_offset = params->hash_area_offset;
+
+ if (params->flags & CRYPT_VERITY_NO_HEADER)
+ return hash_offset / params->hash_block_size;
+
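+ /* Skip the superblock and round up to the next hash block boundary. */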
+ hash_offset += sizeof(struct verity_sb);
+ hash_offset += params->hash_block_size - 1;
+
+ return hash_offset / params->hash_block_size;
+}
+
+int VERITY_UUID_generate(char **uuid_string)
+{
+ uuid_t uuid;
+
+ *uuid_string = malloc(40);
+ if (!*uuid_string)
+ return -ENOMEM;
+ uuid_generate(uuid);
+ uuid_unparse(uuid, *uuid_string);
+ return 0;
+}
+
+/* Activate verity device in kernel device-mapper */
+int VERITY_activate(struct crypt_device *cd,
+ const char *name,
+ const char *root_hash,
+ size_t root_hash_size,
+ const char *signature_description,
+ struct device *fec_device,
+ struct crypt_params_verity *verity_hdr,
+ uint32_t activation_flags)
+{
+ uint32_t dmv_flags;
+ unsigned int fec_errors = 0;
+ int r, v;
+ struct crypt_dm_active_device dmd = {
+ .size = verity_hdr->data_size * verity_hdr->data_block_size / 512,
+ .flags = activation_flags,
+ .uuid = crypt_get_uuid(cd),
+ };
+
+ log_dbg(cd, "Trying to activate VERITY device %s using hash %s.",
+ name ?: "[none]", verity_hdr->hash_name);
+
+ if (verity_hdr->flags & CRYPT_VERITY_CHECK_HASH) {
+ if (signature_description) {
+ log_err(cd, _("Root hash signature verification is not supported."));
+ return -EINVAL;
+ }
+
+ log_dbg(cd, "Verification of data in userspace required.");
+ r = VERITY_verify(cd, verity_hdr, root_hash, root_hash_size);
+
+ if ((r == -EPERM || r == -EFAULT) && fec_device) {
+ v = r;
+ log_dbg(cd, "Verification failed, trying to repair with FEC device.");
+ r = VERITY_FEC_process(cd, verity_hdr, fec_device, 1, &fec_errors);
+ if (r < 0)
+ log_err(cd, _("Errors cannot be repaired with FEC device."));
+ else if (fec_errors) {
+ log_err(cd, _("Found %u repairable errors with FEC device."),
+ fec_errors);
+ /* If root hash failed, we cannot be sure it was properly repaired */
+ }
+ if (v == -EFAULT)
+ r = -EPERM;
+ }
+
+ if (r < 0)
+ return r;
+ }
+
+ if (!name)
+ return 0;
+
+ r = device_block_adjust(cd, crypt_metadata_device(cd), DEV_OK,
+ 0, NULL, NULL);
+ if (r)
+ return r;
+
+ r = device_block_adjust(cd, crypt_data_device(cd), DEV_EXCL,
+ 0, &dmd.size, &dmd.flags);
+ if (r)
+ return r;
+
+ if (fec_device) {
+ r = device_block_adjust(cd, fec_device, DEV_OK,
+ 0, NULL, NULL);
+ if (r)
+ return r;
+ }
+
+ r = dm_verity_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
+ crypt_metadata_device(cd), fec_device, root_hash,
+ root_hash_size, signature_description,
+ VERITY_hash_offset_block(verity_hdr),
+ VERITY_FEC_blocks(cd, fec_device, verity_hdr), verity_hdr);
+
+ if (r)
+ return r;
+
+ r = dm_create_device(cd, name, CRYPT_VERITY, &dmd);
+ if (r < 0 && (dm_flags(cd, DM_VERITY, &dmv_flags) || !(dmv_flags & DM_VERITY_SUPPORTED))) {
+ log_err(cd, _("Kernel does not support dm-verity mapping."));
+ r = -ENOTSUP;
+ }
+ if (r < 0 && signature_description && !(dmv_flags & DM_VERITY_SIGNATURE_SUPPORTED)) {
+ log_err(cd, _("Kernel does not support dm-verity signature option."));
+ r = -ENOTSUP;
+ }
+ if (r < 0)
+ goto out;
+
+ r = dm_status_verity_ok(cd, name);
+ if (r < 0)
+ goto out;
+
+ if (!r)
+ log_err(cd, _("Verity device detected corruption after activation."));
+
+ r = 0;
+out:
+ dm_targets_free(cd, &dmd);
+ return r;
+}
+
+int VERITY_dump(struct crypt_device *cd,
+ struct crypt_params_verity *verity_hdr,
+ const char *root_hash,
+ unsigned int root_hash_size,
+ struct device *fec_device)
+{
+ uint64_t hash_blocks, verity_blocks, fec_blocks = 0, rs_blocks = 0;
+ bool fec_on_hash_device = false;
+
+ hash_blocks = VERITY_hash_blocks(cd, verity_hdr);
+ verity_blocks = VERITY_hash_offset_block(verity_hdr) + hash_blocks;
+
+ if (fec_device && verity_hdr->fec_roots) {
+ fec_blocks = VERITY_FEC_blocks(cd, fec_device, verity_hdr);
+ rs_blocks = VERITY_FEC_RS_blocks(fec_blocks, verity_hdr->fec_roots);
+ fec_on_hash_device = device_is_identical(crypt_metadata_device(cd), fec_device) > 0;
+ /*
+ * No way to access fec_area_offset directly.
+ * Assume FEC area starts directly after hash blocks.
+ */
+ if (fec_on_hash_device)
+ verity_blocks += rs_blocks;
+ }
+
+ log_std(cd, "VERITY header information for %s\n", device_path(crypt_metadata_device(cd)));
+ log_std(cd, "UUID: \t%s\n", crypt_get_uuid(cd) ?: "");
+ log_std(cd, "Hash type: \t%u\n", verity_hdr->hash_type);
+ log_std(cd, "Data blocks: \t%" PRIu64 "\n", verity_hdr->data_size);
+ log_std(cd, "Data block size: \t%u\n", verity_hdr->data_block_size);
+ log_std(cd, "Hash blocks: \t%" PRIu64 "\n", hash_blocks);
+ log_std(cd, "Hash block size: \t%u\n", verity_hdr->hash_block_size);
+ log_std(cd, "Hash algorithm: \t%s\n", verity_hdr->hash_name);
+ if (fec_device && fec_blocks) {
+ log_std(cd, "FEC RS roots: \t%" PRIu32 "\n", verity_hdr->fec_roots);
+ log_std(cd, "FEC blocks: \t%" PRIu64 "\n", rs_blocks);
+ }
+
+ log_std(cd, "Salt: \t");
+ if (verity_hdr->salt_size)
+ crypt_log_hex(cd, verity_hdr->salt, verity_hdr->salt_size, "", 0, NULL);
+ else
+ log_std(cd, "-");
+ log_std(cd, "\n");
+
+ if (root_hash) {
+ log_std(cd, "Root hash: \t");
+ crypt_log_hex(cd, root_hash, root_hash_size, "", 0, NULL);
+ log_std(cd, "\n");
+ }
+
+ /* As dump can be called with only the hash device, we have no idea about offsets here. */
+ if (verity_hdr->hash_area_offset == 0)
+ log_std(cd, "Hash device size: \t%" PRIu64 " [bytes]\n", verity_blocks * verity_hdr->hash_block_size);
+
+ if (fec_device && verity_hdr->fec_area_offset == 0 && fec_blocks && !fec_on_hash_device)
+ log_std(cd, "FEC device size: \t%" PRIu64 " [bytes]\n", rs_blocks * verity_hdr->data_block_size);
+
+ return 0;
+}
diff --git a/lib/verity/verity.h b/lib/verity/verity.h
new file mode 100644
index 0000000..afc411e
--- /dev/null
+++ b/lib/verity/verity.h
@@ -0,0 +1,87 @@
+/*
+ * dm-verity volume handling
+ *
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _VERITY_H
+#define _VERITY_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define VERITY_MAX_HASH_TYPE 1
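+/* Note: despite the name, this evaluates to nonzero when the block size is NOT valid (a power of two in [512, 512 * 1024]). */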
+#define VERITY_BLOCK_SIZE_OK(x) ((x) % 512 || (x) < 512 || \
+ (x) > (512 * 1024) || (x) & ((x)-1))
+
+struct crypt_device;
+struct crypt_params_verity;
+struct device;
+
+int VERITY_read_sb(struct crypt_device *cd,
+ uint64_t sb_offset,
+ char **uuid,
+ struct crypt_params_verity *params);
+
+int VERITY_write_sb(struct crypt_device *cd,
+ uint64_t sb_offset,
+ const char *uuid_string,
+ struct crypt_params_verity *params);
+
+int VERITY_activate(struct crypt_device *cd,
+ const char *name,
+ const char *root_hash,
+ size_t root_hash_size,
+ const char *signature_description,
+ struct device *fec_device,
+ struct crypt_params_verity *verity_hdr,
+ uint32_t activation_flags);
+
+int VERITY_verify(struct crypt_device *cd,
+ struct crypt_params_verity *verity_hdr,
+ const char *root_hash,
+ size_t root_hash_size);
+
+int VERITY_create(struct crypt_device *cd,
+ struct crypt_params_verity *verity_hdr,
+ const char *root_hash,
+ size_t root_hash_size);
+
+int VERITY_FEC_process(struct crypt_device *cd,
+ struct crypt_params_verity *params,
+ struct device *fec_device,
+ int check_fec,
+ unsigned int *errors);
+
+uint64_t VERITY_hash_offset_block(struct crypt_params_verity *params);
+
+uint64_t VERITY_hash_blocks(struct crypt_device *cd, struct crypt_params_verity *params);
+
+uint64_t VERITY_FEC_blocks(struct crypt_device *cd,
+ struct device *fec_device,
+ struct crypt_params_verity *params);
+uint64_t VERITY_FEC_RS_blocks(uint64_t blocks, uint32_t roots);
+
+int VERITY_UUID_generate(char **uuid_string);
+
+int VERITY_dump(struct crypt_device *cd,
+ struct crypt_params_verity *verity_hdr,
+ const char *root_hash,
+ unsigned int root_hash_size,
+ struct device *fec_device);
+
+#endif
diff --git a/lib/verity/verity_fec.c b/lib/verity/verity_fec.c
new file mode 100644
index 0000000..2dbf59e
--- /dev/null
+++ b/lib/verity/verity_fec.c
@@ -0,0 +1,336 @@
+/*
+ * dm-verity Forward Error Correction (FEC) support
+ *
+ * Copyright (C) 2015 Google, Inc. All rights reserved.
+ * Copyright (C) 2017-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+
+#include "verity.h"
+#include "internal.h"
+#include "rs.h"
+
+/* ecc parameters */
+#define FEC_RSM 255
+#define FEC_MIN_RSN 231
+#define FEC_MAX_RSN 253
+
+#define FEC_INPUT_DEVICES 2
+
+/* parameters to init_rs_char */
+#define FEC_PARAMS(roots) \
+ 8, /* symbol size in bits */ \
+ 0x11d, /* field generator polynomial coefficients */ \
+ 0, /* first root of the generator */ \
+ 1, /* primitive element to generate polynomial roots */ \
+ (roots), /* polynomial degree (number of roots) */ \
+ 0 /* padding bytes at the front of shortened block */
+
+struct fec_input_device {
+ struct device *device;
+ int fd;
+ uint64_t start;
+ uint64_t count;
+};
+
+struct fec_context {
+ uint32_t rsn;
+ uint32_t roots;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t rounds;
+ uint32_t block_size;
+ struct fec_input_device *inputs;
+ size_t ninputs;
+};
+
+/* computes ceil(x / y) */
+static inline uint64_t FEC_div_round_up(uint64_t x, uint64_t y)
+{
+ return (x / y) + (x % y > 0 ? 1 : 0);
+}
+
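+/*
+ * RS codewords are interleaved across the covered area so that the
+ * symbols of one codeword land in distant physical blocks; a burst of
+ * corrupted blocks therefore damages only a few symbols per codeword.
+ */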
+/* returns a physical offset for the given RS offset */
+static inline uint64_t FEC_interleave(struct fec_context *ctx, uint64_t offset)
+{
+ return (offset / ctx->rsn) +
+ (offset % ctx->rsn) * ctx->rounds * ctx->block_size;
+}
+
+/* returns data for a byte at the specified RS offset */
+static int FEC_read_interleaved(struct fec_context *ctx, uint64_t i,
+ void *output, size_t count)
+{
+ size_t n;
+ uint64_t offset = FEC_interleave(ctx, i);
+
+ /* offsets outside input area are assumed to contain zeros */
+ if (offset >= ctx->size) {
+ memset(output, 0, count);
+ return 0;
+ }
+
+ /* find the correct input device and read from it */
+ for (n = 0; n < ctx->ninputs; ++n) {
+ if (offset >= ctx->inputs[n].count) {
+ offset -= ctx->inputs[n].count;
+ continue;
+ }
+
+ /* FIXME: read_lseek_blockwise candidate */
+ if (lseek(ctx->inputs[n].fd, ctx->inputs[n].start + offset, SEEK_SET) < 0)
+ return -1;
+ return (read_buffer(ctx->inputs[n].fd, output, count) == (ssize_t)count) ? 0 : -1;
+ }
+
+ /* should never be reached */
+ return -1;
+}
+
+/* encodes/decodes inputs to/from fd */
+static int FEC_process_inputs(struct crypt_device *cd,
+ struct crypt_params_verity *params,
+ struct fec_input_device *inputs,
+ size_t ninputs, int fd,
+ int decode, unsigned int *errors)
+{
+ int r = 0;
+ unsigned int i;
+ struct fec_context ctx;
+ uint32_t b;
+ uint64_t n;
+ uint8_t rs_block[FEC_RSM];
+ uint8_t *buf = NULL;
+ void *rs;
+
+ /* initialize parameters */
+ ctx.roots = params->fec_roots;
+ ctx.rsn = FEC_RSM - ctx.roots;
+ ctx.block_size = params->data_block_size;
+ ctx.inputs = inputs;
+ ctx.ninputs = ninputs;
+
+ rs = init_rs_char(FEC_PARAMS(ctx.roots));
+ if (!rs) {
+ log_err(cd, _("Failed to allocate RS context."));
+ return -ENOMEM;
+ }
+
+ /* calculate the total area covered by error correction codes */
+ ctx.size = 0;
+ for (n = 0; n < ctx.ninputs; ++n) {
+ log_dbg(cd, "FEC input %s, offset %" PRIu64 " [bytes], length %" PRIu64 " [bytes]",
+ device_path(ctx.inputs[n].device), ctx.inputs[n].start, ctx.inputs[n].count);
+ ctx.size += ctx.inputs[n].count;
+ }
+
+ /* each byte in a data block is covered by a different code */
+ ctx.blocks = FEC_div_round_up(ctx.size, ctx.block_size);
+ ctx.rounds = FEC_div_round_up(ctx.blocks, ctx.rsn);
+
+ buf = malloc((size_t)ctx.block_size * ctx.rsn);
+ if (!buf) {
+ log_err(cd, _("Failed to allocate buffer."));
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /* encode/decode input */
+ for (n = 0; n < ctx.rounds; ++n) {
+ for (i = 0; i < ctx.rsn; ++i) {
+ if (FEC_read_interleaved(&ctx, n * ctx.rsn * ctx.block_size + i,
+ &buf[i * ctx.block_size], ctx.block_size)) {
+ log_err(cd, _("Failed to read RS block %" PRIu64 " byte %d."), n, i);
+ r = -EIO;
+ goto out;
+ }
+ }
+
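+		/* Byte b taken from each of the rsn blocks in buf forms one RS codeword. */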
+ for (b = 0; b < ctx.block_size; ++b) {
+ for (i = 0; i < ctx.rsn; ++i)
+ rs_block[i] = buf[i * ctx.block_size + b];
+
+ /* decoding from parity device */
+ if (decode) {
+ if (read_buffer(fd, &rs_block[ctx.rsn], ctx.roots) < 0) {
+ log_err(cd, _("Failed to read parity for RS block %" PRIu64 "."), n);
+ r = -EIO;
+ goto out;
+ }
+
+ /* coverity[tainted_data] */
+ r = decode_rs_char(rs, rs_block);
+ if (r < 0) {
+ log_err(cd, _("Failed to repair parity for block %" PRIu64 "."), n);
+ r = -EPERM;
+ goto out;
+ }
+ /* accumulate the number of corrected errors */
+ if (errors)
+ *errors += r;
+ r = 0;
+ } else {
+ /* encoding and writing parity data to fec device */
+ encode_rs_char(rs, rs_block, &rs_block[ctx.rsn]);
+ if (write_buffer(fd, &rs_block[ctx.rsn], ctx.roots) < 0) {
+ log_err(cd, _("Failed to write parity for RS block %" PRIu64 "."), n);
+ r = -EIO;
+ goto out;
+ }
+ }
+ }
+ }
+out:
+ free_rs_char(rs);
+ free(buf);
+ return r;
+}
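+
+/*
+ * Sizing sketch (assuming FEC_RSM = 255, i.e. the RS(255, N) code used by
+ * dm-verity, with fec_roots = 2 and thus rsn = 253): for a 4096-byte block
+ * size and 1 GiB of covered data, blocks = 262144, rounds =
+ * ceil(262144 / 253) = 1037, and each round emits block_size * roots =
+ * 8192 parity bytes, about 8.1 MiB of parity in total.
+ */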
+
+static int VERITY_FEC_validate(struct crypt_device *cd, struct crypt_params_verity *params)
+{
+ if (params->data_block_size != params->hash_block_size) {
+ log_err(cd, _("Block sizes must match for FEC."));
+ return -EINVAL;
+ }
+
+ if (params->fec_roots > FEC_RSM - FEC_MIN_RSN ||
+ params->fec_roots < FEC_RSM - FEC_MAX_RSN) {
+ log_err(cd, _("Invalid number of parity bytes."));
+ return -EINVAL;
+ }
+
+ return 0;
+}
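+
+/*
+ * With the constants defined earlier in this file (assumed here to be
+ * FEC_RSM = 255, FEC_MIN_RSN = 231 and FEC_MAX_RSN = 253, as in upstream
+ * sources), the check above constrains fec_roots to 2..24, the range the
+ * dm-verity kernel target accepts.
+ */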
+
+int VERITY_FEC_process(struct crypt_device *cd,
+ struct crypt_params_verity *params,
+ struct device *fec_device, int check_fec,
+ unsigned int *errors)
+{
+ int r = -EIO, fd = -1;
+ size_t ninputs = FEC_INPUT_DEVICES;
+ struct fec_input_device inputs[FEC_INPUT_DEVICES] = {
+ {
+ .device = crypt_data_device(cd),
+ .fd = -1,
+ .start = 0,
+ .count = params->data_size * params->data_block_size
+ },{
+ .device = crypt_metadata_device(cd),
+ .fd = -1,
+ .start = VERITY_hash_offset_block(params) * params->data_block_size,
+ .count = (VERITY_FEC_blocks(cd, fec_device, params) - params->data_size) * params->data_block_size
+ }
+ };
+
+ /* validate parameters */
+ r = VERITY_FEC_validate(cd, params);
+ if (r < 0)
+ return r;
+
+ if (!inputs[0].count) {
+ log_err(cd, _("Invalid FEC segment length."));
+ return -EINVAL;
+ }
+ if (!inputs[1].count)
+ ninputs--;
+
+ /* r is 0 after successful validation; make I/O failures below return -EIO */
+ r = -EIO;
+
+ if (check_fec)
+ fd = open(device_path(fec_device), O_RDONLY);
+ else
+ fd = open(device_path(fec_device), O_RDWR);
+
+ if (fd == -1) {
+ log_err(cd, _("Cannot open device %s."), device_path(fec_device));
+ goto out;
+ }
+
+ if (lseek(fd, params->fec_area_offset, SEEK_SET) < 0) {
+ log_dbg(cd, "Cannot seek to requested position in FEC device.");
+ goto out;
+ }
+
+ /* input devices */
+ inputs[0].fd = open(device_path(inputs[0].device), O_RDONLY);
+ if (inputs[0].fd == -1) {
+ log_err(cd, _("Cannot open device %s."), device_path(inputs[0].device));
+ goto out;
+ }
+ inputs[1].fd = open(device_path(inputs[1].device), O_RDONLY);
+ if (inputs[1].fd == -1) {
+ log_err(cd, _("Cannot open device %s."), device_path(inputs[1].device));
+ goto out;
+ }
+
+ r = FEC_process_inputs(cd, params, inputs, ninputs, fd, check_fec, errors);
+out:
+ if (inputs[0].fd != -1)
+ close(inputs[0].fd);
+ if (inputs[1].fd != -1)
+ close(inputs[1].fd);
+ if (fd != -1)
+ close(fd);
+
+ return r;
+}
+
+/* All blocks that are covered by FEC */
+uint64_t VERITY_FEC_blocks(struct crypt_device *cd,
+ struct device *fec_device,
+ struct crypt_params_verity *params)
+{
+ uint64_t blocks = 0;
+
+ if (!fec_device || VERITY_FEC_validate(cd, params) < 0)
+ return 0;
+
+ /*
+ * FEC covers this data:
+ * | protected data | hash area | padding (optional foreign metadata) |
+ *
+ * If the hash device is a separate image, the metadata covers the whole rest of the image after the hash area.
+ * If the hash and FEC device share one image, the metadata ends at the FEC area offset.
+ */
+ if (device_is_identical(crypt_metadata_device(cd), fec_device) > 0) {
+ log_dbg(cd, "FEC and hash device is the same.");
+ blocks = params->fec_area_offset;
+ } else {
+ /* cover the entire hash device starting from hash_offset */
+ if (device_size(crypt_metadata_device(cd), &blocks)) {
+ log_err(cd, _("Failed to determine size for device %s."),
+ device_path(crypt_metadata_device(cd)));
+ return 0;
+ }
+ }
+
+ blocks /= params->data_block_size;
+ if (blocks)
+ blocks -= VERITY_hash_offset_block(params);
+
+ /* Protected data */
+ blocks += params->data_size;
+
+ return blocks;
+}
+
+/* Blocks needed to store FEC data, blocks must be validated/calculated by VERITY_FEC_blocks() */
+uint64_t VERITY_FEC_RS_blocks(uint64_t blocks, uint32_t roots)
+{
+ return FEC_div_round_up(blocks, FEC_RSM - roots) * roots;
+}
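+
+/*
+ * Example: for blocks = 262144 and roots = 2 (rsn = 253), parity storage
+ * needs ceil(262144 / 253) * 2 = 2074 blocks.
+ */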
diff --git a/lib/verity/verity_hash.c b/lib/verity/verity_hash.c
new file mode 100644
index 0000000..f33b737
--- /dev/null
+++ b/lib/verity/verity_hash.c
@@ -0,0 +1,444 @@
+/*
+ * dm-verity volume handling
+ *
+ * Copyright (C) 2012-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#include "verity.h"
+#include "internal.h"
+
+#define VERITY_MAX_LEVELS 63
+#define VERITY_MAX_DIGEST_SIZE 1024
+
+static unsigned get_bits_up(size_t u)
+{
+ unsigned i = 0;
+ while ((1U << i) < u)
+ i++;
+ return i;
+}
+
+static unsigned get_bits_down(size_t u)
+{
+ unsigned i = 0;
+ while ((u >> i) > 1U)
+ i++;
+ return i;
+}
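+
+/*
+ * get_bits_up(u) is ceil(log2(u)) and get_bits_down(u) is floor(log2(u)),
+ * e.g. for u = 20: get_bits_up() returns 5 (since 1 << 5 = 32 >= 20) and
+ * get_bits_down() returns 4 (since 20 >> 4 == 1).
+ */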
+
+static int verify_zero(struct crypt_device *cd, FILE *wr, size_t bytes)
+{
+ char *block = NULL;
+ size_t i;
+ int r;
+
+ block = malloc(bytes);
+ if (!block)
+ return -ENOMEM;
+
+ if (fread(block, bytes, 1, wr) != 1) {
+ log_dbg(cd, "EIO while reading spare area.");
+ r = -EIO;
+ goto out;
+ }
+ for (i = 0; i < bytes; i++)
+ if (block[i]) {
+ log_err(cd, _("Spare area is not zeroed at position %" PRIu64 "."),
+ ftello(wr) - bytes);
+ r = -EPERM;
+ goto out;
+ }
+ r = 0;
+out:
+ free(block);
+ return r;
+}
+
+static int verify_hash_block(const char *hash_name, int version,
+ char *hash, size_t hash_size,
+ const char *data, size_t data_size,
+ const char *salt, size_t salt_size)
+{
+ struct crypt_hash *ctx = NULL;
+ int r;
+
+ if (crypt_hash_init(&ctx, hash_name))
+ return -EINVAL;
+
+ if (version == 1 && (r = crypt_hash_write(ctx, salt, salt_size)))
+ goto out;
+
+ if ((r = crypt_hash_write(ctx, data, data_size)))
+ goto out;
+
+ if (version == 0 && (r = crypt_hash_write(ctx, salt, salt_size)))
+ goto out;
+
+ r = crypt_hash_final(ctx, hash, hash_size);
+out:
+ crypt_hash_destroy(ctx);
+ return r;
+}
+
+static int hash_levels(size_t hash_block_size, size_t digest_size,
+ uint64_t data_file_blocks, uint64_t *hash_position, int *levels,
+ uint64_t *hash_level_block, uint64_t *hash_level_size)
+{
+ size_t hash_per_block_bits;
+ uint64_t s, s_shift;
+ int i;
+
+ if (!digest_size)
+ return -EINVAL;
+
+ hash_per_block_bits = get_bits_down(hash_block_size / digest_size);
+ if (!hash_per_block_bits)
+ return -EINVAL;
+
+ *levels = 0;
+ while (hash_per_block_bits * *levels < 64 &&
+ (data_file_blocks - 1) >> (hash_per_block_bits * *levels))
+ (*levels)++;
+
+ if (*levels > VERITY_MAX_LEVELS)
+ return -EINVAL;
+
+ for (i = *levels - 1; i >= 0; i--) {
+ if (hash_level_block)
+ hash_level_block[i] = *hash_position;
+ /* number of hash blocks needed at level i to cover data_file_blocks blocks */
+ s_shift = (i + 1) * hash_per_block_bits;
+ if (s_shift > 63)
+ return -EINVAL;
+ s = (data_file_blocks + ((uint64_t)1 << s_shift) - 1) >> ((i + 1) * hash_per_block_bits);
+ if (hash_level_size)
+ hash_level_size[i] = s;
+ if ((*hash_position + s) < *hash_position)
+ return -EINVAL;
+ *hash_position += s;
+ }
+
+ return 0;
+}
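+
+/*
+ * Worked example (hypothetical parameters): with hash_block_size = 4096 and
+ * digest_size = 32, hash_per_block_bits = 7, i.e. 128 digests per hash
+ * block. For data_file_blocks = 1000000 this gives levels = 3 with
+ * hash_level_size[] = { 7813, 62, 1 }; the loop fills positions from the
+ * top level down, so the single top-level block is stored first on disk.
+ */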
+
+static int create_or_verify(struct crypt_device *cd, FILE *rd, FILE *wr,
+ uint64_t data_block, size_t data_block_size,
+ uint64_t hash_block, size_t hash_block_size,
+ uint64_t blocks, int version,
+ const char *hash_name, int verify,
+ char *calculated_digest, size_t digest_size,
+ const char *salt, size_t salt_size)
+{
+ char *left_block, *data_buffer;
+ char read_digest[VERITY_MAX_DIGEST_SIZE];
+ size_t hash_per_block = 1 << get_bits_down(hash_block_size / digest_size);
+ size_t digest_size_full = 1 << get_bits_up(digest_size);
+ uint64_t blocks_to_write = (blocks + hash_per_block - 1) / hash_per_block;
+ uint64_t seek_rd, seek_wr;
+ size_t left_bytes;
+ unsigned i;
+ int r;
+
+ if (digest_size > sizeof(read_digest))
+ return -EINVAL;
+
+ if (uint64_mult_overflow(&seek_rd, data_block, data_block_size) ||
+ uint64_mult_overflow(&seek_wr, hash_block, hash_block_size)) {
+ log_err(cd, _("Device offset overflow."));
+ return -EINVAL;
+ }
+
+ if (fseeko(rd, seek_rd, SEEK_SET)) {
+ log_dbg(cd, "Cannot seek to requested position in data device.");
+ return -EIO;
+ }
+
+ if (wr && fseeko(wr, seek_wr, SEEK_SET)) {
+ log_dbg(cd, "Cannot seek to requested position in hash device.");
+ return -EIO;
+ }
+
+ left_block = malloc(hash_block_size);
+ data_buffer = malloc(data_block_size);
+ if (!left_block || !data_buffer) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ memset(left_block, 0, hash_block_size);
+ while (blocks_to_write--) {
+ left_bytes = hash_block_size;
+ for (i = 0; i < hash_per_block; i++) {
+ if (!blocks)
+ break;
+ blocks--;
+ if (fread(data_buffer, data_block_size, 1, rd) != 1) {
+ log_dbg(cd, "Cannot read data device block.");
+ r = -EIO;
+ goto out;
+ }
+
+ if (verify_hash_block(hash_name, version,
+ calculated_digest, digest_size,
+ data_buffer, data_block_size,
+ salt, salt_size)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!wr)
+ break;
+ if (verify) {
+ if (fread(read_digest, digest_size, 1, wr) != 1) {
+ log_dbg(cd, "Cannot read digest form hash device.");
+ r = -EIO;
+ goto out;
+ }
+ if (crypt_backend_memeq(read_digest, calculated_digest, digest_size)) {
+ log_err(cd, _("Verification failed at position %" PRIu64 "."),
+ ftello(rd) - data_block_size);
+ r = -EPERM;
+ goto out;
+ }
+ } else {
+ if (fwrite(calculated_digest, digest_size, 1, wr) != 1) {
+ log_dbg(cd, "Cannot write digest to hash device.");
+ r = -EIO;
+ goto out;
+ }
+ }
+ if (version == 0) {
+ left_bytes -= digest_size;
+ } else {
+ if (digest_size_full - digest_size) {
+ if (verify) {
+ r = verify_zero(cd, wr, digest_size_full - digest_size);
+ if (r)
+ goto out;
+ } else if (fwrite(left_block, digest_size_full - digest_size, 1, wr) != 1) {
+ log_dbg(cd, "Cannot write spare area to hash device.");
+ r = -EIO;
+ goto out;
+ }
+ }
+ left_bytes -= digest_size_full;
+ }
+ }
+ if (wr && left_bytes) {
+ if (verify) {
+ r = verify_zero(cd, wr, left_bytes);
+ if (r)
+ goto out;
+ } else if (fwrite(left_block, left_bytes, 1, wr) != 1) {
+ log_dbg(cd, "Cannot write remaining spare area to hash device.");
+ r = -EIO;
+ goto out;
+ }
+ }
+ }
+ r = 0;
+out:
+ free(left_block);
+ free(data_buffer);
+ return r;
+}
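+
+/*
+ * Layout note: for hash_type 1 each digest is padded with zeros to the next
+ * power of two (digest_size_full), so e.g. a 20-byte SHA1 digest occupies
+ * 32 bytes on the hash device; hash_type 0 packs digests back to back. Any
+ * leftover space at the end of a hash block is likewise written as (or, in
+ * verify mode, checked to be) zeros.
+ */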
+
+static int VERITY_create_or_verify_hash(struct crypt_device *cd, bool verify,
+ struct crypt_params_verity *params,
+ char *root_hash, size_t digest_size)
+{
+ char calculated_digest[VERITY_MAX_DIGEST_SIZE];
+ FILE *data_file = NULL;
+ FILE *hash_file = NULL, *hash_file_2;
+ uint64_t hash_level_block[VERITY_MAX_LEVELS];
+ uint64_t hash_level_size[VERITY_MAX_LEVELS];
+ uint64_t data_file_blocks;
+ uint64_t data_device_offset_max = 0, hash_device_offset_max = 0;
+ uint64_t hash_position = VERITY_hash_offset_block(params);
+ uint64_t dev_size;
+ int levels, i, r;
+
+ log_dbg(cd, "Hash %s %s, data device %s, data blocks %" PRIu64
+ ", hash_device %s, offset %" PRIu64 ".",
+ verify ? "verification" : "creation", params->hash_name,
+ device_path(crypt_data_device(cd)), params->data_size,
+ device_path(crypt_metadata_device(cd)), hash_position);
+
+ if (digest_size > sizeof(calculated_digest))
+ return -EINVAL;
+
+ if (!params->data_size) {
+ r = device_size(crypt_data_device(cd), &dev_size);
+ if (r < 0)
+ return r;
+
+ data_file_blocks = dev_size / params->data_block_size;
+ } else
+ data_file_blocks = params->data_size;
+
+ if (uint64_mult_overflow(&data_device_offset_max, params->data_size, params->data_block_size)) {
+ log_err(cd, _("Device offset overflow."));
+ return -EINVAL;
+ }
+ log_dbg(cd, "Data device size required: %" PRIu64 " bytes.", data_device_offset_max);
+
+ if (hash_levels(params->hash_block_size, digest_size, data_file_blocks, &hash_position,
+ &levels, &hash_level_block[0], &hash_level_size[0])) {
+ log_err(cd, _("Hash area overflow."));
+ return -EINVAL;
+ }
+ if (uint64_mult_overflow(&hash_device_offset_max, hash_position, params->hash_block_size)) {
+ log_err(cd, _("Device offset overflow."));
+ return -EINVAL;
+ }
+ log_dbg(cd, "Hash device size required: %" PRIu64 " bytes.",
+ hash_device_offset_max - params->hash_area_offset);
+ log_dbg(cd, "Using %d hash levels.", levels);
+
+ data_file = fopen(device_path(crypt_data_device(cd)), "r");
+ if (!data_file) {
+ log_err(cd, _("Cannot open device %s."),
+ device_path(crypt_data_device(cd)));
+ r = -EIO;
+ goto out;
+ }
+
+ hash_file = fopen(device_path(crypt_metadata_device(cd)), verify ? "r" : "r+");
+ if (!hash_file) {
+ log_err(cd, _("Cannot open device %s."),
+ device_path(crypt_metadata_device(cd)));
+ r = -EIO;
+ goto out;
+ }
+
+ memset(calculated_digest, 0, digest_size);
+
+ for (i = 0; i < levels; i++) {
+ if (!i) {
+ r = create_or_verify(cd, data_file, hash_file,
+ 0, params->data_block_size,
+ hash_level_block[i], params->hash_block_size,
+ data_file_blocks, params->hash_type, params->hash_name, verify,
+ calculated_digest, digest_size, params->salt, params->salt_size);
+ if (r)
+ goto out;
+ } else {
+ hash_file_2 = fopen(device_path(crypt_metadata_device(cd)), "r");
+ if (!hash_file_2) {
+ log_err(cd, _("Cannot open device %s."),
+ device_path(crypt_metadata_device(cd)));
+ r = -EIO;
+ goto out;
+ }
+ r = create_or_verify(cd, hash_file_2, hash_file,
+ hash_level_block[i - 1], params->hash_block_size,
+ hash_level_block[i], params->hash_block_size,
+ hash_level_size[i - 1], params->hash_type, params->hash_name, verify,
+ calculated_digest, digest_size, params->salt, params->salt_size);
+ fclose(hash_file_2);
+ if (r)
+ goto out;
+ }
+ }
+
+ if (levels)
+ r = create_or_verify(cd, hash_file, NULL,
+ hash_level_block[levels - 1], params->hash_block_size,
+ 0, params->hash_block_size,
+ 1, params->hash_type, params->hash_name, verify,
+ calculated_digest, digest_size, params->salt, params->salt_size);
+ else
+ r = create_or_verify(cd, data_file, NULL,
+ 0, params->data_block_size,
+ 0, params->hash_block_size,
+ data_file_blocks, params->hash_type, params->hash_name, verify,
+ calculated_digest, digest_size, params->salt, params->salt_size);
+out:
+ if (verify) {
+ if (r)
+ log_err(cd, _("Verification of data area failed."));
+ else {
+ log_dbg(cd, "Verification of data area succeeded.");
+ r = crypt_backend_memeq(root_hash, calculated_digest, digest_size) ? -EFAULT : 0;
+ if (r)
+ log_err(cd, _("Verification of root hash failed."));
+ else
+ log_dbg(cd, "Verification of root hash succeeded.");
+ }
+ } else {
+ if (r == -EIO)
+ log_err(cd, _("Input/output error while creating hash area."));
+ else if (r)
+ log_err(cd, _("Creation of hash area failed."));
+ else {
+ fsync(fileno(hash_file));
+ memcpy(root_hash, calculated_digest, digest_size);
+ }
+ }
+
+ if (data_file)
+ fclose(data_file);
+ if (hash_file)
+ fclose(hash_file);
+ return r;
+}
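+
+/*
+ * The hash tree is built bottom-up: level 0 digests the data blocks, each
+ * higher level digests the level below it, and a final create_or_verify()
+ * pass with wr == NULL digests the single top-level block to produce the
+ * root hash, which is then compared with (verify) or copied out to
+ * (create) root_hash.
+ */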
+
+/* Verify verity device using userspace crypto backend */
+int VERITY_verify(struct crypt_device *cd,
+ struct crypt_params_verity *verity_hdr,
+ const char *root_hash,
+ size_t root_hash_size)
+{
+ return VERITY_create_or_verify_hash(cd, 1, verity_hdr, CONST_CAST(char*)root_hash, root_hash_size);
+}
+
+/* Create verity hash */
+int VERITY_create(struct crypt_device *cd,
+ struct crypt_params_verity *verity_hdr,
+ const char *root_hash,
+ size_t root_hash_size)
+{
+ unsigned pgsize = (unsigned)crypt_getpagesize();
+
+ if (verity_hdr->salt_size > 256)
+ return -EINVAL;
+
+ if (verity_hdr->data_block_size > pgsize)
+ log_err(cd, _("WARNING: Kernel cannot activate device if data "
+ "block size exceeds page size (%u)."), pgsize);
+
+ return VERITY_create_or_verify_hash(cd, 0, verity_hdr, CONST_CAST(char*)root_hash, root_hash_size);
+}
+
+uint64_t VERITY_hash_blocks(struct crypt_device *cd, struct crypt_params_verity *params)
+{
+ uint64_t hash_position = 0;
+ int levels = 0;
+
+ if (hash_levels(params->hash_block_size, crypt_get_volume_key_size(cd),
+ params->data_size, &hash_position, &levels, NULL, NULL))
+ return 0;
+
+ return (uint64_t)hash_position;
+}
diff --git a/lib/volumekey.c b/lib/volumekey.c
new file mode 100644
index 0000000..00791ac
--- /dev/null
+++ b/lib/volumekey.c
@@ -0,0 +1,147 @@
+/*
+ * cryptsetup volume key implementation
+ *
+ * Copyright (C) 2004-2006 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2010-2023 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include "internal.h"
+
+struct volume_key *crypt_alloc_volume_key(size_t keylength, const char *key)
+{
+ struct volume_key *vk;
+
+ if (keylength > (SIZE_MAX - sizeof(*vk)))
+ return NULL;
+
+ vk = malloc(sizeof(*vk) + keylength);
+ if (!vk)
+ return NULL;
+
+ vk->key_description = NULL;
+ vk->keylength = keylength;
+ vk->id = -1;
+ vk->next = NULL;
+
+ /* keylength 0 is valid => no key */
+ if (vk->keylength) {
+ if (key)
+ memcpy(&vk->key, key, keylength);
+ else
+ crypt_safe_memzero(&vk->key, keylength);
+ }
+
+ return vk;
+}
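+
+/*
+ * The key material lives inline after the struct in a single allocation
+ * (hence malloc(sizeof(*vk) + keylength)), so one buffer is wiped and
+ * freed. A minimal usage sketch, assuming the caller holds a raw key in
+ * key_bytes:
+ *
+ *	struct volume_key *vk = crypt_alloc_volume_key(64, key_bytes);
+ *	if (!vk)
+ *		return -ENOMEM;
+ *	// ... use vk->key / vk->keylength ...
+ *	crypt_free_volume_key(vk); // wipes and frees the key material
+ */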
+
+int crypt_volume_key_set_description(struct volume_key *vk, const char *key_description)
+{
+ if (!vk)
+ return -EINVAL;
+
+ free(CONST_CAST(void*)vk->key_description);
+ vk->key_description = NULL;
+ if (key_description && !(vk->key_description = strdup(key_description)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void crypt_volume_key_set_id(struct volume_key *vk, int id)
+{
+ if (vk && id >= 0)
+ vk->id = id;
+}
+
+int crypt_volume_key_get_id(const struct volume_key *vk)
+{
+ return vk ? vk->id : -1;
+}
+
+struct volume_key *crypt_volume_key_by_id(struct volume_key *vks, int id)
+{
+ struct volume_key *vk = vks;
+
+ if (id < 0)
+ return NULL;
+
+ while (vk && vk->id != id)
+ vk = vk->next;
+
+ return vk;
+}
+
+void crypt_volume_key_add_next(struct volume_key **vks, struct volume_key *vk)
+{
+ struct volume_key *tmp;
+
+ if (!vks)
+ return;
+
+ if (!*vks) {
+ *vks = vk;
+ return;
+ }
+
+ tmp = *vks;
+
+ while (tmp->next)
+ tmp = tmp->next;
+
+ tmp->next = vk;
+}
+
+struct volume_key *crypt_volume_key_next(struct volume_key *vk)
+{
+ return vk ? vk->next : NULL;
+}
+
+void crypt_free_volume_key(struct volume_key *vk)
+{
+ struct volume_key *vk_next;
+
+ while (vk) {
+ crypt_safe_memzero(vk->key, vk->keylength);
+ vk->keylength = 0;
+ free(CONST_CAST(void*)vk->key_description);
+ vk_next = vk->next;
+ free(vk);
+ vk = vk_next;
+ }
+}
+
+struct volume_key *crypt_generate_volume_key(struct crypt_device *cd, size_t keylength)
+{
+ int r;
+ struct volume_key *vk;
+
+ vk = crypt_alloc_volume_key(keylength, NULL);
+ if (!vk)
+ return NULL;
+
+ r = crypt_random_get(cd, vk->key, keylength, CRYPT_RND_KEY);
+ if (r < 0) {
+ crypt_free_volume_key(vk);
+ return NULL;
+ }
+ return vk;
+}