summaryrefslogtreecommitdiffstats
path: root/lib/luks2
diff options
context:
space:
mode:
Diffstat (limited to 'lib/luks2')
-rw-r--r--lib/luks2/luks2.h497
-rw-r--r--lib/luks2/luks2_digest.c455
-rw-r--r--lib/luks2/luks2_digest_pbkdf2.c210
-rw-r--r--lib/luks2/luks2_disk_metadata.c811
-rw-r--r--lib/luks2/luks2_internal.h388
-rw-r--r--lib/luks2/luks2_json_format.c411
-rw-r--r--lib/luks2/luks2_json_metadata.c2874
-rw-r--r--lib/luks2/luks2_keyslot.c977
-rw-r--r--lib/luks2/luks2_keyslot_luks2.c821
-rw-r--r--lib/luks2/luks2_keyslot_reenc.c752
-rw-r--r--lib/luks2/luks2_luks1_convert.c945
-rw-r--r--lib/luks2/luks2_reencrypt.c4375
-rw-r--r--lib/luks2/luks2_reencrypt_digest.c410
-rw-r--r--lib/luks2/luks2_segment.c426
-rw-r--r--lib/luks2/luks2_token.c1043
-rw-r--r--lib/luks2/luks2_token_keyring.c144
16 files changed, 15539 insertions, 0 deletions
diff --git a/lib/luks2/luks2.h b/lib/luks2/luks2.h
new file mode 100644
index 0000000..dfccf02
--- /dev/null
+++ b/lib/luks2/luks2.h
@@ -0,0 +1,497 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_LUKS2_ONDISK_H
+#define _CRYPTSETUP_LUKS2_ONDISK_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "libcryptsetup.h"
+
+#define LUKS2_MAGIC_1ST "LUKS\xba\xbe"
+#define LUKS2_MAGIC_2ND "SKUL\xba\xbe"
+#define LUKS2_MAGIC_L 6
+#define LUKS2_UUID_L 40
+#define LUKS2_LABEL_L 48
+#define LUKS2_SALT_L 64
+#define LUKS2_CHECKSUM_ALG_L 32
+#define LUKS2_CHECKSUM_L 64
+
+#define LUKS2_KEYSLOTS_MAX 32
+#define LUKS2_TOKENS_MAX 32
+#define LUKS2_SEGMENT_MAX 32
+
+#define LUKS2_BUILTIN_TOKEN_PREFIX "luks2-"
+#define LUKS2_BUILTIN_TOKEN_PREFIX_LEN 6
+
+#define LUKS2_TOKEN_NAME_MAX 64
+
+#define LUKS2_TOKEN_KEYRING LUKS2_BUILTIN_TOKEN_PREFIX "keyring"
+
+#define LUKS2_DIGEST_MAX 8
+
+#define CRYPT_ANY_SEGMENT -1
+#define CRYPT_DEFAULT_SEGMENT -2
+#define CRYPT_ONE_SEGMENT -3
+
+#define CRYPT_ANY_DIGEST -1
+
+/* 20 MiBs */
+#define LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH 0x1400000
+
+/* 1 GiB */
+#define LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH 0x40000000
+
+/* supported reencryption requirement versions */
+#define LUKS2_REENCRYPT_REQ_VERSION UINT8_C(2)
+#define LUKS2_DECRYPT_DATASHIFT_REQ_VERSION UINT8_C(3)
+
+/* see reencrypt_assembly_verification_data() in luks2_reencrypt_digest.c */
+/* LUKS2_REENCRYPT_MAX_VERSION UINT8_C(207) */
+
+struct device;
+struct luks2_reencrypt;
+struct reenc_protection;
+struct crypt_lock_handle;
+struct crypt_dm_active_device;
+struct luks_phdr; /* LUKS1 for conversion */
+
+/*
+ * LUKS2 header on-disk.
+ *
+ * Binary header is followed by JSON area.
+ * JSON area is followed by keyslot area and data area,
+ * these are described in JSON metadata.
+ *
+ * Note: uuid, csum_alg are intentionally on the same offset as LUKS1
+ * (checksum alg replaces hash in LUKS1)
+ *
+ * String (char) should be zero terminated.
+ * Padding should be wiped.
+ * Checksum is calculated with csum zeroed (+ full JSON area).
+ */
struct luks2_hdr_disk {
	char magic[LUKS2_MAGIC_L];      /* LUKS2_MAGIC_1ST (primary) or LUKS2_MAGIC_2ND (secondary header) */
	uint16_t version;               /* Version 2 */
	uint64_t hdr_size;              /* in bytes, including JSON area */
	uint64_t seqid;                 /* increased on every update */
	char label[LUKS2_LABEL_L];
	char checksum_alg[LUKS2_CHECKSUM_ALG_L];    /* same offset as LUKS1 hash (see note above) */
	uint8_t salt[LUKS2_SALT_L];     /* unique for every header/offset */
	char uuid[LUKS2_UUID_L];        /* same offset as LUKS1 uuid (see note above) */
	char subsystem[LUKS2_LABEL_L];  /* owner subsystem label */
	uint64_t hdr_offset;            /* offset from device start in bytes */
	char _padding[184];             /* padding; should be wiped */
	uint8_t csum[LUKS2_CHECKSUM_L]; /* checksum calculated with this field zeroed (+ full JSON area) */
	char _padding4096[7*512];       /* pads the binary header to 4096 bytes; JSON area follows */
	/* JSON area starts here */
} __attribute__ ((packed));
+
+/*
+ * LUKS2 header in-memory.
+ */
struct luks2_hdr {
	size_t hdr_size;                        /* header size incl. JSON area (see on-disk hdr_size) */
	uint64_t seqid;                         /* sequence id of the loaded header */
	unsigned int version;                   /* on-disk format version (2) */
	char label[LUKS2_LABEL_L];
	char subsystem[LUKS2_LABEL_L];
	char checksum_alg[LUKS2_CHECKSUM_ALG_L];
	uint8_t salt1[LUKS2_SALT_L];            /* presumably salt of the primary header — confirm */
	uint8_t salt2[LUKS2_SALT_L];            /* presumably salt of the secondary header — confirm */
	char uuid[LUKS2_UUID_L];
	void *jobj;                             /* parsed JSON metadata (json_object *, kept opaque here) */
	void *jobj_rollback;                    /* JSON snapshot, presumably for LUKS2_hdr_rollback() — confirm */
};
+
/*
 * Keyslot creation parameters: anti-forensic (AF) splitter settings and
 * binary keyslot-area encryption settings. Only the LUKS1-compatible AF
 * type and the raw area type are defined so far.
 */
struct luks2_keyslot_params {
	enum { LUKS2_KEYSLOT_AF_LUKS1 = 0 } af_type;
	enum { LUKS2_KEYSLOT_AREA_RAW = 0 } area_type;

	union {
		struct {
			char hash[LUKS2_CHECKSUM_ALG_L]; // or include luks.h
			unsigned int stripes;            // AF stripes count
		} luks1;
	} af;

	union {
		struct {
			char encryption[65]; // or include utils_crypt.h
			size_t key_size;     // keyslot encryption key size in bytes
		} raw;
	} area;
};
+
+/*
+ * Supportable header sizes (hdr_disk + JSON area)
+ * Also used as offset for the 2nd header.
+ */
+#define LUKS2_HDR_16K_LEN 0x4000
+
+#define LUKS2_HDR_BIN_LEN sizeof(struct luks2_hdr_disk)
+
+//#define LUKS2_DEFAULT_HDR_SIZE 0x400000 /* 4 MiB */
+#define LUKS2_DEFAULT_HDR_SIZE 0x1000000 /* 16 MiB */
+
+#define LUKS2_MAX_KEYSLOTS_SIZE 0x8000000 /* 128 MiB */
+
+#define LUKS2_HDR_OFFSET_MAX 0x400000 /* 4 MiB */
+
+/* Offsets for secondary header (for scan if primary header is corrupted). */
+#define LUKS2_HDR2_OFFSETS { 0x04000, 0x008000, 0x010000, 0x020000, \
+ 0x40000, 0x080000, 0x100000, 0x200000, LUKS2_HDR_OFFSET_MAX }
+
+int LUKS2_hdr_version_unlocked(struct crypt_device *cd,
+ const char *backup_file);
+
+int LUKS2_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr, int repair);
+int LUKS2_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_write_force(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_rollback(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_dump(struct crypt_device *cd, struct luks2_hdr *hdr);
+int LUKS2_hdr_dump_json(struct crypt_device *cd, struct luks2_hdr *hdr, const char **json);
+
+int LUKS2_hdr_uuid(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *uuid);
+
+int LUKS2_hdr_labels(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *label,
+ const char *subsystem,
+ int commit);
+
+void LUKS2_hdr_free(struct crypt_device *cd, struct luks2_hdr *hdr);
+
+int LUKS2_hdr_backup(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *backup_file);
+int LUKS2_hdr_restore(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const char *backup_file);
+
+uint64_t LUKS2_hdr_and_areas_size(struct luks2_hdr *hdr);
+uint64_t LUKS2_keyslots_size(struct luks2_hdr *hdr);
+uint64_t LUKS2_metadata_size(struct luks2_hdr *hdr);
+
+int LUKS2_keyslot_cipher_incompatible(struct crypt_device *cd, const char *cipher_spec);
+
+/*
+ * Generic LUKS2 keyslot
+ */
+int LUKS2_keyslot_open(struct crypt_device *cd,
+ int keyslot,
+ int segment,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk);
+
+int LUKS2_keyslot_open_all_segments(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vks);
+
+int LUKS2_keyslot_store(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const char *password,
+ size_t password_len,
+ const struct volume_key *vk,
+ const struct luks2_keyslot_params *params);
+
+int LUKS2_keyslot_wipe(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int wipe_area_only);
+
+crypt_keyslot_priority LUKS2_keyslot_priority_get(struct luks2_hdr *hdr, int keyslot);
+
+int LUKS2_keyslot_priority_set(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ crypt_keyslot_priority priority,
+ int commit);
+
+int LUKS2_keyslot_swap(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int keyslot2);
+
+/*
+ * Generic LUKS2 token
+ */
+int LUKS2_token_json_get(struct luks2_hdr *hdr,
+ int token,
+ const char **json);
+
+int LUKS2_token_assign(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int token,
+ int assign,
+ int commit);
+
+int LUKS2_token_is_assigned(struct luks2_hdr *hdr,
+ int keyslot,
+ int token);
+
+int LUKS2_token_assignment_copy(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot_from,
+ int keyslot_to,
+ int commit);
+
+int LUKS2_token_create(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *json,
+ int commit);
+
+crypt_token_info LUKS2_token_status(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char **type);
+
+int LUKS2_token_open_and_activate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *name,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ uint32_t flags,
+ void *usrptr);
+
+int LUKS2_token_unlock_key(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ int segment,
+ void *usrptr,
+ struct volume_key **vk);
+
+int LUKS2_token_keyring_get(struct luks2_hdr *hdr,
+ int token,
+ struct crypt_token_params_luks2_keyring *keyring_params);
+
+int LUKS2_token_keyring_json(char *buffer, size_t buffer_size,
+ const struct crypt_token_params_luks2_keyring *keyring_params);
+
+int LUKS2_token_unlock_passphrase(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr,
+ char **passphrase,
+ size_t *passphrase_size);
+
+void crypt_token_unload_external_all(struct crypt_device *cd);
+
+/*
+ * Generic LUKS2 digest
+ */
+int LUKS2_digest_any_matching(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk);
+
+int LUKS2_digest_verify_by_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int segment,
+ const struct volume_key *vk);
+
+int LUKS2_digest_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ int keyslot);
+
+int LUKS2_digest_assign(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int digest,
+ int assign,
+ int commit);
+
+int LUKS2_digest_segment_assign(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int segment,
+ int digest,
+ int assign,
+ int commit);
+
+int LUKS2_digest_by_keyslot(struct luks2_hdr *hdr, int keyslot);
+
+int LUKS2_digest_by_segment(struct luks2_hdr *hdr, int segment);
+
+int LUKS2_digest_create(struct crypt_device *cd,
+ const char *type,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk);
+
+/*
+ * LUKS2 generic
+ */
+int LUKS2_activate(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vk,
+ uint32_t flags);
+
+int LUKS2_activate_multi(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags);
+
+int LUKS2_deactivate(struct crypt_device *cd,
+ const char *name,
+ struct luks2_hdr *hdr,
+ struct crypt_dm_active_device *dmd,
+ uint32_t flags);
+
+int LUKS2_generate_hdr(
+ struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ const char *cipherName,
+ const char *cipherMode,
+ const char *integrity,
+ const char *uuid,
+ unsigned int sector_size,
+ uint64_t data_offset,
+ uint64_t align_offset,
+ uint64_t required_alignment,
+ uint64_t metadata_size,
+ uint64_t keyslots_size);
+
+int LUKS2_check_metadata_area_size(uint64_t metadata_size);
+int LUKS2_check_keyslots_area_size(uint64_t keyslots_size);
+
+int LUKS2_wipe_header_areas(struct crypt_device *cd,
+ struct luks2_hdr *hdr, bool detached_header);
+
+uint64_t LUKS2_get_data_offset(struct luks2_hdr *hdr);
+int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size, bool *dynamic);
+uint32_t LUKS2_get_sector_size(struct luks2_hdr *hdr);
+const char *LUKS2_get_cipher(struct luks2_hdr *hdr, int segment);
+const char *LUKS2_get_integrity(struct luks2_hdr *hdr, int segment);
+int LUKS2_keyslot_params_default(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct luks2_keyslot_params *params);
+int LUKS2_get_volume_key_size(struct luks2_hdr *hdr, int segment);
+int LUKS2_get_keyslot_stored_key_size(struct luks2_hdr *hdr, int keyslot);
+const char *LUKS2_get_keyslot_cipher(struct luks2_hdr *hdr, int keyslot, size_t *key_size);
+int LUKS2_keyslot_find_empty(struct crypt_device *cd, struct luks2_hdr *hdr, size_t keylength);
+int LUKS2_keyslot_active_count(struct luks2_hdr *hdr, int segment);
+crypt_keyslot_info LUKS2_keyslot_info(struct luks2_hdr *hdr, int keyslot);
+int LUKS2_keyslot_area(struct luks2_hdr *hdr,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length);
+int LUKS2_keyslot_pbkdf(struct luks2_hdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf);
+
+/*
+ * Permanent activation flags stored in header
+ */
+int LUKS2_config_get_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *flags);
+int LUKS2_config_set_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t flags);
+
+/*
+ * Requirements for device activation or header modification
+ */
+int LUKS2_config_get_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *reqs);
+int LUKS2_config_set_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs, bool commit);
+int LUKS2_config_set_requirement_version(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t req_id, uint8_t req_version, bool commit);
+
+int LUKS2_config_get_reencrypt_version(struct luks2_hdr *hdr, uint8_t *version);
+
+bool LUKS2_reencrypt_requirement_candidate(struct luks2_hdr *hdr);
+
+int LUKS2_unmet_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs_mask, int quiet);
+
+int LUKS2_key_description_by_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct volume_key *vk, int segment);
+int LUKS2_volume_key_load_in_keyring_by_keyslot(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct volume_key *vk, int keyslot);
+int LUKS2_volume_key_load_in_keyring_by_digest(struct crypt_device *cd,
+ struct volume_key *vk, int digest);
+
+int LUKS2_luks1_to_luks2(struct crypt_device *cd,
+ struct luks_phdr *hdr1,
+ struct luks2_hdr *hdr2);
+int LUKS2_luks2_to_luks1(struct crypt_device *cd,
+ struct luks2_hdr *hdr2,
+ struct luks_phdr *hdr1);
+
+/*
+ * LUKS2 reencryption
+ */
+int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
+ int keyslot_old,
+ int keyslot_new,
+ const char *passphrase,
+ size_t passphrase_size,
+ struct volume_key **vks);
+
+void LUKS2_reencrypt_free(struct crypt_device *cd,
+ struct luks2_reencrypt *rh);
+
+crypt_reencrypt_info LUKS2_reencrypt_status(struct luks2_hdr *hdr);
+
+crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
+ struct crypt_params_reencrypt *params);
+
+int LUKS2_reencrypt_lock(struct crypt_device *cd,
+ struct crypt_lock_handle **reencrypt_lock);
+
+int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd,
+ const char *dm_uuid,
+ struct crypt_lock_handle **reencrypt_lock);
+
+void LUKS2_reencrypt_unlock(struct crypt_device *cd,
+ struct crypt_lock_handle *reencrypt_lock);
+
+int LUKS2_reencrypt_check_device_size(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint64_t check_size,
+ uint64_t *dev_size,
+ bool activation,
+ bool dynamic);
+
+int LUKS2_reencrypt_digest_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vks);
+
+int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct reenc_protection *rp,
+ int reencrypt_keyslot,
+ uint64_t *r_length);
+
+void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp);
+
+#endif
diff --git a/lib/luks2/luks2_digest.c b/lib/luks2/luks2_digest.c
new file mode 100644
index 0000000..933b059
--- /dev/null
+++ b/lib/luks2/luks2_digest.c
@@ -0,0 +1,455 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, digest handling
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
/* Handler for the LUKS1-compatible "pbkdf2" digest type (luks2_digest_pbkdf2.c). */
extern const digest_handler PBKDF2_digest;

/* NULL-terminated table of compiled-in digest handlers. */
static const digest_handler *digest_handlers[LUKS2_DIGEST_MAX] = {
	&PBKDF2_digest,
	NULL
};
+
+static const digest_handler *LUKS2_digest_handler_type(const char *type)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_DIGEST_MAX && digest_handlers[i]; i++) {
+ if (!strcmp(digest_handlers[i]->name, type))
+ return digest_handlers[i];
+ }
+
+ return NULL;
+}
+
/*
 * Resolve the handler for digest slot @digest of the LUKS2 header attached
 * to @cd. Returns NULL when the slot does not exist, lacks a "type" field,
 * or its type has no compiled-in handler.
 */
static const digest_handler *LUKS2_digest_handler(struct crypt_device *cd, int digest)
{
	struct luks2_hdr *hdr;
	json_object *jobj1, *jobj2;

	/* Negative values are error codes or CRYPT_ANY_DIGEST, never valid slots. */
	if (digest < 0)
		return NULL;

	if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
		return NULL;

	if (!(jobj1 = LUKS2_get_digest_jobj(hdr, digest)))
		return NULL;

	if (!json_object_object_get_ex(jobj1, "type", &jobj2))
		return NULL;

	return LUKS2_digest_handler_type(json_object_get_string(jobj2));
}
+
+static int LUKS2_digest_find_free(struct luks2_hdr *hdr)
+{
+ int digest = 0;
+
+ while (LUKS2_get_digest_jobj(hdr, digest) && digest < LUKS2_DIGEST_MAX)
+ digest++;
+
+ return digest < LUKS2_DIGEST_MAX ? digest : -1;
+}
+
/*
 * Allocate a free digest slot and store a digest of volume key @vk in it
 * using the handler named @type (e.g. "pbkdf2").
 *
 * Returns the new digest id (>= 0) on success; -EINVAL for an unknown type
 * or when no slot is free; otherwise the handler's store() error code.
 */
int LUKS2_digest_create(struct crypt_device *cd,
	const char *type,
	struct luks2_hdr *hdr,
	const struct volume_key *vk)
{
	int digest;
	const digest_handler *dh;

	dh = LUKS2_digest_handler_type(type);
	if (!dh)
		return -EINVAL;

	digest = LUKS2_digest_find_free(hdr);
	if (digest < 0)
		return -EINVAL;

	log_dbg(cd, "Creating new digest %d (%s).", digest, type);

	/* store() returns 0 on success; map that to the allocated slot id. */
	return dh->store(cd, digest, vk->key, vk->keylength) ?: digest;
}
+
+int LUKS2_digest_by_keyslot(struct luks2_hdr *hdr, int keyslot)
+{
+ char keyslot_name[16];
+ json_object *jobj_digests, *jobj_digest_keyslots;
+
+ if (snprintf(keyslot_name, sizeof(keyslot_name), "%u", keyslot) < 1)
+ return -ENOMEM;
+
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ json_object_object_get_ex(val, "keyslots", &jobj_digest_keyslots);
+ if (LUKS2_array_jobj(jobj_digest_keyslots, keyslot_name))
+ return atoi(key);
+ }
+
+ return -ENOENT;
+}
+
/*
 * Verify volume key @vk against digest slot @digest via its handler.
 * Returns the digest id on match; -EINVAL when the digest is invalid or
 * has no handler; otherwise the handler's (negative) verify error code.
 */
int LUKS2_digest_verify_by_digest(struct crypt_device *cd,
	int digest,
	const struct volume_key *vk)
{
	const digest_handler *h;
	int r;

	h = LUKS2_digest_handler(cd, digest);
	if (!h)
		return -EINVAL;

	r = h->verify(cd, digest, vk->key, vk->keylength);
	if (r < 0) {
		log_dbg(cd, "Digest %d (%s) verify failed with %d.", digest, h->name, r);
		return r;
	}

	return digest;
}
+
/* Verify @vk against the digest assigned to @keyslot.
 * Returns the digest id on success, negative errno otherwise. */
int LUKS2_digest_verify(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	const struct volume_key *vk,
	int keyslot)
{
	int r = LUKS2_digest_by_keyslot(hdr, keyslot);

	if (r < 0)
		return r;

	log_dbg(cd, "Verifying key from keyslot %d, digest %d.", keyslot, r);

	return LUKS2_digest_verify_by_digest(cd, r, vk);
}
+
+int LUKS2_digest_dump(struct crypt_device *cd, int digest)
+{
+ const digest_handler *h;
+
+ if (!(h = LUKS2_digest_handler(cd, digest)))
+ return -EINVAL;
+
+ return h->dump(cd, digest);
+}
+
+int LUKS2_digest_any_matching(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk)
+{
+ int digest;
+
+ for (digest = 0; digest < LUKS2_DIGEST_MAX; digest++)
+ if (LUKS2_digest_verify_by_digest(cd, digest, vk) == digest)
+ return digest;
+
+ return -ENOENT;
+}
+
/* Verify @vk against the digest assigned to @segment. */
int LUKS2_digest_verify_by_segment(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int segment,
	const struct volume_key *vk)
{
	int digest = LUKS2_digest_by_segment(hdr, segment);

	return LUKS2_digest_verify_by_digest(cd, digest, vk);
}
+
/* FIXME: segment can have more digests */
/*
 * Find the digest slot whose "segments" array references @segment.
 * CRYPT_DEFAULT_SEGMENT is first resolved to the actual default segment.
 * Returns the digest id, -EINVAL on formatting failure, or -ENOENT.
 */
int LUKS2_digest_by_segment(struct luks2_hdr *hdr, int segment)
{
	char segment_name[16];
	json_object *jobj_digests, *jobj_digest_segments;

	if (segment == CRYPT_DEFAULT_SEGMENT)
		segment = LUKS2_get_default_segment(hdr);

	json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);

	/* NOTE(review): "%u" with a signed int; a negative segment (e.g.
	 * CRYPT_ANY_SEGMENT) formats as a huge number and simply never
	 * matches — confirm that is intended before changing the specifier. */
	if (snprintf(segment_name, sizeof(segment_name), "%u", segment) < 1)
		return -EINVAL;

	json_object_object_foreach(jobj_digests, key, val) {
		json_object_object_get_ex(val, "segments", &jobj_digest_segments);
		if (!LUKS2_array_jobj(jobj_digest_segments, segment_name))
			continue;

		return atoi(key);
	}

	return -ENOENT;
}
+
/*
 * Add (@assign != 0) or remove (@assign == 0) keyslot @keyslot in the
 * "keyslots" array of digest @digest. JSON-only change; nothing is
 * written to disk here.
 */
static int assign_one_digest(struct crypt_device *cd, struct luks2_hdr *hdr,
			     int keyslot, int digest, int assign)
{
	json_object *jobj1, *jobj_digest, *jobj_digest_keyslots;
	char num[16];

	log_dbg(cd, "Keyslot %i %s digest %i.", keyslot, assign ? "assigned to" : "unassigned from", digest);

	jobj_digest = LUKS2_get_digest_jobj(hdr, digest);
	if (!jobj_digest)
		return -EINVAL;

	json_object_object_get_ex(jobj_digest, "keyslots", &jobj_digest_keyslots);
	if (!jobj_digest_keyslots)
		return -EINVAL;

	if (snprintf(num, sizeof(num), "%d", keyslot) < 0)
		return -EINVAL;

	if (assign) {
		/* Avoid duplicates: only append when the keyslot is not listed yet. */
		jobj1 = LUKS2_array_jobj(jobj_digest_keyslots, num);
		if (!jobj1)
			json_object_array_add(jobj_digest_keyslots, json_object_new_string(num));
	} else {
		/* LUKS2_array_remove builds a new array without @num;
		 * json_object_object_add replaces the old array and drops
		 * its reference. */
		jobj1 = LUKS2_array_remove(jobj_digest_keyslots, num);
		if (jobj1)
			json_object_object_add(jobj_digest, "keyslots", jobj1);
	}

	return 0;
}
+
/*
 * Assign/unassign @keyslot in one digest, or in every digest when
 * @digest == CRYPT_ANY_DIGEST. When @commit is set and all assignments
 * succeeded, the updated header is written to disk.
 */
int LUKS2_digest_assign(struct crypt_device *cd, struct luks2_hdr *hdr,
	int keyslot, int digest, int assign, int commit)
{
	json_object *jobj_digests;
	int r = 0;

	if (digest == CRYPT_ANY_DIGEST) {
		json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);

		json_object_object_foreach(jobj_digests, key, val) {
			UNUSED(val);
			/* Digest ids are the JSON object keys ("0", "1", ...). */
			r = assign_one_digest(cd, hdr, keyslot, atoi(key), assign);
			if (r < 0)
				break;
		}
	} else
		r = assign_one_digest(cd, hdr, keyslot, digest, assign);

	if (r < 0)
		return r;

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
+
/*
 * Assign: make digest @digest reference every segment present in the
 * header (segments already listed are skipped). Unassign: replace the
 * digest's "segments" array with a fresh empty one; the old array's
 * reference is dropped by json_object_object_add.
 */
static int assign_all_segments(struct luks2_hdr *hdr, int digest, int assign)
{
	json_object *jobj1, *jobj_digest, *jobj_digest_segments;

	jobj_digest = LUKS2_get_digest_jobj(hdr, digest);
	if (!jobj_digest)
		return -EINVAL;

	json_object_object_get_ex(jobj_digest, "segments", &jobj_digest_segments);
	if (!jobj_digest_segments)
		return -EINVAL;

	if (assign) {
		json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, value) {
			UNUSED(value);
			jobj1 = LUKS2_array_jobj(jobj_digest_segments, key);
			if (!jobj1)
				json_object_array_add(jobj_digest_segments, json_object_new_string(key));
		}
	} else {
		jobj1 = json_object_new_array();
		if (!jobj1)
			return -ENOMEM;
		json_object_object_add(jobj_digest, "segments", jobj1);
	}

	return 0;
}
+
/*
 * Add (@assign != 0) or remove (@assign == 0) segment @segment in the
 * "segments" array of digest @digest. Mirrors assign_one_digest();
 * JSON-only change, nothing written to disk.
 */
static int assign_one_segment(struct crypt_device *cd, struct luks2_hdr *hdr,
			      int segment, int digest, int assign)
{
	json_object *jobj1, *jobj_digest, *jobj_digest_segments;
	char num[16];

	log_dbg(cd, "Segment %i %s digest %i.", segment, assign ? "assigned to" : "unassigned from", digest);

	jobj_digest = LUKS2_get_digest_jobj(hdr, digest);
	if (!jobj_digest)
		return -EINVAL;

	json_object_object_get_ex(jobj_digest, "segments", &jobj_digest_segments);
	if (!jobj_digest_segments)
		return -EINVAL;

	if (snprintf(num, sizeof(num), "%d", segment) < 0)
		return -EINVAL;

	if (assign) {
		/* Avoid duplicates: only append when the segment is not listed yet. */
		jobj1 = LUKS2_array_jobj(jobj_digest_segments, num);
		if (!jobj1)
			json_object_array_add(jobj_digest_segments, json_object_new_string(num));
	} else {
		/* Replace the array with a copy lacking @num; the old array's
		 * reference is dropped by json_object_object_add. */
		jobj1 = LUKS2_array_remove(jobj_digest_segments, num);
		if (jobj1)
			json_object_object_add(jobj_digest, "segments", jobj1);
	}

	return 0;
}
+
/*
 * Assign/unassign segment(s) to digest(s). CRYPT_DEFAULT_SEGMENT resolves
 * to the actual default segment, CRYPT_ANY_SEGMENT means all segments,
 * CRYPT_ANY_DIGEST means all digests. When @commit is set and everything
 * succeeded, the updated header is written to disk.
 */
int LUKS2_digest_segment_assign(struct crypt_device *cd, struct luks2_hdr *hdr,
	int segment, int digest, int assign, int commit)
{
	json_object *jobj_digests;
	int r = 0;

	if (segment == CRYPT_DEFAULT_SEGMENT)
		segment = LUKS2_get_default_segment(hdr);

	if (digest == CRYPT_ANY_DIGEST) {
		json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);

		json_object_object_foreach(jobj_digests, key, val) {
			UNUSED(val);
			/* Digest ids are the JSON object keys ("0", "1", ...). */
			if (segment == CRYPT_ANY_SEGMENT)
				r = assign_all_segments(hdr, atoi(key), assign);
			else
				r = assign_one_segment(cd, hdr, segment, atoi(key), assign);
			if (r < 0)
				break;
		}
	} else {
		if (segment == CRYPT_ANY_SEGMENT)
			r = assign_all_segments(hdr, digest, assign);
		else
			r = assign_one_segment(cd, hdr, segment, digest, assign);
	}

	if (r < 0)
		return r;

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
+
+static int digest_unused(json_object *jobj_digest)
+{
+ json_object *jobj;
+
+ json_object_object_get_ex(jobj_digest, "segments", &jobj);
+ if (!jobj || !json_object_is_type(jobj, json_type_array) || json_object_array_length(jobj) > 0)
+ return 0;
+
+ json_object_object_get_ex(jobj_digest, "keyslots", &jobj);
+ if (!jobj || !json_object_is_type(jobj, json_type_array))
+ return 0;
+
+ return json_object_array_length(jobj) > 0 ? 0 : 1;
+}
+
/*
 * Drop every digest object that is referenced by no keyslot and no
 * segment. JSON-only change; the caller is responsible for committing.
 */
void LUKS2_digests_erase_unused(struct crypt_device *cd,
	struct luks2_hdr *hdr)
{
	json_object *jobj_digests;

	json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
	if (!jobj_digests || !json_object_is_type(jobj_digests, json_type_object))
		return;

	json_object_object_foreach(jobj_digests, key, val) {
		if (digest_unused(val)) {
			log_dbg(cd, "Erasing unused digest %d.", atoi(key));
			/* Deleting the current key mid-iteration relies on json-c's
			 * foreach caching the next entry before the body runs. */
			json_object_object_del(jobj_digests, key);
		}
	}
}
+
+/* Key description helpers */
/*
 * Build the kernel keyring key description "cryptsetup:<uuid>-d<digest>".
 * Returns a malloc'd string the caller must free, or NULL on failure
 * (no UUID set, digest id out of formatting range, allocation failure).
 */
static char *get_key_description_by_digest(struct crypt_device *cd, int digest)
{
	char *desc, digest_str[3];
	int r;
	size_t len;

	if (!crypt_get_uuid(cd))
		return NULL;

	/* digest_str holds 'd' + one decimal digit + NUL; any valid slot
	 * (< LUKS2_DIGEST_MAX == 8) fits, larger or negative values fail
	 * the truncation check below. */
	r = snprintf(digest_str, sizeof(digest_str), "d%u", digest);
	if (r < 0 || (size_t)r >= sizeof(digest_str))
		return NULL;

	/* "cryptsetup" (10) + ':' + <uuid> + '-' + <digest_str> + '\0' = +13 */
	len = strlen(crypt_get_uuid(cd)) + strlen(digest_str) + 13;

	desc = malloc(len);
	if (!desc)
		return NULL;

	r = snprintf(desc, len, "%s:%s-%s", "cryptsetup", crypt_get_uuid(cd), digest_str);
	if (r < 0 || (size_t)r >= len) {
		free(desc);
		return NULL;
	}

	return desc;
}
+
/* Set @vk's keyring description to the one derived from the digest
 * assigned to @segment. */
int LUKS2_key_description_by_segment(struct crypt_device *cd,
	struct luks2_hdr *hdr, struct volume_key *vk, int segment)
{
	int r;
	char *description;

	description = get_key_description_by_digest(cd, LUKS2_digest_by_segment(hdr, segment));
	r = crypt_volume_key_set_description(vk, description);
	free(description);

	return r;
}
+
/* Set @vk's description from the digest assigned to @keyslot, then load
 * the key into the kernel keyring. */
int LUKS2_volume_key_load_in_keyring_by_keyslot(struct crypt_device *cd,
	struct luks2_hdr *hdr, struct volume_key *vk, int keyslot)
{
	int r;
	char *description;

	description = get_key_description_by_digest(cd, LUKS2_digest_by_keyslot(hdr, keyslot));
	r = crypt_volume_key_set_description(vk, description);
	if (!r)
		r = crypt_volume_key_load_in_keyring(cd, vk);

	free(description);
	return r;
}
+
/* Set @vk's description from digest id @digest, then load the key into
 * the kernel keyring. */
int LUKS2_volume_key_load_in_keyring_by_digest(struct crypt_device *cd,
	struct volume_key *vk, int digest)
{
	int r;
	char *description = get_key_description_by_digest(cd, digest);

	r = crypt_volume_key_set_description(vk, description);
	if (!r)
		r = crypt_volume_key_load_in_keyring(cd, vk);

	free(description);
	return r;
}
diff --git a/lib/luks2/luks2_digest_pbkdf2.c b/lib/luks2/luks2_digest_pbkdf2.c
new file mode 100644
index 0000000..1009cfb
--- /dev/null
+++ b/lib/luks2/luks2_digest_pbkdf2.c
@@ -0,0 +1,210 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, PBKDF2 digest handler (LUKS1 compatible)
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+#define LUKS_DIGESTSIZE 20 // since SHA1
+#define LUKS_SALTSIZE 32
+#define LUKS_MKD_ITERATIONS_MS 125
+
+static int PBKDF2_digest_verify(struct crypt_device *cd,
+ int digest,
+ const char *volume_key,
+ size_t volume_key_len)
+{
+ char checkHashBuf[64];
+ json_object *jobj_digest, *jobj1;
+ const char *hashSpec;
+ char *mkDigest = NULL, *mkDigestSalt = NULL;
+ unsigned int mkDigestIterations;
+ size_t len;
+ int r = -EINVAL;
+
+ /* This can be done only for internally linked digests */
+ jobj_digest = LUKS2_get_digest_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), digest);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_digest, "hash", &jobj1))
+ return -EINVAL;
+ hashSpec = json_object_get_string(jobj1);
+
+ if (!json_object_object_get_ex(jobj_digest, "iterations", &jobj1))
+ return -EINVAL;
+ mkDigestIterations = json_object_get_int64(jobj1);
+
+ if (!json_object_object_get_ex(jobj_digest, "salt", &jobj1))
+ return -EINVAL;
+ r = crypt_base64_decode(&mkDigestSalt, &len, json_object_get_string(jobj1),
+ json_object_get_string_len(jobj1));
+ if (r < 0)
+ goto out;
+ if (len != LUKS_SALTSIZE)
+ goto out;
+
+ if (!json_object_object_get_ex(jobj_digest, "digest", &jobj1))
+ goto out;
+ r = crypt_base64_decode(&mkDigest, &len, json_object_get_string(jobj1),
+ json_object_get_string_len(jobj1));
+ if (r < 0)
+ goto out;
+ if (len < LUKS_DIGESTSIZE ||
+ len > sizeof(checkHashBuf) ||
+ (len != LUKS_DIGESTSIZE && len != (size_t)crypt_hash_size(hashSpec)))
+ goto out;
+
+ r = -EPERM;
+ if (crypt_pbkdf(CRYPT_KDF_PBKDF2, hashSpec, volume_key, volume_key_len,
+ mkDigestSalt, LUKS_SALTSIZE,
+ checkHashBuf, len,
+ mkDigestIterations, 0, 0) < 0) {
+ r = -EINVAL;
+ } else {
+ if (crypt_backend_memeq(checkHashBuf, mkDigest, len) == 0)
+ r = 0;
+ }
+out:
+ free(mkDigest);
+ free(mkDigestSalt);
+ return r;
+}
+
+static int PBKDF2_digest_store(struct crypt_device *cd,
+ int digest,
+ const char *volume_key,
+ size_t volume_key_len)
+{
+ json_object *jobj_digest, *jobj_digests;
+ char salt[LUKS_SALTSIZE], digest_raw[128];
+ int hmac_size, r;
+ char *base64_str;
+ struct luks2_hdr *hdr;
+ struct crypt_pbkdf_limits pbkdf_limits;
+ const struct crypt_pbkdf_type *pbkdf_cd;
+ struct crypt_pbkdf_type pbkdf = {
+ .type = CRYPT_KDF_PBKDF2,
+ .time_ms = LUKS_MKD_ITERATIONS_MS,
+ };
+
+ /* Inherit hash from PBKDF setting */
+ pbkdf_cd = crypt_get_pbkdf_type(cd);
+ if (pbkdf_cd)
+ pbkdf.hash = pbkdf_cd->hash;
+ if (!pbkdf.hash)
+ pbkdf.hash = DEFAULT_LUKS1_HASH;
+
+ log_dbg(cd, "Setting PBKDF2 type key digest %d.", digest);
+
+ r = crypt_random_get(cd, salt, LUKS_SALTSIZE, CRYPT_RND_SALT);
+ if (r < 0)
+ return r;
+
+ r = crypt_pbkdf_get_limits(CRYPT_KDF_PBKDF2, &pbkdf_limits);
+ if (r < 0)
+ return r;
+
+ if (crypt_get_pbkdf(cd)->flags & CRYPT_PBKDF_NO_BENCHMARK)
+ pbkdf.iterations = pbkdf_limits.min_iterations;
+ else {
+ r = crypt_benchmark_pbkdf_internal(cd, &pbkdf, volume_key_len);
+ if (r < 0)
+ return r;
+ }
+
+ hmac_size = crypt_hmac_size(pbkdf.hash);
+ if (hmac_size < 0 || hmac_size > (int)sizeof(digest_raw))
+ return -EINVAL;
+
+ r = crypt_pbkdf(CRYPT_KDF_PBKDF2, pbkdf.hash, volume_key, volume_key_len,
+ salt, LUKS_SALTSIZE, digest_raw, hmac_size,
+ pbkdf.iterations, 0, 0);
+ if (r < 0)
+ return r;
+
+ jobj_digest = LUKS2_get_digest_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), digest);
+ jobj_digests = NULL;
+ if (!jobj_digest) {
+ hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ jobj_digest = json_object_new_object();
+ json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
+ }
+
+ json_object_object_add(jobj_digest, "type", json_object_new_string("pbkdf2"));
+ json_object_object_add(jobj_digest, "keyslots", json_object_new_array());
+ json_object_object_add(jobj_digest, "segments", json_object_new_array());
+ json_object_object_add(jobj_digest, "hash", json_object_new_string(pbkdf.hash));
+ json_object_object_add(jobj_digest, "iterations", json_object_new_int(pbkdf.iterations));
+
+ r = crypt_base64_encode(&base64_str, NULL, salt, LUKS_SALTSIZE);
+ if (r < 0) {
+ json_object_put(jobj_digest);
+ return r;
+ }
+ json_object_object_add(jobj_digest, "salt", json_object_new_string(base64_str));
+ free(base64_str);
+
+ r = crypt_base64_encode(&base64_str, NULL, digest_raw, hmac_size);
+ if (r < 0) {
+ json_object_put(jobj_digest);
+ return r;
+ }
+ json_object_object_add(jobj_digest, "digest", json_object_new_string(base64_str));
+ free(base64_str);
+
+ if (jobj_digests)
+ json_object_object_add_by_uint(jobj_digests, digest, jobj_digest);
+
+ JSON_DBG(cd, jobj_digest, "Digest JSON:");
+ return 0;
+}
+
+static int PBKDF2_digest_dump(struct crypt_device *cd, int digest)
+{
+ json_object *jobj_digest, *jobj1;
+
+ /* This can be done only for internally linked digests */
+ jobj_digest = LUKS2_get_digest_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), digest);
+ if (!jobj_digest)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_digest, "hash", &jobj1);
+ log_std(cd, "\tHash: %s\n", json_object_get_string(jobj1));
+
+ json_object_object_get_ex(jobj_digest, "iterations", &jobj1);
+ log_std(cd, "\tIterations: %" PRIu64 "\n", json_object_get_int64(jobj1));
+
+ json_object_object_get_ex(jobj_digest, "salt", &jobj1);
+ log_std(cd, "\tSalt: ");
+ hexprint_base64(cd, jobj1, " ", " ");
+
+ json_object_object_get_ex(jobj_digest, "digest", &jobj1);
+ log_std(cd, "\tDigest: ");
+ hexprint_base64(cd, jobj1, " ", " ");
+
+ return 0;
+}
+
/* Handler registered in the digest_handlers[] table in luks2_digest.c. */
const digest_handler PBKDF2_digest = {
	.name = "pbkdf2",
	.verify = PBKDF2_digest_verify,
	.store = PBKDF2_digest_store,
	.dump = PBKDF2_digest_dump,
};
diff --git a/lib/luks2/luks2_disk_metadata.c b/lib/luks2/luks2_disk_metadata.c
new file mode 100644
index 0000000..e995959
--- /dev/null
+++ b/lib/luks2/luks2_disk_metadata.c
@@ -0,0 +1,811 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+/*
+ * Helper functions
+ */
+/*
+ * Parse a JSON document of at most max_length bytes from json_area.
+ * On success the number of bytes consumed by the tokener is stored
+ * in *json_len; returns NULL when the area cannot be parsed.
+ */
+static json_object *parse_json_len(struct crypt_device *cd, const char *json_area,
+				   uint64_t max_length, int *json_len)
+{
+	struct json_tokener *tok;
+	json_object *jobj;
+
+	/* INT32_MAX is internal (json-c) json_tokener_parse_ex() limit */
+	if (!json_area || max_length > INT32_MAX)
+		return NULL;
+
+	tok = json_tokener_new();
+	if (!tok) {
+		log_dbg(cd, "ERROR: Failed to init json tokener");
+		return NULL;
+	}
+
+	jobj = json_tokener_parse_ex(tok, json_area, max_length);
+	if (jobj)
+		*json_len = tok->char_offset;
+	else
+		log_dbg(cd, "ERROR: Failed to parse json data (%d): %s",
+			json_tokener_get_error(tok),
+			json_tokener_error_desc(json_tokener_get_error(tok)));
+
+	json_tokener_free(tok);
+
+	return jobj;
+}
+
+/*
+ * Log a checksum as a hex string for debugging. Silently returns when the
+ * hash algorithm is unknown or its digest would not fit the text buffer
+ * (the original code would log an uninitialized buffer in that case).
+ */
+static void log_dbg_checksum(struct crypt_device *cd,
+	const uint8_t *csum, const char *csum_alg, const char *info)
+{
+	char csum_txt[2*LUKS2_CHECKSUM_L+1];
+	int i, hash_size;
+
+	/* Hoisted out of the loop; also guards against hash_size <= 0
+	 * (csum_txt would never be written) and buffer overflow. */
+	hash_size = crypt_hash_size(csum_alg);
+	if (hash_size <= 0 || hash_size > LUKS2_CHECKSUM_L)
+		return;
+
+	for (i = 0; i < hash_size; i++)
+		if (snprintf(&csum_txt[i*2], 3, "%02hhx", csum[i]) != 2)
+			return;
+
+	log_dbg(cd, "Checksum:%s (%s)", &csum_txt[0], info);
+}
+
+/*
+ * Calculate hash (checksum) of |LUKS2_bin|LUKS2_JSON_area| from in-memory structs.
+ * LUKS2 on-disk header contains uniques salt both for primary and secondary header.
+ * Checksum is always calculated with zeroed checksum field in binary header.
+ */
+/*
+ * Hash |binary header|JSON area| with the requested algorithm and store
+ * the digest into hdr_disk->csum. Caller must have zeroed the checksum
+ * field in hdr_disk before calling.
+ */
+static int hdr_checksum_calculate(const char *alg, struct luks2_hdr_disk *hdr_disk,
+				  const char *json_area, size_t json_len)
+{
+	struct crypt_hash *hd = NULL;
+	int r, hash_len;
+
+	hash_len = crypt_hash_size(alg);
+	if (hash_len <= 0)
+		return -EINVAL;
+
+	if (crypt_hash_init(&hd, alg))
+		return -EINVAL;
+
+	/* Binary header first (csum bytes zeroed by caller). */
+	r = crypt_hash_write(hd, (char*)hdr_disk, LUKS2_HDR_BIN_LEN);
+
+	/* Then the whole JSON area, unused tail included. */
+	if (!r)
+		r = crypt_hash_write(hd, json_area, json_len);
+
+	if (!r)
+		r = crypt_hash_final(hd, (char*)hdr_disk->csum, (size_t)hash_len);
+
+	crypt_hash_destroy(hd);
+	return r;
+}
+
+/*
+ * Compare hash (checksum) of on-disk and in-memory header.
+ */
+/*
+ * Verify that the on-disk header checksum matches a freshly computed one
+ * over the same binary header (csum zeroed) and JSON area.
+ * Returns 0 on match, negative errno otherwise.
+ */
+static int hdr_checksum_check(struct crypt_device *cd,
+	const char *alg, struct luks2_hdr_disk *hdr_disk,
+	const char *json_area, size_t json_len)
+{
+	struct luks2_hdr_disk hdr_copy;
+	int r, csum_len;
+
+	csum_len = crypt_hash_size(alg);
+	if (csum_len <= 0)
+		return -EINVAL;
+
+	/* Work on a copy: checksum field must be zeroed before hashing. */
+	memcpy(&hdr_copy, hdr_disk, LUKS2_HDR_BIN_LEN);
+	memset(&hdr_copy.csum, 0, sizeof(hdr_copy.csum));
+
+	r = hdr_checksum_calculate(alg, &hdr_copy, json_area, json_len);
+	if (r < 0)
+		return r;
+
+	log_dbg_checksum(cd, hdr_disk->csum, alg, "on-disk");
+	log_dbg_checksum(cd, hdr_copy.csum, alg, "in-memory");
+
+	return memcmp(hdr_copy.csum, hdr_disk->csum, (size_t)csum_len) ? -EINVAL : 0;
+}
+
+/*
+ * Convert header from on-disk format to in-memory struct
+ */
+/*
+ * Fill the in-memory header from on-disk copy hdr_disk1. The other copy
+ * (hdr_disk2) only supplies the salt of the alternate header slot: each
+ * on-disk header carries a unique salt, and 'secondary' tells which slot
+ * hdr_disk1 came from.
+ */
+static void hdr_from_disk(struct luks2_hdr_disk *hdr_disk1,
+			  struct luks2_hdr_disk *hdr_disk2,
+			  struct luks2_hdr *hdr,
+			  int secondary)
+{
+	struct luks2_hdr_disk *salt1_src = secondary ? hdr_disk2 : hdr_disk1;
+	struct luks2_hdr_disk *salt2_src = secondary ? hdr_disk1 : hdr_disk2;
+
+	hdr->version = be16_to_cpu(hdr_disk1->version);
+	hdr->hdr_size = be64_to_cpu(hdr_disk1->hdr_size);
+	hdr->seqid = be64_to_cpu(hdr_disk1->seqid);
+
+	/* Strings from disk are not guaranteed terminated: force NUL. */
+	memcpy(hdr->label, hdr_disk1->label, LUKS2_LABEL_L);
+	hdr->label[LUKS2_LABEL_L - 1] = '\0';
+	memcpy(hdr->subsystem, hdr_disk1->subsystem, LUKS2_LABEL_L);
+	hdr->subsystem[LUKS2_LABEL_L - 1] = '\0';
+	memcpy(hdr->checksum_alg, hdr_disk1->checksum_alg, LUKS2_CHECKSUM_ALG_L);
+	hdr->checksum_alg[LUKS2_CHECKSUM_ALG_L - 1] = '\0';
+	memcpy(hdr->uuid, hdr_disk1->uuid, LUKS2_UUID_L);
+	hdr->uuid[LUKS2_UUID_L - 1] = '\0';
+
+	memcpy(hdr->salt1, salt1_src->salt, LUKS2_SALT_L);
+	memcpy(hdr->salt2, salt2_src->salt, LUKS2_SALT_L);
+}
+
+/*
+ * Convert header from in-memory struct to on-disk format
+ */
+/*
+ * Serialize the in-memory header into on-disk (big-endian) format at the
+ * given offset. 'secondary' selects the magic value and which salt is used.
+ */
+static void hdr_to_disk(struct luks2_hdr *hdr,
+			struct luks2_hdr_disk *hdr_disk,
+			int secondary, uint64_t offset)
+{
+	/* Binary part of the on-disk header must be exactly 512 bytes. */
+	assert(((char*)&(hdr_disk->_padding4096) - (char*)&(hdr_disk->magic)) == 512);
+
+	memset(hdr_disk, 0, LUKS2_HDR_BIN_LEN);
+
+	/* Primary and secondary copies use different magic values. */
+	memcpy(&hdr_disk->magic, secondary ? LUKS2_MAGIC_2ND : LUKS2_MAGIC_1ST, LUKS2_MAGIC_L);
+	hdr_disk->version = cpu_to_be16(hdr->version);
+	hdr_disk->hdr_size = cpu_to_be64(hdr->hdr_size);
+	hdr_disk->hdr_offset = cpu_to_be64(offset);
+	hdr_disk->seqid = cpu_to_be64(hdr->seqid);
+
+	/* MIN() bounds the copy; trailing byte is forced to NUL so the
+	 * on-disk string field is always terminated (field was memset to 0). */
+	memcpy(hdr_disk->label, hdr->label, MIN(strlen(hdr->label), LUKS2_LABEL_L));
+	hdr_disk->label[LUKS2_LABEL_L - 1] = '\0';
+	memcpy(hdr_disk->subsystem, hdr->subsystem, MIN(strlen(hdr->subsystem), LUKS2_LABEL_L));
+	hdr_disk->subsystem[LUKS2_LABEL_L - 1] = '\0';
+	memcpy(hdr_disk->checksum_alg, hdr->checksum_alg, MIN(strlen(hdr->checksum_alg), LUKS2_CHECKSUM_ALG_L));
+	hdr_disk->checksum_alg[LUKS2_CHECKSUM_ALG_L - 1] = '\0';
+	memcpy(hdr_disk->uuid, hdr->uuid, MIN(strlen(hdr->uuid), LUKS2_UUID_L));
+	hdr_disk->uuid[LUKS2_UUID_L - 1] = '\0';
+
+	/* Each header copy carries its own salt. */
+	memcpy(hdr_disk->salt, secondary ? hdr->salt2 : hdr->salt1, LUKS2_SALT_L);
+}
+
+/*
+ * Sanity checks before checksum is validated
+ */
+/*
+ * Sanity-check the binary header (magic, version, offset, size) before the
+ * JSON area is read and the checksum validated. On success, stores the JSON
+ * area length (header size minus binary part) in *hdr_json_size.
+ * Returns 0 on success, -EINVAL on any inconsistency.
+ */
+static int hdr_disk_sanity_check_pre(struct crypt_device *cd,
+				     struct luks2_hdr_disk *hdr,
+				     size_t *hdr_json_size, int secondary,
+				     uint64_t offset)
+{
+	uint64_t hdr_size;
+
+	/* Magic differs between primary and secondary header copies. */
+	if (memcmp(hdr->magic, secondary ? LUKS2_MAGIC_2ND : LUKS2_MAGIC_1ST, LUKS2_MAGIC_L))
+		return -EINVAL;
+
+	if (be16_to_cpu(hdr->version) != 2) {
+		log_dbg(cd, "Unsupported LUKS2 header version %u.", be16_to_cpu(hdr->version));
+		return -EINVAL;
+	}
+
+	/* Header must record the offset it was actually read from. */
+	if (offset != be64_to_cpu(hdr->hdr_offset)) {
+		log_dbg(cd, "LUKS2 offset 0x%04" PRIx64 " on device differs to expected offset 0x%04" PRIx64 ".",
+			be64_to_cpu(hdr->hdr_offset), offset);
+		return -EINVAL;
+	}
+
+	hdr_size = be64_to_cpu(hdr->hdr_size);
+
+	if (hdr_size < LUKS2_HDR_16K_LEN || hdr_size > LUKS2_HDR_OFFSET_MAX) {
+		log_dbg(cd, "LUKS2 header has bogus size 0x%04" PRIx64 ".", hdr_size);
+		return -EINVAL;
+	}
+
+	/* Secondary copy starts right after the primary, so its offset
+	 * must equal the (shared) header size. */
+	if (secondary && (offset != hdr_size)) {
+		log_dbg(cd, "LUKS2 offset 0x%04" PRIx64 " in secondary header does not match size 0x%04" PRIx64 ".",
+			offset, hdr_size);
+		return -EINVAL;
+	}
+
+	/* FIXME: sanity check checksum alg. */
+
+	log_dbg(cd, "LUKS2 header version %u of size %" PRIu64 " bytes, checksum %s.",
+		be16_to_cpu(hdr->version), hdr_size,
+		hdr->checksum_alg);
+
+	*hdr_json_size = hdr_size - LUKS2_HDR_BIN_LEN;
+	return 0;
+}
+
+/*
+ * Read LUKS2 header from disk at specific offset.
+ */
+/*
+ * Read one LUKS2 header copy (binary part + JSON area) from 'device' at
+ * 'offset' and validate its checksum. On success *json_area is a freshly
+ * allocated buffer owned by the caller; on any failure it is left NULL.
+ * The device must already be locked (device_open_locked).
+ */
+static int hdr_read_disk(struct crypt_device *cd,
+			 struct device *device, struct luks2_hdr_disk *hdr_disk,
+			 char **json_area, uint64_t offset, int secondary)
+{
+	size_t hdr_json_size = 0;
+	int devfd, r;
+
+	log_dbg(cd, "Trying to read %s LUKS2 header at offset 0x%" PRIx64 ".",
+		secondary ? "secondary" : "primary", offset);
+
+	/* Note: the locked fd is cached by the device layer; it is not
+	 * closed here on error paths. */
+	devfd = device_open_locked(cd, device, O_RDONLY);
+	if (devfd < 0)
+		return devfd == -1 ? -EIO : devfd;
+
+	/*
+	 * Read binary header and run sanity check before reading
+	 * JSON area and validating checksum.
+	 */
+	if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+				 device_alignment(device), hdr_disk,
+				 LUKS2_HDR_BIN_LEN, offset) != LUKS2_HDR_BIN_LEN) {
+		return -EIO;
+	}
+
+	/*
+	 * hdr_json_size is validated if this call succeeds
+	 */
+	r = hdr_disk_sanity_check_pre(cd, hdr_disk, &hdr_json_size, secondary, offset);
+	if (r < 0)
+		return r;
+
+	/*
+	 * Allocate and read JSON area. Always the whole area must be read.
+	 */
+	*json_area = malloc(hdr_json_size);
+	if (!*json_area)
+		return -ENOMEM;
+
+	if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+				 device_alignment(device), *json_area, hdr_json_size,
+				 offset + LUKS2_HDR_BIN_LEN) != (ssize_t)hdr_json_size) {
+		free(*json_area);
+		*json_area = NULL;
+		return -EIO;
+	}
+
+	/*
+	 * Calculate and validate checksum and zero it afterwards.
+	 * On success r stays 0 (set by the sanity check above).
+	 */
+	if (hdr_checksum_check(cd, hdr_disk->checksum_alg, hdr_disk,
+				*json_area, hdr_json_size)) {
+		log_dbg(cd, "LUKS2 header checksum error (offset %" PRIu64 ").", offset);
+		free(*json_area);
+		*json_area = NULL;
+		r = -EINVAL;
+	}
+	/* Zero csum so in-memory binary header is checksum-neutral. */
+	memset(hdr_disk->csum, 0, LUKS2_CHECKSUM_L);
+
+	return r;
+}
+
+/*
+ * Write LUKS2 header to disk at specific offset.
+ */
+/*
+ * Write one LUKS2 header copy (binary header + JSON area) to disk at the
+ * offset implied by 'secondary'. The binary header is written twice: first
+ * without a checksum, then again once the checksum over header + JSON area
+ * has been computed. The device is synced before returning.
+ */
+static int hdr_write_disk(struct crypt_device *cd,
+			  struct device *device, struct luks2_hdr *hdr,
+			  const char *json_area, int secondary)
+{
+	struct luks2_hdr_disk hdr_disk;
+	uint64_t offset = secondary ? hdr->hdr_size : 0;
+	size_t hdr_json_len;
+	int devfd, r;
+
+	/* hdr->hdr_size is uint64_t: %zu would be a format/type mismatch
+	 * on platforms where size_t is 32-bit. */
+	log_dbg(cd, "Trying to write LUKS2 header (%" PRIu64 " bytes) at offset %" PRIu64 ".",
+		hdr->hdr_size, offset);
+
+	devfd = device_open_locked(cd, device, O_RDWR);
+	if (devfd < 0)
+		return devfd == -1 ? -EINVAL : devfd;
+
+	hdr_json_len = hdr->hdr_size - LUKS2_HDR_BIN_LEN;
+
+	hdr_to_disk(hdr, &hdr_disk, secondary, offset);
+
+	/*
+	 * Write header without checksum but with proper seqid.
+	 */
+	if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+				  device_alignment(device), (char *)&hdr_disk,
+				  LUKS2_HDR_BIN_LEN, offset) < (ssize_t)LUKS2_HDR_BIN_LEN)
+		return -EIO;
+
+	/*
+	 * Write json area.
+	 */
+	if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+				  device_alignment(device),
+				  CONST_CAST(char*)json_area, hdr_json_len,
+				  LUKS2_HDR_BIN_LEN + offset) < (ssize_t)hdr_json_len)
+		return -EIO;
+
+	/*
+	 * Calculate checksum and write header with checksum.
+	 */
+	r = hdr_checksum_calculate(hdr_disk.checksum_alg, &hdr_disk,
+				   json_area, hdr_json_len);
+	if (r < 0)
+		return r;
+
+	log_dbg_checksum(cd, hdr_disk.csum, hdr_disk.checksum_alg, "in-memory");
+
+	if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+				  device_alignment(device), (char *)&hdr_disk,
+				  LUKS2_HDR_BIN_LEN, offset) < (ssize_t)LUKS2_HDR_BIN_LEN)
+		r = -EIO;
+
+	device_sync(cd, device);
+	return r;
+}
+
+/*
+ * Compare the in-memory sequence id with the one currently on disk.
+ * Returns 0 when they match (or when the device holds no LUKS2 header
+ * with a matching UUID — nothing to check then), 1 on mismatch,
+ * negative errno on error.
+ */
+static int LUKS2_check_sequence_id(struct crypt_device *cd, struct luks2_hdr *hdr, struct device *device)
+{
+	int devfd;
+	struct luks2_hdr_disk dhdr;
+
+	if (!hdr)
+		return -EINVAL;
+
+	devfd = device_open_locked(cd, device, O_RDONLY);
+	if (devfd < 0)
+		return devfd == -1 ? -EINVAL : devfd;
+
+	/* we need only first 512 bytes, see luks2_hdr_disk structure */
+	if ((read_lseek_blockwise(devfd, device_block_size(cd, device),
+	     device_alignment(device), &dhdr, 512, 0) != 512))
+		return -EIO;
+
+	/*
+	 * There's nothing to check if there's no LUKS2 header.
+	 * The on-disk uuid field is not guaranteed to be NUL-terminated,
+	 * so bound the comparison by the field size (strcmp could overread
+	 * into uninitialized stack bytes); hdr->uuid is always terminated.
+	 */
+	if ((be16_to_cpu(dhdr.version) != 2) ||
+	    memcmp(dhdr.magic, LUKS2_MAGIC_1ST, LUKS2_MAGIC_L) ||
+	    strncmp(dhdr.uuid, hdr->uuid, LUKS2_UUID_L))
+		return 0;
+
+	return hdr->seqid != be64_to_cpu(dhdr.seqid);
+}
+
+/*
+ * Acquire the write lock on the metadata device and, on the first lock
+ * acquisition, verify no concurrent metadata update happened since the
+ * header was read. Returns 0 on success, negative errno otherwise.
+ */
+int LUKS2_device_write_lock(struct crypt_device *cd, struct luks2_hdr *hdr, struct device *device)
+{
+	int r;
+
+	r = device_write_lock(cd, device);
+	if (r < 0) {
+		log_err(cd, _("Failed to acquire write lock on device %s."), device_path(device));
+		return r;
+	}
+
+	/* Sequence id is checked only on the first write lock (r == 1)
+	 * and only when no LUKS2 reencryption is in progress. */
+	if (r != 1 || crypt_get_luks2_reencrypt(cd))
+		return 0;
+
+	log_dbg(cd, "Checking context sequence id matches value stored on disk.");
+	if (LUKS2_check_sequence_id(cd, hdr, device)) {
+		device_write_unlock(cd, device);
+		log_err(cd, _("Detected attempt for concurrent LUKS2 metadata update. Aborting operation."));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert in-memory LUKS2 header and write it to disk.
+ * This will increase sequence id, write both header copies and calculate checksum.
+ */
+/*
+ * Serialize hdr->jobj to text, bump the sequence id and write both header
+ * copies under the device write lock. When seqid_check is set, the lock
+ * path also verifies no concurrent metadata update occurred.
+ */
+int LUKS2_disk_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr, struct device *device, bool seqid_check)
+{
+	char *json_area;
+	const char *json_text;
+	size_t json_area_len;
+	int r;
+
+	if (hdr->version != 2) {
+		log_dbg(cd, "Unsupported LUKS2 header version (%u).", hdr->version);
+		return -EINVAL;
+	}
+
+	/* Device must be large enough for both header copies + keyslot areas. */
+	r = device_check_size(cd, crypt_metadata_device(cd), LUKS2_hdr_and_areas_size(hdr), 1);
+	if (r)
+		return r;
+
+	/*
+	 * Allocate and zero JSON area (of proper header size).
+	 */
+	json_area_len = hdr->hdr_size - LUKS2_HDR_BIN_LEN;
+	json_area = crypt_zalloc(json_area_len);
+	if (!json_area)
+		return -ENOMEM;
+
+	/*
+	 * Generate text space-efficient JSON representation to json area.
+	 */
+	json_text = json_object_to_json_string_ext(hdr->jobj,
+			JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
+	if (!json_text || !*json_text) {
+		log_dbg(cd, "Cannot parse JSON object to text representation.");
+		free(json_area);
+		return -ENOMEM;
+	}
+	if (strlen(json_text) > (json_area_len - 1)) {
+		log_dbg(cd, "JSON is too large (%zu > %zu).", strlen(json_text), json_area_len);
+		free(json_area);
+		return -EINVAL;
+	}
+	/* strncpy is safe here: length checked above (terminator fits) and
+	 * the zero-padding deliberately fills the unused JSON area. */
+	strncpy(json_area, json_text, json_area_len);
+
+	if (seqid_check)
+		r = LUKS2_device_write_lock(cd, hdr, device);
+	else
+		r = device_write_lock(cd, device);
+	if (r < 0) {
+		free(json_area);
+		return r;
+	}
+
+	/* Increase sequence id before writing it to disk. */
+	hdr->seqid++;
+
+	/* Write primary and secondary header */
+	r = hdr_write_disk(cd, device, hdr, json_area, 0);
+	if (!r)
+		r = hdr_write_disk(cd, device, hdr, json_area, 1);
+
+	if (r)
+		log_dbg(cd, "LUKS2 header write failed (%d).", r);
+
+	device_write_unlock(cd, device);
+
+	free(json_area);
+	return r;
+}
+/*
+ * Check that the json area starts with '{' and that only NUL bytes follow
+ * the parsed document ('json_len' bytes) up to the end of the area.
+ */
+static int validate_json_area(struct crypt_device *cd, const char *json_area,
+			      uint64_t json_len, uint64_t max_length)
+{
+	uint64_t pos;
+	char c;
+
+	/* Enforce there are no needless opening bytes */
+	if (json_area[0] != '{') {
+		log_dbg(cd, "ERROR: Opening character must be left curly bracket: '{'.");
+		return -EINVAL;
+	}
+
+	if (json_len >= max_length) {
+		log_dbg(cd, "ERROR: Missing trailing null byte beyond parsed json data string.");
+		return -EINVAL;
+	}
+
+	/*
+	 * TODO:
+	 * validate there are legal json format characters between
+	 * 'json_area' and 'json_area + json_len'
+	 */
+
+	for (pos = json_len; pos < max_length; pos++) {
+		c = json_area[pos];
+		if (c != '\0') {
+			log_dbg(cd, "ERROR: Forbidden ascii code 0x%02hhx found beyond json data string at offset %" PRIu64,
+				c, pos);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the parsed LUKS2 header object. When the first validation pass
+ * fails, attempt one repair of known glitches and validate again.
+ * Returns 0 when valid, non-zero otherwise.
+ */
+static int validate_luks2_json_object(struct crypt_device *cd, json_object *jobj_hdr, uint64_t length)
+{
+	int r;
+
+	/* we require top level object to be of json_type_object */
+	if (!json_object_is_type(jobj_hdr, json_type_object)) {
+		log_dbg(cd, "ERROR: Resulting object is not a json object type");
+		return 1;
+	}
+
+	r = LUKS2_hdr_validate(cd, jobj_hdr, length);
+	if (r) {
+		log_dbg(cd, "Repairing JSON metadata.");
+		/* try to correct known glitches */
+		LUKS2_hdr_repair(cd, jobj_hdr);
+
+		/* run validation again */
+		r = LUKS2_hdr_validate(cd, jobj_hdr, length);
+	}
+
+	if (r)
+		log_dbg(cd, "ERROR: LUKS2 validation failed");
+
+	return r;
+}
+
+/*
+ * Parse the JSON area and run all validation steps. Returns the parsed
+ * object on success, or NULL (with the reference dropped) when parsing
+ * or validation fails.
+ */
+static json_object *parse_and_validate_json(struct crypt_device *cd,
+					    const char *json_area, uint64_t max_length)
+{
+	json_object *jobj;
+	int json_len, r;
+
+	jobj = parse_json_len(cd, json_area, max_length, &json_len);
+	if (!jobj)
+		return NULL;
+
+	/* successful parse_json_len must not return offset <= 0 */
+	assert(json_len > 0);
+
+	r = validate_json_area(cd, json_area, json_len, max_length);
+	if (!r)
+		r = validate_luks2_json_object(cd, jobj, max_length);
+
+	if (!r)
+		return jobj;
+
+	json_object_put(jobj);
+	return NULL;
+}
+
+/*
+ * Probe the device for foreign (non-LUKS) signatures via blkid.
+ * Returns 0 when no foreign signature was found (or probing is disabled
+ * at build time), -EINVAL when a signature was detected or probing failed.
+ */
+static int detect_device_signatures(struct crypt_device *cd, const char *path)
+{
+	blk_probe_status prb_state;
+	int r;
+	struct blkid_handle *h;
+
+	if (!blk_supported()) {
+		log_dbg(cd, "Blkid probing of device signatures disabled.");
+		return 0;
+	}
+
+	if ((r = blk_init_by_path(&h, path))) {
+		log_dbg(cd, "Failed to initialize blkid_handle by path.");
+		return -EINVAL;
+	}
+
+	/* We don't care about details. Be fast. */
+	blk_set_chains_for_fast_detection(h);
+
+	/* Filter out crypto_LUKS. we don't care now */
+	blk_superblocks_filter_luks(h);
+
+	prb_state = blk_safeprobe(h);
+
+	switch (prb_state) {
+	case PRB_AMBIGUOUS:
+		log_dbg(cd, "Blkid probe couldn't decide device type unambiguously.");
+		/* fall through */
+	case PRB_FAIL:
+		/* Intentional fall-through: ambiguous result is treated as failure. */
+		log_dbg(cd, "Blkid probe failed.");
+		r = -EINVAL;
+		break;
+	case PRB_OK: /* crypto_LUKS type is filtered out */
+		r = -EINVAL;
+
+		if (blk_is_partition(h))
+			log_dbg(cd, "Blkid probe detected partition type '%s'", blk_get_partition_type(h));
+		else if (blk_is_superblock(h))
+			log_dbg(cd, "blkid probe detected superblock type '%s'", blk_get_superblock_type(h));
+		break;
+	case PRB_EMPTY:
+		/* r is still 0 here (set by successful blk_init_by_path). */
+		log_dbg(cd, "Blkid probe detected no foreign device signature.");
+	}
+	blk_free(h);
+	return r;
+}
+
+/*
+ * Read and convert on-disk LUKS2 header to in-memory representation..
+ * Try to do recovery if on-disk state is not consistent.
+ */
+/*
+ * Read both on-disk header copies, pick the valid one with the highest
+ * sequence id, and optionally rewrite (recover) the other copy.
+ * The result is stored into hdr (hdr->jobj owns the parsed JSON).
+ */
+int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
+			struct device *device, int do_recovery, int do_blkprobe)
+{
+	enum { HDR_OK, HDR_OBSOLETE, HDR_FAIL, HDR_FAIL_IO } state_hdr1, state_hdr2;
+	struct luks2_hdr_disk hdr_disk1, hdr_disk2;
+	char *json_area1 = NULL, *json_area2 = NULL;
+	json_object *jobj_hdr1 = NULL, *jobj_hdr2 = NULL;
+	unsigned int i;
+	int r;
+	uint64_t hdr_size;
+	uint64_t hdr2_offsets[] = LUKS2_HDR2_OFFSETS;
+
+	/* Skip auto-recovery if locks are disabled and we're not doing LUKS2 explicit repair */
+	if (do_recovery && do_blkprobe && !crypt_metadata_locking_enabled()) {
+		do_recovery = 0;
+		log_dbg(cd, "Disabling header auto-recovery due to locking being disabled.");
+	}
+
+	/*
+	 * Read primary LUKS2 header (offset 0).
+	 */
+	state_hdr1 = HDR_FAIL;
+	r = hdr_read_disk(cd, device, &hdr_disk1, &json_area1, 0, 0);
+	if (r == 0) {
+		/* Binary read OK; header counts as OK only if JSON validates. */
+		jobj_hdr1 = parse_and_validate_json(cd, json_area1, be64_to_cpu(hdr_disk1.hdr_size) - LUKS2_HDR_BIN_LEN);
+		state_hdr1 = jobj_hdr1 ? HDR_OK : HDR_OBSOLETE;
+	} else if (r == -EIO)
+		state_hdr1 = HDR_FAIL_IO;
+
+	/*
+	 * Read secondary LUKS2 header (follows primary).
+	 */
+	state_hdr2 = HDR_FAIL;
+	if (state_hdr1 != HDR_FAIL && state_hdr1 != HDR_FAIL_IO) {
+		/* Primary binary header was readable: it tells us the offset. */
+		r = hdr_read_disk(cd, device, &hdr_disk2, &json_area2, be64_to_cpu(hdr_disk1.hdr_size), 1);
+		if (r == 0) {
+			jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
+			state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
+		} else if (r == -EIO)
+			state_hdr2 = HDR_FAIL_IO;
+	} else {
+		/*
+		 * No header size, check all known offsets.
+		 */
+		for (r = -EINVAL,i = 0; r < 0 && i < ARRAY_SIZE(hdr2_offsets); i++)
+			r = hdr_read_disk(cd, device, &hdr_disk2, &json_area2, hdr2_offsets[i], 1);
+
+		if (r == 0) {
+			jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
+			state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
+		} else if (r == -EIO)
+			state_hdr2 = HDR_FAIL_IO;
+	}
+
+	/*
+	 * Check sequence id if both headers are read correctly.
+	 * The copy with the higher seqid wins; the other is obsolete.
+	 */
+	if (state_hdr1 == HDR_OK && state_hdr2 == HDR_OK) {
+		if (be64_to_cpu(hdr_disk1.seqid) > be64_to_cpu(hdr_disk2.seqid))
+			state_hdr2 = HDR_OBSOLETE;
+		else if (be64_to_cpu(hdr_disk1.seqid) < be64_to_cpu(hdr_disk2.seqid))
+			state_hdr1 = HDR_OBSOLETE;
+	}
+
+	/* check header with keyslots to fit the device */
+	if (state_hdr1 == HDR_OK)
+		hdr_size = LUKS2_hdr_and_areas_size_jobj(jobj_hdr1);
+	else if (state_hdr2 == HDR_OK)
+		hdr_size = LUKS2_hdr_and_areas_size_jobj(jobj_hdr2);
+	else {
+		/* Neither copy usable: report I/O error only if both failed on I/O. */
+		r = (state_hdr1 == HDR_FAIL_IO && state_hdr2 == HDR_FAIL_IO) ? -EIO : -EINVAL;
+		goto err;
+	}
+
+	r = device_check_size(cd, device, hdr_size, 0);
+	if (r)
+		goto err;
+
+	/*
+	 * Try to rewrite (recover) bad header. Always regenerate salt for bad header.
+	 */
+	if (state_hdr1 == HDR_OK && state_hdr2 != HDR_OK) {
+		log_dbg(cd, "Secondary LUKS2 header requires recovery.");
+
+		/* Refuse auto-recovery when foreign signatures are present. */
+		if (do_blkprobe && (r = detect_device_signatures(cd, device_path(device)))) {
+			log_err(cd, _("Device contains ambiguous signatures, cannot auto-recover LUKS2.\n"
+				      "Please run \"cryptsetup repair\" for recovery."));
+			goto err;
+		}
+
+		if (do_recovery) {
+			memcpy(&hdr_disk2, &hdr_disk1, LUKS2_HDR_BIN_LEN);
+			r = crypt_random_get(cd, (char*)hdr_disk2.salt, sizeof(hdr_disk2.salt), CRYPT_RND_SALT);
+			if (r)
+				log_dbg(cd, "Cannot generate header salt.");
+			else {
+				hdr_from_disk(&hdr_disk1, &hdr_disk2, hdr, 0);
+				r = hdr_write_disk(cd, device, hdr, json_area1, 1);
+			}
+			if (r)
+				log_dbg(cd, "Secondary LUKS2 header recovery failed.");
+		}
+	} else if (state_hdr1 != HDR_OK && state_hdr2 == HDR_OK) {
+		log_dbg(cd, "Primary LUKS2 header requires recovery.");
+
+		if (do_blkprobe && (r = detect_device_signatures(cd, device_path(device)))) {
+			log_err(cd, _("Device contains ambiguous signatures, cannot auto-recover LUKS2.\n"
+				      "Please run \"cryptsetup repair\" for recovery."));
+			goto err;
+		}
+
+		if (do_recovery) {
+			memcpy(&hdr_disk1, &hdr_disk2, LUKS2_HDR_BIN_LEN);
+			r = crypt_random_get(cd, (char*)hdr_disk1.salt, sizeof(hdr_disk1.salt), CRYPT_RND_SALT);
+			if (r)
+				log_dbg(cd, "Cannot generate header salt.");
+			else {
+				hdr_from_disk(&hdr_disk2, &hdr_disk1, hdr, 1);
+				r = hdr_write_disk(cd, device, hdr, json_area2, 0);
+			}
+			if (r)
+				log_dbg(cd, "Primary LUKS2 header recovery failed.");
+		}
+	}
+
+	free(json_area1);
+	json_area1 = NULL;
+	free(json_area2);
+	json_area2 = NULL;
+
+	/* wrong lock for write mode during recovery attempt */
+	if (r == -EAGAIN)
+		goto err;
+
+	/*
+	 * Even if status is failed, the second header includes salt.
+	 */
+	if (state_hdr1 == HDR_OK) {
+		hdr_from_disk(&hdr_disk1, &hdr_disk2, hdr, 0);
+		hdr->jobj = jobj_hdr1;
+		json_object_put(jobj_hdr2);
+	} else if (state_hdr2 == HDR_OK) {
+		hdr_from_disk(&hdr_disk2, &hdr_disk1, hdr, 1);
+		hdr->jobj = jobj_hdr2;
+		json_object_put(jobj_hdr1);
+	}
+
+	/*
+	 * FIXME: should this fail? At least one header was read correctly.
+	 * r = (state_hdr1 == HDR_FAIL_IO || state_hdr2 == HDR_FAIL_IO) ? -EIO : -EINVAL;
+	 */
+	return 0;
+err:
+	log_dbg(cd, "LUKS2 header read failed (%d).", r);
+
+	free(json_area1);
+	free(json_area2);
+	json_object_put(jobj_hdr1);
+	json_object_put(jobj_hdr2);
+	hdr->jobj = NULL;
+	return r;
+}
+
+/*
+ * Peek the LUKS2 header version from the metadata device or a backup file
+ * without taking any lock. Returns the on-disk version when a LUKS2 magic
+ * is found, 0 otherwise (including all error paths).
+ */
+int LUKS2_hdr_version_unlocked(struct crypt_device *cd, const char *backup_file)
+{
+	struct {
+		char magic[LUKS2_MAGIC_L];
+		uint16_t version;
+	} __attribute__ ((packed)) hdr;
+	struct device *device = NULL;
+	int devfd, flags = O_RDONLY;
+	int version = 0;
+
+	if (!backup_file)
+		device = crypt_metadata_device(cd);
+	else if (device_alloc(cd, &device, backup_file) < 0)
+		return 0;
+
+	if (!device)
+		return 0;
+
+	if (device_direct_io(device))
+		flags |= O_DIRECT;
+
+	devfd = open(device_path(device), flags);
+	if (devfd != -1) {
+		if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+		    device_alignment(device), &hdr, sizeof(hdr), 0) == sizeof(hdr) &&
+		    !memcmp(hdr.magic, LUKS2_MAGIC_1ST, LUKS2_MAGIC_L))
+			version = (int)be16_to_cpu(hdr.version);
+		close(devfd);
+	}
+
+	if (backup_file)
+		device_free(cd, device);
+
+	return version;
+}
diff --git a/lib/luks2/luks2_internal.h b/lib/luks2/luks2_internal.h
new file mode 100644
index 0000000..b564a48
--- /dev/null
+++ b/lib/luks2/luks2_internal.h
@@ -0,0 +1,388 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTSETUP_LUKS2_INTERNAL_H
+#define _CRYPTSETUP_LUKS2_INTERNAL_H
+
+#include <stdio.h>
+#include <errno.h>
+#include <json-c/json.h>
+
+#include "internal.h"
+#include "luks2.h"
+
+/* override useless forward slash escape when supported by json-c */
+#ifndef JSON_C_TO_STRING_NOSLASHESCAPE
+#define JSON_C_TO_STRING_NOSLASHESCAPE 0
+#endif
+
+/*
+ * On-disk access function prototypes
+ */
+int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct device *device, int do_recovery, int do_blkprobe);
+int LUKS2_disk_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct device *device, bool seqid_check);
+int LUKS2_device_write_lock(struct crypt_device *cd,
+ struct luks2_hdr *hdr, struct device *device);
+
+/*
+ * JSON struct access helpers
+ */
+json_object *LUKS2_get_keyslot_jobj(struct luks2_hdr *hdr, int keyslot);
+json_object *LUKS2_get_token_jobj(struct luks2_hdr *hdr, int token);
+json_object *LUKS2_get_digest_jobj(struct luks2_hdr *hdr, int digest);
+json_object *LUKS2_get_segment_jobj(struct luks2_hdr *hdr, int segment);
+json_object *LUKS2_get_tokens_jobj(struct luks2_hdr *hdr);
+json_object *LUKS2_get_segments_jobj(struct luks2_hdr *hdr);
+
+void hexprint_base64(struct crypt_device *cd, json_object *jobj,
+ const char *sep, const char *line_sep);
+
+uint64_t crypt_jobj_get_uint64(json_object *jobj);
+uint32_t crypt_jobj_get_uint32(json_object *jobj);
+json_object *crypt_jobj_new_uint64(uint64_t value);
+
+int json_object_object_add_by_uint(json_object *jobj, unsigned key, json_object *jobj_val);
+void json_object_object_del_by_uint(json_object *jobj, unsigned key);
+int json_object_copy(json_object *jobj_src, json_object **jobj_dst);
+
+void JSON_DBG(struct crypt_device *cd, json_object *jobj, const char *desc);
+
+/*
+ * LUKS2 JSON validation
+ */
+
+/* validation helper */
+bool validate_json_uint32(json_object *jobj);
+json_object *json_contains(struct crypt_device *cd, json_object *jobj, const char *name,
+ const char *section, const char *key, json_type type);
+json_object *json_contains_string(struct crypt_device *cd, json_object *jobj,
+ const char *name, const char *section, const char *key);
+
+int LUKS2_hdr_validate(struct crypt_device *cd, json_object *hdr_jobj, uint64_t json_size);
+int LUKS2_check_json_size(struct crypt_device *cd, const struct luks2_hdr *hdr);
+int LUKS2_token_validate(struct crypt_device *cd, json_object *hdr_jobj,
+ json_object *jobj_token, const char *key);
+void LUKS2_token_dump(struct crypt_device *cd, int token);
+
+/*
+ * LUKS2 JSON repair for known glitches
+ */
+void LUKS2_hdr_repair(struct crypt_device *cd, json_object *jobj_hdr);
+void LUKS2_keyslots_repair(struct crypt_device *cd, json_object *jobj_hdr);
+
+/*
+ * JSON array helpers
+ */
+json_object *LUKS2_array_jobj(json_object *array, const char *num);
+json_object *LUKS2_array_remove(json_object *array, const char *num);
+
+/*
+ * Plugins API
+ */
+
+/**
+ * LUKS2 keyslots handlers (EXPERIMENTAL)
+ */
+typedef int (*keyslot_alloc_func)(struct crypt_device *cd, int keyslot,
+ size_t volume_key_len,
+ const struct luks2_keyslot_params *params);
+typedef int (*keyslot_update_func)(struct crypt_device *cd, int keyslot,
+ const struct luks2_keyslot_params *params);
+typedef int (*keyslot_open_func) (struct crypt_device *cd, int keyslot,
+ const char *password, size_t password_len,
+ char *volume_key, size_t volume_key_len);
+typedef int (*keyslot_store_func)(struct crypt_device *cd, int keyslot,
+ const char *password, size_t password_len,
+ const char *volume_key, size_t volume_key_len);
+typedef int (*keyslot_wipe_func) (struct crypt_device *cd, int keyslot);
+typedef int (*keyslot_dump_func) (struct crypt_device *cd, int keyslot);
+typedef int (*keyslot_validate_func) (struct crypt_device *cd, json_object *jobj_keyslot);
+typedef void(*keyslot_repair_func) (json_object *jobj_keyslot);
+
+/* see LUKS2_luks2_to_luks1 */
+int placeholder_keyslot_alloc(struct crypt_device *cd,
+ int keyslot,
+ uint64_t area_offset,
+ uint64_t area_length);
+
+/* validate all keyslot implementations in hdr json */
+int LUKS2_keyslots_validate(struct crypt_device *cd, json_object *hdr_jobj);
+
+typedef struct {
+ const char *name;
+ keyslot_alloc_func alloc;
+ keyslot_update_func update;
+ keyslot_open_func open;
+ keyslot_store_func store;
+ keyslot_wipe_func wipe;
+ keyslot_dump_func dump;
+ keyslot_validate_func validate;
+ keyslot_repair_func repair;
+} keyslot_handler;
+
+/* Data-protection state used while LUKS2 reencryption is in progress. */
+struct reenc_protection {
+	/* Selected protection method; NOT_SET (0) until loaded/configured. */
+	enum { REENC_PROTECTION_NOT_SET = 0,
+	       REENC_PROTECTION_NONE,
+	       REENC_PROTECTION_CHECKSUM,
+	       REENC_PROTECTION_JOURNAL,
+	       REENC_PROTECTION_DATASHIFT } type;
+
+	/* Per-method parameters; valid member selected by 'type'. */
+	union {
+		/* Parameters for REENC_PROTECTION_CHECKSUM. */
+		struct {
+			char hash[LUKS2_CHECKSUM_ALG_L];
+			struct crypt_hash *ch;
+			size_t hash_size;
+			/* buffer for checksums */
+			void *checksums;
+			size_t checksums_len;
+			size_t block_size;
+		} csum;
+		/* Parameters for REENC_PROTECTION_DATASHIFT. */
+		struct {
+			uint64_t data_shift;
+		} ds;
+	} p;
+};
+
+/**
+ * LUKS2 digest handlers (EXPERIMENTAL)
+ */
+typedef int (*digest_verify_func)(struct crypt_device *cd, int digest,
+ const char *volume_key, size_t volume_key_len);
+typedef int (*digest_store_func) (struct crypt_device *cd, int digest,
+ const char *volume_key, size_t volume_key_len);
+typedef int (*digest_dump_func) (struct crypt_device *cd, int digest);
+
+typedef struct {
+ const char *name;
+ digest_verify_func verify;
+ digest_store_func store;
+ digest_dump_func dump;
+} digest_handler;
+
+int keyring_open(struct crypt_device *cd,
+ int token,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr);
+
+void keyring_dump(struct crypt_device *cd, const char *json);
+
+int keyring_validate(struct crypt_device *cd, const char *json);
+
+void keyring_buffer_free(void *buffer, size_t buffer_size);
+
+struct crypt_token_handler_v2 {
+ const char *name;
+ crypt_token_open_func open;
+ crypt_token_buffer_free_func buffer_free;
+ crypt_token_validate_func validate;
+ crypt_token_dump_func dump;
+
+ /* here ends v1. Do not touch anything above */
+
+ crypt_token_open_pin_func open_pin;
+ crypt_token_version_func version;
+
+ void *dlhandle;
+};
+
+/*
+ * Initial sequence of structure members in union 'u' must be always
+ * identical. Version 4 must fully contain version 3 which must
+ * subsequently fully contain version 2, etc.
+ *
+ * See C standard, section 6.5.2.3, item 5.
+ */
+struct crypt_token_handler_internal {
+ uint32_t version;
+ union {
+ crypt_token_handler v1; /* deprecated public structure */
+ struct crypt_token_handler_v2 v2; /* internal helper v2 structure */
+ } u;
+};
+
+int LUKS2_find_area_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
+ size_t keylength, uint64_t *area_offset, uint64_t *area_length);
+int LUKS2_find_area_max_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t *area_offset, uint64_t *area_length);
+
+uint64_t LUKS2_hdr_and_areas_size_jobj(json_object *jobj);
+
+int LUKS2_check_cipher(struct crypt_device *cd,
+ size_t keylength,
+ const char *cipher,
+ const char *cipher_mode);
+
+/* Map a reencryption mode to its textual name ("<unknown>" for others). */
+static inline const char *crypt_reencrypt_mode_to_str(crypt_reencrypt_mode_info mi)
+{
+	switch (mi) {
+	case CRYPT_REENCRYPT_REENCRYPT:
+		return "reencrypt";
+	case CRYPT_REENCRYPT_ENCRYPT:
+		return "encrypt";
+	case CRYPT_REENCRYPT_DECRYPT:
+		return "decrypt";
+	default:
+		return "<unknown>";
+	}
+}
+
+/*
+ * Generic LUKS2 keyslot
+ */
+int LUKS2_keyslot_reencrypt_store(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const void *buffer,
+ size_t buffer_length);
+
+int LUKS2_keyslot_reencrypt_allocate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment);
+
+int LUKS2_keyslot_reencrypt_update_needed(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment);
+
+int LUKS2_keyslot_reencrypt_update(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment,
+ struct volume_key *vks);
+
+int LUKS2_keyslot_reencrypt_load(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ struct reenc_protection *rp,
+ bool primary);
+
+int LUKS2_keyslot_reencrypt_digest_create(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint8_t version,
+ struct volume_key *vks);
+
+int LUKS2_keyslot_dump(struct crypt_device *cd,
+ int keyslot);
+
+int LUKS2_keyslot_jobj_area(json_object *jobj_keyslot, uint64_t *offset, uint64_t *length);
+
+/* JSON helpers */
+uint64_t json_segment_get_offset(json_object *jobj_segment, unsigned blockwise);
+const char *json_segment_type(json_object *jobj_segment);
+uint64_t json_segment_get_iv_offset(json_object *jobj_segment);
+uint64_t json_segment_get_size(json_object *jobj_segment, unsigned blockwise);
+const char *json_segment_get_cipher(json_object *jobj_segment);
+uint32_t json_segment_get_sector_size(json_object *jobj_segment);
+bool json_segment_is_backup(json_object *jobj_segment);
+json_object *json_segments_get_segment(json_object *jobj_segments, int segment);
+unsigned json_segments_count(json_object *jobj_segments);
+void json_segment_remove_flag(json_object *jobj_segment, const char *flag);
+uint64_t json_segments_get_minimal_offset(json_object *jobj_segments, unsigned blockwise);
+json_object *json_segment_create_linear(uint64_t offset, const uint64_t *length, unsigned reencryption);
+json_object *json_segment_create_crypt(uint64_t offset, uint64_t iv_offset, const uint64_t *length, const char *cipher, uint32_t sector_size, unsigned reencryption);
+int json_segments_segment_in_reencrypt(json_object *jobj_segments);
+bool json_segment_cmp(json_object *jobj_segment_1, json_object *jobj_segment_2);
+bool json_segment_contains_flag(json_object *jobj_segment, const char *flag_str, size_t len);
+
+int LUKS2_assembly_multisegment_dmd(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vks,
+ json_object *jobj_segments,
+ struct crypt_dm_active_device *dmd);
+
+/*
+ * Generic LUKS2 segment
+ */
+int LUKS2_segments_count(struct luks2_hdr *hdr);
+
+int LUKS2_segment_first_unused_id(struct luks2_hdr *hdr);
+
+int LUKS2_segment_set_flag(json_object *jobj_segment, const char *flag);
+
+json_object *LUKS2_get_segment_by_flag(struct luks2_hdr *hdr, const char *flag);
+
+int LUKS2_get_segment_id_by_flag(struct luks2_hdr *hdr, const char *flag);
+
+int LUKS2_segments_set(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ json_object *jobj_segments,
+ int commit);
+
+uint64_t LUKS2_segment_offset(struct luks2_hdr *hdr,
+ int segment,
+ unsigned blockwise);
+
+uint64_t LUKS2_segment_size(struct luks2_hdr *hdr,
+ int segment,
+ unsigned blockwise);
+
+int LUKS2_segment_is_type(struct luks2_hdr *hdr,
+ int segment,
+ const char *type);
+
+int LUKS2_segment_by_type(struct luks2_hdr *hdr,
+ const char *type);
+
+int LUKS2_last_segment_by_type(struct luks2_hdr *hdr,
+ const char *type);
+
+int LUKS2_get_default_segment(struct luks2_hdr *hdr);
+
+int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr);
+int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr);
+int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise);
+
+/*
+ * Generic LUKS2 digest
+ */
+int LUKS2_digest_verify_by_digest(struct crypt_device *cd,
+ int digest,
+ const struct volume_key *vk);
+
+void LUKS2_digests_erase_unused(struct crypt_device *cd,
+ struct luks2_hdr *hdr);
+
+int LUKS2_digest_dump(struct crypt_device *cd,
+ int digest);
+
+/*
+ * Generic LUKS2 token
+ */
+int LUKS2_tokens_count(struct luks2_hdr *hdr);
+
+/*
+ * LUKS2 generic
+ */
+int LUKS2_reload(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags);
+
+int LUKS2_keyslot_for_segment(struct luks2_hdr *hdr, int keyslot, int segment);
+int LUKS2_find_keyslot(struct luks2_hdr *hdr, const char *type);
+int LUKS2_set_keyslots_size(struct luks2_hdr *hdr, uint64_t data_offset);
+
+#endif
diff --git a/lib/luks2/luks2_json_format.c b/lib/luks2/luks2_json_format.c
new file mode 100644
index 0000000..4456358
--- /dev/null
+++ b/lib/luks2/luks2_json_format.c
@@ -0,0 +1,411 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, LUKS2 header format code
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include <uuid/uuid.h>
+
/* One binary keyslot area: byte offset and length within the header. */
struct area {
	uint64_t offset;
	uint64_t length;
};
+
/* Binary area size needed for a key of given length, rounded to 4 KiB. */
static size_t get_area_size(size_t keylength)
{
	/* for now it is AF_split_sectors */
	const size_t af_stripes = 4000;

	return size_round_up(keylength * af_stripes, 4096);
}
+
+static size_t get_min_offset(struct luks2_hdr *hdr)
+{
+ return 2 * hdr->hdr_size;
+}
+
/* End boundary of the binary keyslots area (headers + keyslots size). */
static size_t get_max_offset(struct luks2_hdr *hdr)
{
	return LUKS2_hdr_and_areas_size(hdr);
}
+
/*
 * Find the largest unused gap inside the binary keyslots area.
 * On success returns 0 and stores the gap start in *area_offset and its
 * size in *area_length; returns -EINVAL when no free space is left.
 */
int LUKS2_find_area_max_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
	uint64_t *area_offset, uint64_t *area_length)
{
	struct area areas[LUKS2_KEYSLOTS_MAX], sorted_areas[LUKS2_KEYSLOTS_MAX+1] = {};
	int i, j, k, area_i;
	size_t valid_offset, offset, length;

	/* fill area offset + length table */
	for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
		if (!LUKS2_keyslot_area(hdr, i, &areas[i].offset, &areas[i].length))
			continue;
		/* keyslot has no assigned area; mark the table entry unused */
		areas[i].length = 0;
		areas[i].offset = 0;
	}

	/* sort table (selection sort; consumed entries are zeroed out) */
	k = 0; /* index in sorted table */
	for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
		offset = get_max_offset(hdr) ?: UINT64_MAX;
		area_i = -1;
		/* search for the smallest offset in table */
		for (j = 0; j < LUKS2_KEYSLOTS_MAX; j++)
			if (areas[j].offset && areas[j].offset <= offset) {
				area_i = j;
				offset = areas[j].offset;
			}

		if (area_i >= 0) {
			sorted_areas[k].length = areas[area_i].length;
			sorted_areas[k].offset = areas[area_i].offset;
			areas[area_i].length = 0;
			areas[area_i].offset = 0;
			k++;
		}
	}

	/* sentinel: terminates the gap scan at the end of the keyslots area */
	sorted_areas[LUKS2_KEYSLOTS_MAX].offset = get_max_offset(hdr);
	sorted_areas[LUKS2_KEYSLOTS_MAX].length = 1;

	/* search for the gap we can use */
	length = valid_offset = 0;
	offset = get_min_offset(hdr);
	for (i = 0; i < LUKS2_KEYSLOTS_MAX+1; i++) {
		/* skip empty */
		if (sorted_areas[i].offset == 0 || sorted_areas[i].length == 0)
			continue;

		/* found bigger gap than the last one */
		if ((offset < sorted_areas[i].offset) && (sorted_areas[i].offset - offset) > length) {
			length = sorted_areas[i].offset - offset;
			valid_offset = offset;
		}

		/* move beyond allocated area */
		offset = sorted_areas[i].offset + sorted_areas[i].length;
	}

	/* this search 'algorithm' does not work with unaligned areas */
	assert(length == size_round_up(length, 4096));
	assert(valid_offset == size_round_up(valid_offset, 4096));

	if (!length) {
		log_dbg(cd, "Not enough space in header keyslot area.");
		return -EINVAL;
	}

	log_dbg(cd, "Found largest free area %zu -> %zu", valid_offset, length + valid_offset);

	*area_offset = valid_offset;
	*area_length = length;

	return 0;
}
+
/*
 * Find the first gap in the binary keyslots area large enough for a key
 * of 'keylength' bytes. On success returns 0 and (optionally) stores the
 * area position/size; returns -EINVAL when no suitable gap exists.
 */
int LUKS2_find_area_gap(struct crypt_device *cd, struct luks2_hdr *hdr,
	size_t keylength, uint64_t *area_offset, uint64_t *area_length)
{
	struct area areas[LUKS2_KEYSLOTS_MAX], sorted_areas[LUKS2_KEYSLOTS_MAX] = {};
	int i, j, k, area_i;
	size_t offset, length;

	/* fill area offset + length table */
	for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
		if (!LUKS2_keyslot_area(hdr, i, &areas[i].offset, &areas[i].length))
			continue;
		/* keyslot has no assigned area; mark the table entry unused */
		areas[i].length = 0;
		areas[i].offset = 0;
	}

	/* sort table (selection sort; consumed entries are zeroed out) */
	k = 0; /* index in sorted table */
	for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
		offset = get_max_offset(hdr) ?: UINT64_MAX;
		area_i = -1;
		/* search for the smallest offset in table */
		for (j = 0; j < LUKS2_KEYSLOTS_MAX; j++)
			if (areas[j].offset && areas[j].offset <= offset) {
				area_i = j;
				offset = areas[j].offset;
			}

		if (area_i >= 0) {
			sorted_areas[k].length = areas[area_i].length;
			sorted_areas[k].offset = areas[area_i].offset;
			areas[area_i].length = 0;
			areas[area_i].offset = 0;
			k++;
		}
	}

	/* search for the gap we can use */
	offset = get_min_offset(hdr);
	length = get_area_size(keylength);
	for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
		/* skip empty */
		if (sorted_areas[i].offset == 0 || sorted_areas[i].length == 0)
			continue;

		/* enough space before the used area */
		if ((offset < sorted_areas[i].offset) && ((offset + length) <= sorted_areas[i].offset))
			break;

		/* both offset and length are already aligned to 4096 bytes */
		offset = sorted_areas[i].offset + sorted_areas[i].length;
	}

	if ((offset + length) > get_max_offset(hdr)) {
		log_dbg(cd, "Not enough space in header keyslot area.");
		return -EINVAL;
	}

	log_dbg(cd, "Found area %zu -> %zu", offset, length + offset);

	if (area_offset)
		*area_offset = offset;
	if (area_length)
		*area_length = length;

	return 0;
}
+
/*
 * Validate requested metadata (JSON + binary header) area size.
 * Returns 0 when the size is one of the allowed on-disk values,
 * nonzero otherwise.
 */
int LUKS2_check_metadata_area_size(uint64_t metadata_size)
{
	/* see LUKS2_HDR2_OFFSETS */
	static const uint64_t valid_sizes[] = {
		0x004000, 0x008000, 0x010000, 0x020000,
		0x040000, 0x080000, 0x100000, 0x200000,
		0x400000
	};
	size_t i;

	for (i = 0; i < sizeof(valid_sizes) / sizeof(valid_sizes[0]); i++)
		if (metadata_size == valid_sizes[i])
			return 0;

	return 1;
}
+
+int LUKS2_check_keyslots_area_size(uint64_t keyslots_size)
+{
+ return (MISALIGNED_4K(keyslots_size) ||
+ keyslots_size > LUKS2_MAX_KEYSLOTS_SIZE);
+}
+
/*
 * Build an in-memory LUKS2 header (binary fields + JSON metadata) for a
 * new device format. Computes metadata/keyslots area sizes and data
 * offset, creates the first crypt segment and its pbkdf2 digest.
 * Returns 0 on success, -EINVAL on failure (JSON object is released).
 */
int LUKS2_generate_hdr(
	struct crypt_device *cd,
	struct luks2_hdr *hdr,
	const struct volume_key *vk,
	const char *cipherName,
	const char *cipherMode,
	const char *integrity,
	const char *uuid,
	unsigned int sector_size, /* in bytes */
	uint64_t data_offset, /* in bytes */
	uint64_t align_offset, /* in bytes */
	uint64_t required_alignment,
	uint64_t metadata_size,
	uint64_t keyslots_size)
{
	struct json_object *jobj_segment, *jobj_integrity, *jobj_keyslots, *jobj_segments, *jobj_config;
	char cipher[128];
	uuid_t partitionUuid;
	int r, digest;
	uint64_t mdev_size;

	if (!metadata_size)
		metadata_size = LUKS2_HDR_16K_LEN;
	hdr->hdr_size = metadata_size;

	if (data_offset && data_offset < get_min_offset(hdr)) {
		log_err(cd, _("Requested data offset is too small."));
		return -EINVAL;
	}

	/* Increase keyslot size according to data offset */
	if (!keyslots_size && data_offset)
		keyslots_size = data_offset - get_min_offset(hdr);

	/* keyslots size has to be 4 KiB aligned */
	keyslots_size -= (keyslots_size % 4096);

	if (keyslots_size > LUKS2_MAX_KEYSLOTS_SIZE)
		keyslots_size = LUKS2_MAX_KEYSLOTS_SIZE;

	if (!keyslots_size) {
		assert(LUKS2_DEFAULT_HDR_SIZE > 2 * LUKS2_HDR_OFFSET_MAX);
		keyslots_size = LUKS2_DEFAULT_HDR_SIZE - get_min_offset(hdr);
		/* Decrease keyslots_size due to metadata device being too small */
		if (!device_size(crypt_metadata_device(cd), &mdev_size) &&
		    ((keyslots_size + get_min_offset(hdr)) > mdev_size) &&
		    device_fallocate(crypt_metadata_device(cd), keyslots_size + get_min_offset(hdr)) &&
		    (get_min_offset(hdr) <= mdev_size))
			keyslots_size = mdev_size - get_min_offset(hdr);
	}

	/* Decrease keyslots_size if we have smaller data_offset */
	if (data_offset && (keyslots_size + get_min_offset(hdr)) > data_offset) {
		keyslots_size = data_offset - get_min_offset(hdr);
		log_dbg(cd, "Decreasing keyslot area size to %" PRIu64
			" bytes due to the requested data offset %"
			PRIu64 " bytes.", keyslots_size, data_offset);
	}

	/* Data offset has priority */
	if (!data_offset && required_alignment) {
		data_offset = size_round_up(get_min_offset(hdr) + keyslots_size,
					    (size_t)required_alignment);
		data_offset += align_offset;
	}

	log_dbg(cd, "Formatting LUKS2 with JSON metadata area %" PRIu64
		" bytes and keyslots area %" PRIu64 " bytes.",
		metadata_size - LUKS2_HDR_BIN_LEN, keyslots_size);

	if (keyslots_size < (LUKS2_HDR_OFFSET_MAX - 2*LUKS2_HDR_16K_LEN))
		log_std(cd, _("WARNING: keyslots area (%" PRIu64 " bytes) is very small,"
			" available LUKS2 keyslot count is very limited.\n"),
			keyslots_size);

	hdr->seqid = 1;
	hdr->version = 2;
	memset(hdr->label, 0, LUKS2_LABEL_L);
	strcpy(hdr->checksum_alg, "sha256");
	/* NOTE(review): crypt_random_get return values are ignored here — confirm intentional */
	crypt_random_get(cd, (char*)hdr->salt1, LUKS2_SALT_L, CRYPT_RND_SALT);
	crypt_random_get(cd, (char*)hdr->salt2, LUKS2_SALT_L, CRYPT_RND_SALT);

	if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
		log_err(cd, _("Wrong LUKS UUID format provided."));
		return -EINVAL;
	}
	if (!uuid)
		uuid_generate(partitionUuid);

	uuid_unparse(partitionUuid, hdr->uuid);

	/* cipher spec is "name-mode", or just "name" when mode is empty */
	if (*cipherMode != '\0')
		r = snprintf(cipher, sizeof(cipher), "%s-%s", cipherName, cipherMode);
	else
		r = snprintf(cipher, sizeof(cipher), "%s", cipherName);
	if (r < 0 || (size_t)r >= sizeof(cipher))
		return -EINVAL;

	/* NOTE(review): json_object_new_object results are not NULL-checked — confirm OOM policy */
	hdr->jobj = json_object_new_object();

	jobj_keyslots = json_object_new_object();
	json_object_object_add(hdr->jobj, "keyslots", jobj_keyslots);
	json_object_object_add(hdr->jobj, "tokens", json_object_new_object());
	jobj_segments = json_object_new_object();
	json_object_object_add(hdr->jobj, "segments", jobj_segments);
	json_object_object_add(hdr->jobj, "digests", json_object_new_object());
	jobj_config = json_object_new_object();
	json_object_object_add(hdr->jobj, "config", jobj_config);

	digest = LUKS2_digest_create(cd, "pbkdf2", hdr, vk);
	if (digest < 0)
		goto err;

	if (LUKS2_digest_segment_assign(cd, hdr, 0, digest, 1, 0) < 0)
		goto err;

	jobj_segment = json_segment_create_crypt(data_offset, 0, NULL, cipher, sector_size, 0);
	if (!jobj_segment)
		goto err;

	/* optional dm-integrity parameters attached to the segment */
	if (integrity) {
		jobj_integrity = json_object_new_object();
		json_object_object_add(jobj_integrity, "type", json_object_new_string(integrity));
		json_object_object_add(jobj_integrity, "journal_encryption", json_object_new_string("none"));
		json_object_object_add(jobj_integrity, "journal_integrity", json_object_new_string("none"));
		json_object_object_add(jobj_segment, "integrity", jobj_integrity);
	}

	json_object_object_add_by_uint(jobj_segments, 0, jobj_segment);

	json_object_object_add(jobj_config, "json_size", crypt_jobj_new_uint64(metadata_size - LUKS2_HDR_BIN_LEN));
	json_object_object_add(jobj_config, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));

	JSON_DBG(cd, hdr->jobj, "Header JSON:");
	return 0;
err:
	json_object_put(hdr->jobj);
	hdr->jobj = NULL;
	return -EINVAL;
}
+
/*
 * Wipe both header copies plus padding with zeroes, then overwrite the
 * keyslots area with random data. With a detached header only the first
 * 4 KiB is zeroed (data offset does not apply there).
 * Returns 0 on success, negative errno-style code otherwise.
 */
int LUKS2_wipe_header_areas(struct crypt_device *cd,
	struct luks2_hdr *hdr, bool detached_header)
{
	int r;
	uint64_t offset, length;
	size_t wipe_block;

	/* Wipe complete header, keyslots and padding areas with zeroes. */
	offset = 0;
	length = LUKS2_get_data_offset(hdr) * SECTOR_SIZE;
	wipe_block = 1024 * 1024;

	/* refuse to wipe based on an invalid JSON header */
	if (LUKS2_hdr_validate(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN))
		return -EINVAL;

	/* On detached header wipe at least the first 4k */
	if (detached_header) {
		length = 4096;
		wipe_block = 4096;
	}

	/* device must be large enough for the zeroed region */
	r = device_check_size(cd, crypt_metadata_device(cd), length, 1);
	if (r)
		return r;

	log_dbg(cd, "Wiping LUKS areas (0x%06" PRIx64 " - 0x%06" PRIx64") with zeroes.",
		offset, length + offset);

	r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO,
			      offset, length, wipe_block, NULL, NULL);
	if (r < 0)
		return r;

	/* Wipe keyslot area */
	wipe_block = 1024 * 1024;
	offset = get_min_offset(hdr);
	length = LUKS2_keyslots_size(hdr);

	log_dbg(cd, "Wiping keyslots area (0x%06" PRIx64 " - 0x%06" PRIx64") with random data.",
		offset, length + offset);

	return crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_RANDOM,
				 offset, length, wipe_block, NULL, NULL);
}
+
+int LUKS2_set_keyslots_size(struct luks2_hdr *hdr, uint64_t data_offset)
+{
+ json_object *jobj_config;
+ uint64_t keyslots_size;
+
+ if (data_offset < get_min_offset(hdr))
+ return 1;
+
+ keyslots_size = data_offset - get_min_offset(hdr);
+
+ /* keep keyslots_size reasonable for custom data alignments */
+ if (keyslots_size > LUKS2_MAX_KEYSLOTS_SIZE)
+ keyslots_size = LUKS2_MAX_KEYSLOTS_SIZE;
+
+ /* keyslots size has to be 4 KiB aligned */
+ keyslots_size -= (keyslots_size % 4096);
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return 1;
+
+ json_object_object_add(jobj_config, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));
+ return 0;
+}
diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
new file mode 100644
index 0000000..4771f04
--- /dev/null
+++ b/lib/luks2/luks2_json_metadata.c
@@ -0,0 +1,2874 @@
+/*
+ * LUKS - Linux Unified Key Setup v2
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include "../integrity/integrity.h"
+#include <ctype.h>
+#include <uuid/uuid.h>
+
+#define LUKS_STRIPES 4000
+
/* Generic byte interval used by area/segment overlap validation. */
struct interval {
	uint64_t offset;
	uint64_t length;
};
+
+void hexprint_base64(struct crypt_device *cd, json_object *jobj,
+ const char *sep, const char *line_sep)
+{
+ char *buf = NULL;
+ size_t buf_len;
+ unsigned int i;
+
+ if (crypt_base64_decode(&buf, &buf_len, json_object_get_string(jobj),
+ json_object_get_string_len(jobj)))
+ return;
+
+ for (i = 0; i < buf_len; i++) {
+ if (i && !(i % 16))
+ log_std(cd, "\n\t%s", line_sep);
+ log_std(cd, "%02hhx%s", buf[i], sep);
+ }
+ log_std(cd, "\n");
+ free(buf);
+}
+
+void JSON_DBG(struct crypt_device *cd, json_object *jobj, const char *desc)
+{
+ if (desc)
+ crypt_log(cd, CRYPT_LOG_DEBUG_JSON, desc);
+ crypt_log(cd, CRYPT_LOG_DEBUG_JSON, json_object_to_json_string_ext(jobj,
+ JSON_C_TO_STRING_PRETTY | JSON_C_TO_STRING_NOSLASHESCAPE));
+}
+
+/*
+ * JSON array helpers
+ */
/* Return the array element whose string value equals 'num', or NULL. */
struct json_object *LUKS2_array_jobj(struct json_object *array, const char *num)
{
	struct json_object *jobj_item;
	int i, len = (int) json_object_array_length(array);

	for (i = 0; i < len; i++) {
		jobj_item = json_object_array_get_idx(array, i);
		if (!strcmp(json_object_get_string(jobj_item), num))
			return jobj_item;
	}

	return NULL;
}
+
+struct json_object *LUKS2_array_remove(struct json_object *array, const char *num)
+{
+ struct json_object *jobj1, *jobj_removing = NULL, *array_new;
+ int i;
+
+ jobj_removing = LUKS2_array_jobj(array, num);
+ if (!jobj_removing)
+ return NULL;
+
+ /* Create new array without jobj_removing. */
+ array_new = json_object_new_array();
+ for (i = 0; i < (int) json_object_array_length(array); i++) {
+ jobj1 = json_object_array_get_idx(array, i);
+ if (jobj1 != jobj_removing)
+ json_object_array_add(array_new, json_object_get(jobj1));
+ }
+
+ return array_new;
+}
+
+/*
+ * JSON struct access helpers
+ */
+json_object *LUKS2_get_keyslot_jobj(struct luks2_hdr *hdr, int keyslot)
+{
+ json_object *jobj1, *jobj2;
+ char keyslot_name[16];
+
+ if (!hdr || keyslot < 0)
+ return NULL;
+
+ if (snprintf(keyslot_name, sizeof(keyslot_name), "%u", keyslot) < 1)
+ return NULL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj1))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, keyslot_name, &jobj2))
+ return NULL;
+
+ return jobj2;
+}
+
+json_object *LUKS2_get_tokens_jobj(struct luks2_hdr *hdr)
+{
+ json_object *jobj_tokens;
+
+ if (!hdr || !json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens))
+ return NULL;
+
+ return jobj_tokens;
+}
+
+json_object *LUKS2_get_token_jobj(struct luks2_hdr *hdr, int token)
+{
+ json_object *jobj1, *jobj2;
+ char token_name[16];
+
+ if (!hdr || token < 0)
+ return NULL;
+
+ jobj1 = LUKS2_get_tokens_jobj(hdr);
+ if (!jobj1)
+ return NULL;
+
+ if (snprintf(token_name, sizeof(token_name), "%u", token) < 1)
+ return NULL;
+
+ json_object_object_get_ex(jobj1, token_name, &jobj2);
+ return jobj2;
+}
+
+json_object *LUKS2_get_digest_jobj(struct luks2_hdr *hdr, int digest)
+{
+ json_object *jobj1, *jobj2;
+ char digest_name[16];
+
+ if (!hdr || digest < 0)
+ return NULL;
+
+ if (snprintf(digest_name, sizeof(digest_name), "%u", digest) < 1)
+ return NULL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "digests", &jobj1))
+ return NULL;
+
+ json_object_object_get_ex(jobj1, digest_name, &jobj2);
+ return jobj2;
+}
+
+static json_object *json_get_segments_jobj(json_object *hdr_jobj)
+{
+ json_object *jobj_segments;
+
+ if (!hdr_jobj || !json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments))
+ return NULL;
+
+ return jobj_segments;
+}
+
+json_object *LUKS2_get_segment_jobj(struct luks2_hdr *hdr, int segment)
+{
+ if (!hdr)
+ return NULL;
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ return json_segments_get_segment(json_get_segments_jobj(hdr->jobj), segment);
+}
+
+json_object *LUKS2_get_segments_jobj(struct luks2_hdr *hdr)
+{
+ return hdr ? json_get_segments_jobj(hdr->jobj) : NULL;
+}
+
/* Number of segments in the header, or -EINVAL when hdr is NULL. */
int LUKS2_segments_count(struct luks2_hdr *hdr)
{
	if (!hdr)
		return -EINVAL;

	return json_segments_count(LUKS2_get_segments_jobj(hdr));
}
+
/*
 * Default segment id: the "backup-final" flagged segment if present,
 * otherwise segment 0 when any segment exists, otherwise -EINVAL.
 */
int LUKS2_get_default_segment(struct luks2_hdr *hdr)
{
	int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");

	if (segment >= 0)
		return segment;

	if (LUKS2_segments_count(hdr) >= 1)
		return 0;

	return -EINVAL;
}
+
+/*
+ * json_type_int needs to be validated first.
+ * See validate_json_uint32()
+ */
uint32_t crypt_jobj_get_uint32(json_object *jobj)
{
	/* caller must have run validate_json_uint32() first; value fits uint32_t */
	return json_object_get_int64(jobj);
}
+
+/* jobj has to be json_type_string and numbered */
+static bool json_str_to_uint64(json_object *jobj, uint64_t *value)
+{
+ char *endptr;
+ unsigned long long tmp;
+
+ errno = 0;
+ tmp = strtoull(json_object_get_string(jobj), &endptr, 10);
+ if (*endptr || errno) {
+ *value = 0;
+ return false;
+ }
+
+ *value = tmp;
+ return true;
+}
+
+uint64_t crypt_jobj_get_uint64(json_object *jobj)
+{
+ uint64_t r;
+ json_str_to_uint64(jobj, &r);
+ return r;
+}
+
+json_object *crypt_jobj_new_uint64(uint64_t value)
+{
+ /* 18446744073709551615 */
+ char num[21];
+ int r;
+ json_object *jobj;
+
+ r = snprintf(num, sizeof(num), "%" PRIu64, value);
+ if (r < 0 || (size_t)r >= sizeof(num))
+ return NULL;
+
+ jobj = json_object_new_string(num);
+ return jobj;
+}
+
+/*
+ * Validate helpers
+ */
/*
 * Check that a JSON object name consists solely of decimal digits
 * (LUKS2 keyslot/token/digest/segment names must be numbered).
 * Logs and returns false on the first non-digit character.
 */
static bool numbered(struct crypt_device *cd, const char *name, const char *key)
{
	int i;

	for (i = 0; key[i]; i++)
		/* cast: passing a negative plain char to isdigit() is UB (CERT STR37-C) */
		if (!isdigit((unsigned char)key[i])) {
			log_dbg(cd, "%s \"%s\" is not in numbered form.", name, key);
			return false;
		}
	return true;
}
+
+json_object *json_contains(struct crypt_device *cd, json_object *jobj, const char *name,
+ const char *section, const char *key, json_type type)
+{
+ json_object *sobj;
+
+ if (!json_object_object_get_ex(jobj, key, &sobj) ||
+ !json_object_is_type(sobj, type)) {
+ log_dbg(cd, "%s \"%s\" is missing \"%s\" (%s) specification.",
+ section, name, key, json_type_to_name(type));
+ return NULL;
+ }
+
+ return sobj;
+}
+
+json_object *json_contains_string(struct crypt_device *cd, json_object *jobj,
+ const char *name, const char *section, const char *key)
+{
+ json_object *sobj = json_contains(cd, jobj, name, section, key, json_type_string);
+
+ if (!sobj)
+ return NULL;
+
+ if (strlen(json_object_get_string(sobj)) < 1)
+ return NULL;
+
+ return sobj;
+}
+
+bool validate_json_uint32(json_object *jobj)
+{
+ int64_t tmp;
+
+ errno = 0;
+ tmp = json_object_get_int64(jobj);
+
+ return (errno || tmp < 0 || tmp > UINT32_MAX) ? false : true;
+}
+
+static bool validate_keyslots_array(struct crypt_device *cd, json_object *jarr, json_object *jobj_keys)
+{
+ json_object *jobj;
+ int i = 0, length = (int) json_object_array_length(jarr);
+
+ while (i < length) {
+ jobj = json_object_array_get_idx(jarr, i);
+ if (!json_object_is_type(jobj, json_type_string)) {
+ log_dbg(cd, "Illegal value type in keyslots array at index %d.", i);
+ return false;
+ }
+
+ if (!json_contains(cd, jobj_keys, "", "Keyslots section",
+ json_object_get_string(jobj), json_type_object))
+ return false;
+
+ i++;
+ }
+
+ return true;
+}
+
+static bool validate_segments_array(struct crypt_device *cd, json_object *jarr, json_object *jobj_segments)
+{
+ json_object *jobj;
+ int i = 0, length = (int) json_object_array_length(jarr);
+
+ while (i < length) {
+ jobj = json_object_array_get_idx(jarr, i);
+ if (!json_object_is_type(jobj, json_type_string)) {
+ log_dbg(cd, "Illegal value type in segments array at index %d.", i);
+ return false;
+ }
+
+ if (!json_contains(cd, jobj_segments, "", "Segments section",
+ json_object_get_string(jobj), json_type_object))
+ return false;
+
+ i++;
+ }
+
+ return true;
+}
+
/*
 * True when at least one digest in the "digests" section references the
 * named segment in its "segments" array.
 */
static bool segment_has_digest(const char *segment_name, json_object *jobj_digests)
{
	json_object *jobj_segments;

	json_object_object_foreach(jobj_digests, key, val) {
		UNUSED(key);
		json_object_object_get_ex(val, "segments", &jobj_segments);
		if (LUKS2_array_jobj(jobj_segments, segment_name))
			return true;
	}

	return false;
}
+
+
/*
 * Validate keyslot area intervals: each must start past both JSON areas,
 * have nonzero length, not overflow, fit inside the keyslots area, and
 * no two intervals may overlap. O(n^2) pairwise check.
 */
static bool validate_intervals(struct crypt_device *cd,
	int length, const struct interval *ix,
	uint64_t metadata_size, uint64_t keyslots_area_end)
{
	int j, i = 0;

	while (i < length) {
		/* Offset cannot be inside primary or secondary JSON area */
		if (ix[i].offset < 2 * metadata_size) {
			log_dbg(cd, "Illegal area offset: %" PRIu64 ".", ix[i].offset);
			return false;
		}

		if (!ix[i].length) {
			log_dbg(cd, "Area length must be greater than zero.");
			return false;
		}

		/* overflow check before computing the end of interval i */
		if (ix[i].offset > (UINT64_MAX - ix[i].length)) {
			log_dbg(cd, "Interval offset+length overflow.");
			return false;
		}

		if ((ix[i].offset + ix[i].length) > keyslots_area_end) {
			log_dbg(cd, "Area [%" PRIu64 ", %" PRIu64 "] overflows binary keyslots area (ends at offset: %" PRIu64 ").",
				ix[i].offset, ix[i].offset + ix[i].length, keyslots_area_end);
			return false;
		}

		/* interval i must not start inside any other interval j */
		for (j = 0; j < length; j++) {
			if (i == j)
				continue;

			if (ix[j].offset > (UINT64_MAX - ix[j].length)) {
				log_dbg(cd, "Interval offset+length overflow.");
				return false;
			}

			if ((ix[i].offset >= ix[j].offset) && (ix[i].offset < (ix[j].offset + ix[j].length))) {
				log_dbg(cd, "Overlapping areas [%" PRIu64 ",%" PRIu64 "] and [%" PRIu64 ",%" PRIu64 "].",
					ix[i].offset, ix[i].offset + ix[i].length,
					ix[j].offset, ix[j].offset + ix[j].length);
				return false;
			}
		}

		i++;
	}

	return true;
}
+
+static int LUKS2_keyslot_validate(struct crypt_device *cd, json_object *hdr_keyslot, const char *key)
+{
+ json_object *jobj_key_size;
+
+ if (!json_contains_string(cd, hdr_keyslot, key, "Keyslot", "type"))
+ return 1;
+ if (!(jobj_key_size = json_contains(cd, hdr_keyslot, key, "Keyslot", "key_size", json_type_int)))
+ return 1;
+
+ /* enforce uint32_t type */
+ if (!validate_json_uint32(jobj_key_size)) {
+ log_dbg(cd, "Illegal field \"key_size\":%s.",
+ json_object_get_string(jobj_key_size));
+ return 1;
+ }
+
+ return 0;
+}
+
+int LUKS2_token_validate(struct crypt_device *cd,
+ json_object *hdr_jobj, json_object *jobj_token, const char *key)
+{
+ json_object *jarr, *jobj_keyslots;
+
+ /* keyslots are not yet validated, but we need to know token doesn't reference missing keyslot */
+ if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
+ return 1;
+
+ if (!json_contains_string(cd, jobj_token, key, "Token", "type"))
+ return 1;
+
+ jarr = json_contains(cd, jobj_token, key, "Token", "keyslots", json_type_array);
+ if (!jarr)
+ return 1;
+
+ if (!validate_keyslots_array(cd, jarr, jobj_keyslots))
+ return 1;
+
+ return 0;
+}
+
+static int hdr_validate_json_size(struct crypt_device *cd, json_object *hdr_jobj, uint64_t hdr_json_size)
+{
+ json_object *jobj, *jobj1;
+ const char *json;
+ uint64_t json_area_size, json_size;
+
+ json_object_object_get_ex(hdr_jobj, "config", &jobj);
+ json_object_object_get_ex(jobj, "json_size", &jobj1);
+
+ json = json_object_to_json_string_ext(hdr_jobj,
+ JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
+ json_area_size = crypt_jobj_get_uint64(jobj1);
+ json_size = (uint64_t)strlen(json);
+
+ if (hdr_json_size != json_area_size) {
+ log_dbg(cd, "JSON area size does not match value in binary header.");
+ return 1;
+ }
+
+ if (json_size > json_area_size) {
+ log_dbg(cd, "JSON does not fit in the designated area.");
+ return 1;
+ }
+
+ return 0;
+}
+
+int LUKS2_check_json_size(struct crypt_device *cd, const struct luks2_hdr *hdr)
+{
+ return hdr_validate_json_size(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN);
+}
+
/*
 * Validate the "keyslots" section: must exist, every member name must be
 * numbered and every member must pass keyslot validation.
 * Returns 0 when valid, 1 otherwise.
 */
static int hdr_validate_keyslots(struct crypt_device *cd, json_object *hdr_jobj)
{
	json_object *jobj;

	if (!(jobj = json_contains(cd, hdr_jobj, "", "JSON area", "keyslots", json_type_object)))
		return 1;

	json_object_object_foreach(jobj, key, val) {
		if (!numbered(cd, "Keyslot", key))
			return 1;
		if (LUKS2_keyslot_validate(cd, val, key))
			return 1;
	}

	return 0;
}
+
/*
 * Validate the "tokens" section: must exist, every member name must be
 * numbered and every member must pass token validation.
 * Returns 0 when valid, 1 otherwise.
 */
static int hdr_validate_tokens(struct crypt_device *cd, json_object *hdr_jobj)
{
	json_object *jobj;

	if (!(jobj = json_contains(cd, hdr_jobj, "", "JSON area", "tokens", json_type_object)))
		return 1;

	json_object_object_foreach(jobj, key, val) {
		if (!numbered(cd, "Token", key))
			return 1;
		if (LUKS2_token_validate(cd, hdr_jobj, val, key))
			return 1;
	}

	return 0;
}
+
/*
 * Validate a "crypt" type segment: mandatory iv_tweak/encryption/
 * sector_size members, optional integrity sub-object, sector-size
 * alignment of the segment size and assignment to a key digest.
 * Returns 0 when valid, 1 otherwise.
 */
static int hdr_validate_crypt_segment(struct crypt_device *cd, json_object *jobj,
	const char *key, json_object *jobj_digests,
	uint64_t size)
{
	int r;
	json_object *jobj_ivoffset, *jobj_sector_size, *jobj_integrity;
	uint32_t sector_size;
	uint64_t ivoffset;

	if (!(jobj_ivoffset = json_contains_string(cd, jobj, key, "Segment", "iv_tweak")) ||
	    !json_contains_string(cd, jobj, key, "Segment", "encryption") ||
	    !(jobj_sector_size = json_contains(cd, jobj, key, "Segment", "sector_size", json_type_int)))
		return 1;

	/* integrity (optional, but when present all three members are mandatory) */
	if (json_object_object_get_ex(jobj, "integrity", &jobj_integrity)) {
		if (!json_contains(cd, jobj, key, "Segment", "integrity", json_type_object) ||
		    !json_contains_string(cd, jobj_integrity, key, "Segment integrity", "type") ||
		    !json_contains_string(cd, jobj_integrity, key, "Segment integrity", "journal_encryption") ||
		    !json_contains_string(cd, jobj_integrity, key, "Segment integrity", "journal_integrity"))
			return 1;
	}

	/* enforce uint32_t type */
	if (!validate_json_uint32(jobj_sector_size)) {
		log_dbg(cd, "Illegal field \"sector_size\":%s.",
			json_object_get_string(jobj_sector_size));
		return 1;
	}

	sector_size = crypt_jobj_get_uint32(jobj_sector_size);
	if (!sector_size || MISALIGNED_512(sector_size)) {
		log_dbg(cd, "Illegal sector size: %" PRIu32, sector_size);
		return 1;
	}

	/* iv_tweak is stored as a numbered string; must parse as uint64 */
	if (!numbered(cd, "iv_tweak", json_object_get_string(jobj_ivoffset)) ||
	    !json_str_to_uint64(jobj_ivoffset, &ivoffset)) {
		log_dbg(cd, "Illegal iv_tweak value.");
		return 1;
	}

	if (size % sector_size) {
		log_dbg(cd, "Size field has to be aligned to sector size: %" PRIu32, sector_size);
		return 1;
	}

	r = segment_has_digest(key, jobj_digests);

	if (!r)
		log_dbg(cd, "Crypt segment %s not assigned to key digest.", key);

	return !r;
}
+
/*
 * Check that regular segment intervals do not overlap and that only the
 * last regular segment may have 'dynamic' (UINT64_MAX) length.
 *
 * @length number of intervals in @ix
 * @ix     array of {offset, length} pairs; length UINT64_MAX = dynamic
 *
 * Returns true when all intervals are disjoint and well-formed.
 */
static bool validate_segment_intervals(struct crypt_device *cd,
	int length, const struct interval *ix)
{
	int j, i = 0;

	while (i < length) {
		/* dynamic size extends to end of device, so it must come last */
		if (ix[i].length == UINT64_MAX && (i != (length - 1))) {
			log_dbg(cd, "Only last regular segment is allowed to have 'dynamic' size.");
			return false;
		}

		/* compare segment i against every other segment j */
		for (j = 0; j < length; j++) {
			if (i == j)
				continue;

			/* reject offset+length wrapping past UINT64_MAX */
			if (ix[j].length != UINT64_MAX && ix[j].offset > (UINT64_MAX - ix[j].length)) {
				log_dbg(cd, "Interval offset+length overflow.");
				return false;
			}

			/* i starts inside j: overlap (dynamic j covers everything above its offset) */
			if ((ix[i].offset >= ix[j].offset) && (ix[j].length == UINT64_MAX || (ix[i].offset < (ix[j].offset + ix[j].length)))) {
				log_dbg(cd, "Overlapping segments [%" PRIu64 ",%" PRIu64 "]%s and [%" PRIu64 ",%" PRIu64 "]%s.",
					ix[i].offset, ix[i].offset + ix[i].length, ix[i].length == UINT64_MAX ? "(dynamic)" : "",
					ix[j].offset, ix[j].offset + ix[j].length, ix[j].length == UINT64_MAX ? "(dynamic)" : "");
				return false;
			}
		}

		i++;
	}

	return true;
}
+
+static int reqs_unknown(uint32_t reqs)
+{
+ return reqs & CRYPT_REQUIREMENT_UNKNOWN;
+}
+
+static int reqs_reencrypt(uint32_t reqs)
+{
+ return reqs & CRYPT_REQUIREMENT_OFFLINE_REENCRYPT;
+}
+
+static int reqs_reencrypt_online(uint32_t reqs)
+{
+ return reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
+}
+
+/*
+ * Config section requirements object must be valid.
+ * Also general segments section must be validated first.
+ */
/*
 * When the online reencryption requirement is set, check consistency of
 * backup segments: both "backup-final" and "backup-previous" must exist
 * among segments [first_backup, segments_count), and every regular segment
 * must match one of them (segments flagged "in-reencryption" must match
 * the final one). Returns 0 when consistent, 1 otherwise.
 */
static int validate_reencrypt_segments(struct crypt_device *cd, json_object *hdr_jobj, json_object *jobj_segments, int first_backup, int segments_count)
{
	json_object *jobj, *jobj_backup_previous = NULL, *jobj_backup_final = NULL;
	uint32_t reqs;
	int i, r;
	/* temporary wrapper so we can reuse LUKS2_config_get_requirements() */
	struct luks2_hdr dummy = {
		.jobj = hdr_jobj
	};

	r = LUKS2_config_get_requirements(cd, &dummy, &reqs);
	if (r)
		return 1;

	if (reqs_reencrypt_online(reqs)) {
		/* locate backup-final and backup-previous among backup segments */
		for (i = first_backup; i < segments_count; i++) {
			jobj = json_segments_get_segment(jobj_segments, i);
			if (!jobj)
				return 1;
			if (json_segment_contains_flag(jobj, "backup-final", 0))
				jobj_backup_final = jobj;
			else if (json_segment_contains_flag(jobj, "backup-previous", 0))
				jobj_backup_previous = jobj;
		}

		if (!jobj_backup_final || !jobj_backup_previous) {
			log_dbg(cd, "Backup segment is missing.");
			return 1;
		}

		/* every regular segment must agree with one of the backups */
		for (i = 0; i < first_backup; i++) {
			jobj = json_segments_get_segment(jobj_segments, i);
			if (!jobj)
				return 1;

			if (json_segment_contains_flag(jobj, "in-reencryption", 0)) {
				if (!json_segment_cmp(jobj, jobj_backup_final)) {
					log_dbg(cd, "Segment in reencryption does not match backup final segment.");
					return 1;
				}
				continue;
			}

			if (!json_segment_cmp(jobj, jobj_backup_final) &&
			    !json_segment_cmp(jobj, jobj_backup_previous)) {
				log_dbg(cd, "Segment does not match neither backup final or backup previous segment.");
				return 1;
			}
		}
	}

	return 0;
}
+
/*
 * Validate the top-level "segments" object: mandatory per-segment fields,
 * sector alignment, optional flags array, ordering of regular vs backup
 * segments, non-overlapping regular intervals and (when online reencryption
 * is flagged) backup segment consistency. Returns 0 if valid, 1 otherwise.
 */
static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
{
	json_object *jobj_segments, *jobj_digests, *jobj_offset, *jobj_size, *jobj_type, *jobj_flags, *jobj;
	uint64_t offset, size;
	int i, r, count, first_backup = -1;
	struct interval *intervals = NULL;

	if (!(jobj_segments = json_contains(cd, hdr_jobj, "", "JSON area", "segments", json_type_object)))
		return 1;

	count = json_object_object_length(jobj_segments);
	if (count < 1) {
		log_dbg(cd, "Empty segments section.");
		return 1;
	}

	/* digests should already be validated */
	if (!json_object_object_get_ex(hdr_jobj, "digests", &jobj_digests))
		return 1;

	json_object_object_foreach(jobj_segments, key, val) {
		if (!numbered(cd, "Segment", key))
			return 1;

		/* those fields are mandatory for all segment types */
		if (!(jobj_type = json_contains_string(cd, val, key, "Segment", "type")) ||
		    !(jobj_offset = json_contains_string(cd, val, key, "Segment", "offset")) ||
		    !(jobj_size = json_contains_string(cd, val, key, "Segment", "size")))
			return 1;

		if (!numbered(cd, "offset", json_object_get_string(jobj_offset)))
			return 1;

		if (!json_str_to_uint64(jobj_offset, &offset)) {
			log_dbg(cd, "Illegal segment offset value.");
			return 1;
		}

		/* size "dynamic" means whole device starting at 'offset' */
		if (strcmp(json_object_get_string(jobj_size), "dynamic")) {
			if (!numbered(cd, "size", json_object_get_string(jobj_size)))
				return 1;
			if (!json_str_to_uint64(jobj_size, &size) || !size) {
				log_dbg(cd, "Illegal segment size value.");
				return 1;
			}
		} else
			size = 0;

		/* all device-mapper devices are aligned to 512 sector size */
		if (MISALIGNED_512(offset)) {
			log_dbg(cd, "Offset field has to be aligned to sector size: %" PRIu32, SECTOR_SIZE);
			return 1;
		}
		if (MISALIGNED_512(size)) {
			log_dbg(cd, "Size field has to be aligned to sector size: %" PRIu32, SECTOR_SIZE);
			return 1;
		}

		/* flags array is optional and must contain strings */
		if (json_object_object_get_ex(val, "flags", NULL)) {
			if (!(jobj_flags = json_contains(cd, val, key, "Segment", "flags", json_type_array)))
				return 1;
			for (i = 0; i < (int) json_object_array_length(jobj_flags); i++)
				if (!json_object_is_type(json_object_array_get_idx(jobj_flags, i), json_type_string))
					return 1;
		}

		/* track smallest backup segment key; all regular segments
		 * must come (numerically) before any backup segment */
		i = atoi(key);
		if (json_segment_is_backup(val)) {
			if (first_backup < 0 || i < first_backup)
				first_backup = i;
		} else {
			if ((first_backup >= 0) && i >= first_backup) {
				log_dbg(cd, "Regular segment at %d is behind backup segment at %d", i, first_backup);
				return 1;
			}
		}

		/* crypt */
		if (!strcmp(json_object_get_string(jobj_type), "crypt") &&
		    hdr_validate_crypt_segment(cd, val, key, jobj_digests, size))
			return 1;
	}

	/* backup segment at key 0 would leave no regular segment at all */
	if (first_backup == 0) {
		log_dbg(cd, "No regular segment.");
		return 1;
	}

	/* avoid needlessly large allocation when first backup segment is invalid */
	if (first_backup >= count) {
		log_dbg(cd, "Gap between last regular segment and backup segment at key %d.", first_backup);
		return 1;
	}

	/* no backup segments: all 'count' segments are regular */
	if (first_backup < 0)
		first_backup = count;

	/* overflow-checked allocation of one interval per regular segment */
	if ((size_t)first_backup < SIZE_MAX / sizeof(*intervals))
		intervals = malloc(first_backup * sizeof(*intervals));

	if (!intervals) {
		log_dbg(cd, "Not enough memory.");
		return 1;
	}

	for (i = 0; i < first_backup; i++) {
		jobj = json_segments_get_segment(jobj_segments, i);
		if (!jobj) {
			log_dbg(cd, "Gap at key %d in segments object.", i);
			free(intervals);
			return 1;
		}
		intervals[i].offset = json_segment_get_offset(jobj, 0);
		/* GNU ?: extension: dynamic size (0) maps to UINT64_MAX */
		intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
	}

	r = !validate_segment_intervals(cd, first_backup, intervals);
	free(intervals);

	if (r)
		return 1;

	/* remaining (backup) keys must be contiguous up to 'count' */
	for (; i < count; i++) {
		if (!json_segments_get_segment(jobj_segments, i)) {
			log_dbg(cd, "Gap at key %d in segments object.", i);
			return 1;
		}
	}

	return validate_reencrypt_segments(cd, hdr_jobj, jobj_segments, first_backup, count);
}
+
+static uint64_t LUKS2_metadata_size_jobj(json_object *jobj)
+{
+ json_object *jobj1, *jobj2;
+ uint64_t json_size;
+
+ json_object_object_get_ex(jobj, "config", &jobj1);
+ json_object_object_get_ex(jobj1, "json_size", &jobj2);
+ json_str_to_uint64(jobj2, &json_size);
+
+ return json_size + LUKS2_HDR_BIN_LEN;
+}
+
+uint64_t LUKS2_metadata_size(struct luks2_hdr *hdr)
+{
+ return LUKS2_metadata_size_jobj(hdr->jobj);
+}
+
/*
 * Validate keyslot binary areas: each keyslot must describe an area with
 * string type, numeric offset and size; the collected intervals must not
 * overlap each other, the metadata areas or exceed the total header size.
 * Returns 0 if valid, nonzero otherwise (-ENOMEM on allocation failure,
 * 1 on validation failure — callers only test for nonzero).
 */
static int hdr_validate_areas(struct crypt_device *cd, json_object *hdr_jobj)
{
	struct interval *intervals;
	json_object *jobj_keyslots, *jobj_offset, *jobj_length, *jobj_segments, *jobj_area;
	int length, ret, i = 0;
	uint64_t metadata_size;

	if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
		return 1;

	/* segments are already validated */
	if (!json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments))
		return 1;

	/* config is already validated */
	metadata_size = LUKS2_metadata_size_jobj(hdr_jobj);

	length = json_object_object_length(jobj_keyslots);

	/* Empty section */
	if (length == 0)
		return 0;

	if (length < 0) {
		log_dbg(cd, "Invalid keyslot areas specification.");
		return 1;
	}

	intervals = malloc(length * sizeof(*intervals));
	if (!intervals) {
		log_dbg(cd, "Not enough memory.");
		return -ENOMEM;
	}

	json_object_object_foreach(jobj_keyslots, key, val) {

		/* area object with string type and decimal-string offset/size */
		if (!(jobj_area = json_contains(cd, val, key, "Keyslot", "area", json_type_object)) ||
		    !json_contains_string(cd, jobj_area, key, "Keyslot area", "type") ||
		    !(jobj_offset = json_contains_string(cd, jobj_area, key, "Keyslot", "offset")) ||
		    !(jobj_length = json_contains_string(cd, jobj_area, key, "Keyslot", "size")) ||
		    !numbered(cd, "offset", json_object_get_string(jobj_offset)) ||
		    !numbered(cd, "size", json_object_get_string(jobj_length))) {
			free(intervals);
			return 1;
		}

		/* rule out values > UINT64_MAX */
		if (!json_str_to_uint64(jobj_offset, &intervals[i].offset) ||
		    !json_str_to_uint64(jobj_length, &intervals[i].length)) {
			log_dbg(cd, "Illegal keyslot area values.");
			free(intervals);
			return 1;
		}

		i++;
	}

	/* defensive: iteration count must match the reported object length */
	if (length != i) {
		free(intervals);
		return 1;
	}

	ret = validate_intervals(cd, length, intervals, metadata_size, LUKS2_hdr_and_areas_size_jobj(hdr_jobj)) ? 0 : 1;

	free(intervals);

	return ret;
}
+
+static int hdr_validate_digests(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jarr_keys, *jarr_segs, *jobj, *jobj_keyslots, *jobj_segments;
+
+ if (!(jobj = json_contains(cd, hdr_jobj, "", "JSON area", "digests", json_type_object)))
+ return 1;
+
+ /* keyslots are not yet validated, but we need to know digest doesn't reference missing keyslot */
+ if (!(jobj_keyslots = json_contains(cd, hdr_jobj, "", "JSON area", "keyslots", json_type_object)))
+ return 1;
+
+ /* segments are not yet validated, but we need to know digest doesn't reference missing segment */
+ if (!(jobj_segments = json_contains(cd, hdr_jobj, "", "JSON area", "segments", json_type_object)))
+ return 1;
+
+ json_object_object_foreach(jobj, key, val) {
+ if (!numbered(cd, "Digest", key))
+ return 1;
+
+ if (!json_contains_string(cd, val, key, "Digest", "type") ||
+ !(jarr_keys = json_contains(cd, val, key, "Digest", "keyslots", json_type_array)) ||
+ !(jarr_segs = json_contains(cd, val, key, "Digest", "segments", json_type_array)))
+ return 1;
+
+ if (!validate_keyslots_array(cd, jarr_keys, jobj_keyslots))
+ return 1;
+ if (!validate_segments_array(cd, jarr_segs, jobj_segments))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* requirements being validated in stand-alone routine */
+static int hdr_validate_config(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj_config, *jobj;
+ int i;
+ uint64_t keyslots_size, metadata_size, segment_offset;
+
+ if (!(jobj_config = json_contains(cd, hdr_jobj, "", "JSON area", "config", json_type_object)))
+ return 1;
+
+ if (!(jobj = json_contains_string(cd, jobj_config, "section", "Config", "json_size")))
+ return 1;
+ if (!json_str_to_uint64(jobj, &metadata_size)) {
+ log_dbg(cd, "Illegal config json_size value.");
+ return 1;
+ }
+
+ /* single metadata instance is assembled from json area size plus
+ * binary header size */
+ metadata_size += LUKS2_HDR_BIN_LEN;
+
+ if (!(jobj = json_contains_string(cd, jobj_config, "section", "Config", "keyslots_size")))
+ return 1;
+ if(!json_str_to_uint64(jobj, &keyslots_size)) {
+ log_dbg(cd, "Illegal config keyslot_size value.");
+ return 1;
+ }
+
+ if (LUKS2_check_metadata_area_size(metadata_size)) {
+ log_dbg(cd, "Unsupported LUKS2 header size (%" PRIu64 ").", metadata_size);
+ return 1;
+ }
+
+ if (LUKS2_check_keyslots_area_size(keyslots_size)) {
+ log_dbg(cd, "Unsupported LUKS2 keyslots size (%" PRIu64 ").", keyslots_size);
+ return 1;
+ }
+
+ /*
+ * validate keyslots_size fits in between (2 * metadata_size) and first
+ * segment_offset (except detached header)
+ */
+ segment_offset = json_segments_get_minimal_offset(json_get_segments_jobj(hdr_jobj), 0);
+ if (segment_offset &&
+ (segment_offset < keyslots_size ||
+ (segment_offset - keyslots_size) < (2 * metadata_size))) {
+ log_dbg(cd, "keyslots_size is too large %" PRIu64 " (bytes). Data offset: %" PRIu64
+ ", keyslots offset: %" PRIu64, keyslots_size, segment_offset, 2 * metadata_size);
+ return 1;
+ }
+
+ /* Flags array is optional */
+ if (json_object_object_get_ex(jobj_config, "flags", &jobj)) {
+ if (!json_contains(cd, jobj_config, "section", "Config", "flags", json_type_array))
+ return 1;
+
+ /* All array members must be strings */
+ for (i = 0; i < (int) json_object_array_length(jobj); i++)
+ if (!json_object_is_type(json_object_array_get_idx(jobj, i), json_type_string))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
 * Check whether a requirement flag name denotes (any version of) the online
 * reencryption requirement: either the legacy "online-reencrypt" name or
 * "online-reencrypt-v<N>" where <N> is a non-empty string of decimal digits.
 */
static bool reencrypt_candidate_flag(const char *flag)
{
	/* length of the versioned prefix "online-reencrypt-v" */
	static const size_t prefix_len = 18;
	const char *ptr;

	assert(flag);

	/* legacy (version 1) flag name */
	if (!strcmp(flag, "online-reencrypt"))
		return true;

	if (strncmp(flag, "online-reencrypt-v", prefix_len))
		return false;

	/* version suffix must be non-empty */
	ptr = flag + prefix_len;
	if (!*ptr)
		return false;

	while (*ptr) {
		/* cast to unsigned char: passing a negative plain char to
		 * isdigit() is undefined behavior (CERT STR37-C) */
		if (!isdigit((unsigned char)*ptr))
			return false;
		ptr++;
	}

	return true;
}
+
/*
 * Validate the optional config "requirements" object and its optional
 * "mandatory" array: members must be strings and at most one of them may
 * be an online reencryption flag. Returns 0 if valid, 1 otherwise.
 */
static int hdr_validate_requirements(struct crypt_device *cd, json_object *hdr_jobj)
{
	int i;
	json_object *jobj_config, *jobj, *jobj1;
	unsigned online_reencrypt_flag = 0;

	if (!(jobj_config = json_contains(cd, hdr_jobj, "", "JSON area", "config", json_type_object)))
		return 1;

	/* Requirements object is optional */
	if (json_object_object_get_ex(jobj_config, "requirements", &jobj)) {
		/* but when present, it must really be an object */
		if (!json_contains(cd, jobj_config, "section", "Config", "requirements", json_type_object))
			return 1;

		/* Mandatory array is optional */
		if (json_object_object_get_ex(jobj, "mandatory", &jobj1)) {
			if (!json_contains(cd, jobj, "section", "Requirements", "mandatory", json_type_array))
				return 1;

			/* All array members must be strings */
			for (i = 0; i < (int) json_object_array_length(jobj1); i++) {
				if (!json_object_is_type(json_object_array_get_idx(jobj1, i), json_type_string))
					return 1;

				/* count flags matching online-reencrypt(-vN) */
				if (reencrypt_candidate_flag(json_object_get_string(json_object_array_get_idx(jobj1, i))))
					online_reencrypt_flag++;

			}
		}
	}

	/* conflicting reencryption versions are not allowed */
	if (online_reencrypt_flag > 1) {
		log_dbg(cd, "Multiple online reencryption requirement flags detected.");
		return 1;
	}

	return 0;
}
+
+int LUKS2_hdr_validate(struct crypt_device *cd, json_object *hdr_jobj, uint64_t json_size)
+{
+ struct {
+ int (*validate)(struct crypt_device *, json_object *);
+ } checks[] = {
+ { hdr_validate_requirements },
+ { hdr_validate_tokens },
+ { hdr_validate_digests },
+ { hdr_validate_segments },
+ { hdr_validate_keyslots },
+ { hdr_validate_config },
+ { hdr_validate_areas },
+ { NULL }
+ };
+ int i;
+
+ if (!hdr_jobj)
+ return 1;
+
+ for (i = 0; checks[i].validate; i++)
+ if (checks[i].validate && checks[i].validate(cd, hdr_jobj))
+ return 1;
+
+ if (hdr_validate_json_size(cd, hdr_jobj, json_size))
+ return 1;
+
+ /* validate keyslot implementations */
+ if (LUKS2_keyslots_validate(cd, hdr_jobj))
+ return 1;
+
+ return 0;
+}
+
+static bool hdr_json_free(json_object **jobj)
+{
+ assert(jobj);
+
+ if (json_object_put(*jobj))
+ *jobj = NULL;
+
+ return (*jobj == NULL);
+}
+
+static int hdr_update_copy_for_rollback(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ json_object **jobj_copy;
+
+ assert(hdr);
+ assert(hdr->jobj);
+
+ jobj_copy = (json_object **)&hdr->jobj_rollback;
+
+ if (!hdr_json_free(jobj_copy)) {
+ log_dbg(cd, "LUKS2 rollback metadata copy still in use");
+ return -EINVAL;
+ }
+
+ return json_object_copy(hdr->jobj, jobj_copy) ? -ENOMEM : 0;
+}
+
+/* FIXME: should we expose do_recovery parameter explicitly? */
/*
 * Read and parse the LUKS2 header from the metadata device into hdr.
 * Normally runs under a read lock; when on-disk recovery is needed
 * (-EAGAIN), the lock is upgraded to a write lock and the read retried.
 * On success a rollback snapshot of the metadata is stored.
 * Returns 0 on success, negative errno otherwise.
 */
int LUKS2_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr, int repair)
{
	int r;

	r = device_read_lock(cd, crypt_metadata_device(cd));
	if (r) {
		log_err(cd, _("Failed to acquire read lock on device %s."),
			device_path(crypt_metadata_device(cd)));
		return r;
	}

	r = LUKS2_disk_hdr_read(cd, hdr, crypt_metadata_device(cd), 1, !repair);
	if (r == -EAGAIN) {
		/* unlikely: auto-recovery is required and failed due to read lock being held */
		device_read_unlock(cd, crypt_metadata_device(cd));

		/* Do not use LUKS2_device_write lock. Recovery. */
		r = device_write_lock(cd, crypt_metadata_device(cd));
		if (r < 0) {
			log_err(cd, _("Failed to acquire write lock on device %s."),
				device_path(crypt_metadata_device(cd)));
			return r;
		}

		r = LUKS2_disk_hdr_read(cd, hdr, crypt_metadata_device(cd), 1, !repair);

		device_write_unlock(cd, crypt_metadata_device(cd));
	} else
		device_read_unlock(cd, crypt_metadata_device(cd));

	/* keep a pristine copy for LUKS2_hdr_rollback() */
	if (!r && (r = hdr_update_copy_for_rollback(cd, hdr)))
		log_dbg(cd, "Failed to update rollback LUKS2 metadata.");

	return r;
}
+
+static int hdr_cleanup_and_validate(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ LUKS2_digests_erase_unused(cd, hdr);
+
+ return LUKS2_hdr_validate(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN);
+}
+
/*
 * Validate and write the header to disk without sequence-id checking,
 * then refresh the rollback snapshot. Returns 0 or negative errno.
 */
int LUKS2_hdr_write_force(struct crypt_device *cd, struct luks2_hdr *hdr)
{
	int r;

	if (hdr_cleanup_and_validate(cd, hdr))
		return -EINVAL;

	r = LUKS2_disk_hdr_write(cd, hdr, crypt_metadata_device(cd), false);
	if (r)
		return r;

	r = hdr_update_copy_for_rollback(cd, hdr);
	if (r)
		log_dbg(cd, "Failed to update rollback LUKS2 metadata.");

	return r;
}
+
/*
 * Validate and write the header to disk with sequence-id checking enabled,
 * then refresh the rollback snapshot. Returns 0 or negative errno.
 */
int LUKS2_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr)
{
	int r;

	if (hdr_cleanup_and_validate(cd, hdr))
		return -EINVAL;

	r = LUKS2_disk_hdr_write(cd, hdr, crypt_metadata_device(cd), true);
	if (r)
		return r;

	r = hdr_update_copy_for_rollback(cd, hdr);
	if (r)
		log_dbg(cd, "Failed to update rollback LUKS2 metadata.");

	return r;
}
+
+int LUKS2_hdr_rollback(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ json_object **jobj_copy;
+
+ assert(hdr->jobj_rollback);
+
+ log_dbg(cd, "Rolling back in-memory LUKS2 json metadata.");
+
+ jobj_copy = (json_object **)&hdr->jobj;
+
+ if (!hdr_json_free(jobj_copy)) {
+ log_dbg(cd, "LUKS2 header still in use");
+ return -EINVAL;
+ }
+
+ return json_object_copy(hdr->jobj_rollback, jobj_copy) ? -ENOMEM : 0;
+}
+
+int LUKS2_hdr_uuid(struct crypt_device *cd, struct luks2_hdr *hdr, const char *uuid)
+{
+ uuid_t partitionUuid;
+
+ if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
+ log_err(cd, _("Wrong LUKS UUID format provided."));
+ return -EINVAL;
+ }
+ if (!uuid)
+ uuid_generate(partitionUuid);
+
+ uuid_unparse(partitionUuid, hdr->uuid);
+
+ return LUKS2_hdr_write(cd, hdr);
+}
+
+int LUKS2_hdr_labels(struct crypt_device *cd, struct luks2_hdr *hdr,
+ const char *label, const char *subsystem, int commit)
+{
+ //FIXME: check if the labels are the same and skip this.
+
+ memset(hdr->label, 0, LUKS2_LABEL_L);
+ if (label)
+ strncpy(hdr->label, label, LUKS2_LABEL_L-1);
+
+ memset(hdr->subsystem, 0, LUKS2_LABEL_L);
+ if (subsystem)
+ strncpy(hdr->subsystem, subsystem, LUKS2_LABEL_L-1);
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+void LUKS2_hdr_free(struct crypt_device *cd, struct luks2_hdr *hdr)
+{
+ json_object **jobj;
+
+ assert(hdr);
+
+ jobj = (json_object **)&hdr->jobj;
+
+ if (!hdr_json_free(jobj))
+ log_dbg(cd, "LUKS2 header still in use");
+
+ jobj = (json_object **)&hdr->jobj_rollback;
+
+ if (!hdr_json_free(jobj))
+ log_dbg(cd, "LUKS2 rollback metadata copy still in use");
+}
+
+static uint64_t LUKS2_keyslots_size_jobj(json_object *jobj)
+{
+ json_object *jobj1, *jobj2;
+ uint64_t keyslots_size;
+
+ json_object_object_get_ex(jobj, "config", &jobj1);
+ json_object_object_get_ex(jobj1, "keyslots_size", &jobj2);
+ json_str_to_uint64(jobj2, &keyslots_size);
+
+ return keyslots_size;
+}
+
+uint64_t LUKS2_keyslots_size(struct luks2_hdr *hdr)
+{
+ return LUKS2_keyslots_size_jobj(hdr->jobj);
+}
+
+uint64_t LUKS2_hdr_and_areas_size_jobj(json_object *jobj)
+{
+ return 2 * LUKS2_metadata_size_jobj(jobj) + LUKS2_keyslots_size_jobj(jobj);
+}
+
+uint64_t LUKS2_hdr_and_areas_size(struct luks2_hdr *hdr)
+{
+ return LUKS2_hdr_and_areas_size_jobj(hdr->jobj);
+}
+
/*
 * Store a backup of the whole LUKS2 header (both metadata copies and the
 * keyslots area) into backup_file. The file is created exclusively
 * (O_EXCL) with mode 0400 and padded up to a page-size multiple.
 * Returns 0 on success, negative errno otherwise.
 */
int LUKS2_hdr_backup(struct crypt_device *cd, struct luks2_hdr *hdr,
	const char *backup_file)
{
	struct device *device = crypt_metadata_device(cd);
	int fd, devfd, r = 0;
	ssize_t hdr_size;
	ssize_t ret, buffer_size;
	char *buffer = NULL;

	hdr_size = LUKS2_hdr_and_areas_size(hdr);
	buffer_size = size_round_up(hdr_size, crypt_getpagesize());

	buffer = malloc(buffer_size);
	if (!buffer)
		return -ENOMEM;

	/* fixed: hdr_size/buffer_size are ssize_t, so %zd (not %zu) is the
	 * matching conversion specifier */
	log_dbg(cd, "Storing backup of header (%zd bytes).", hdr_size);
	log_dbg(cd, "Output backup file size: %zd bytes.", buffer_size);

	r = device_read_lock(cd, device);
	if (r) {
		log_err(cd, _("Failed to acquire read lock on device %s."),
			device_path(crypt_metadata_device(cd)));
		goto out;
	}

	devfd = device_open_locked(cd, device, O_RDONLY);
	if (devfd < 0) {
		device_read_unlock(cd, device);
		log_err(cd, _("Device %s is not a valid LUKS device."), device_path(device));
		r = (devfd == -1) ? -EINVAL : devfd;
		goto out;
	}

	/* read the raw header bytes while still holding the read lock */
	if (read_lseek_blockwise(devfd, device_block_size(cd, device),
				 device_alignment(device), buffer, hdr_size, 0) < hdr_size) {
		device_read_unlock(cd, device);
		r = -EIO;
		goto out;
	}

	device_read_unlock(cd, device);

	fd = open(backup_file, O_CREAT|O_EXCL|O_WRONLY, S_IRUSR);
	if (fd == -1) {
		if (errno == EEXIST)
			log_err(cd, _("Requested header backup file %s already exists."), backup_file);
		else
			log_err(cd, _("Cannot create header backup file %s."), backup_file);
		r = -EINVAL;
		goto out;
	}
	ret = write_buffer(fd, buffer, buffer_size);
	close(fd);
	if (ret < buffer_size) {
		log_err(cd, _("Cannot write header backup file %s."), backup_file);
		r = -EIO;
	} else
		r = 0;
out:
	/* buffer holds key material areas: wipe before freeing */
	crypt_safe_memzero(buffer, buffer_size);
	free(buffer);
	return r;
}
+
/*
 * Restore a LUKS2 header from backup_file onto the metadata device.
 * The backup is validated first; when the device already contains a LUKS2
 * header, data offset and total header size must match (unless an offline
 * reencryption is pending) and the user must confirm the overwrite.
 * On return hdr is freed — caller is expected to re-read the header.
 * Returns 0 on success, negative errno otherwise.
 */
int LUKS2_hdr_restore(struct crypt_device *cd, struct luks2_hdr *hdr,
	const char *backup_file)
{
	struct device *backup_device, *device = crypt_metadata_device(cd);
	int r, fd, devfd = -1, diff_uuid = 0;
	ssize_t ret, buffer_size = 0;
	char *buffer = NULL, msg[1024];
	struct luks2_hdr hdr_file = {}, tmp_hdr = {};
	uint32_t reqs = 0;

	r = device_alloc(cd, &backup_device, backup_file);
	if (r < 0)
		return r;

	/* parse and validate the backup header first */
	r = device_read_lock(cd, backup_device);
	if (r) {
		log_err(cd, _("Failed to acquire read lock on device %s."),
			device_path(backup_device));
		device_free(cd, backup_device);
		return r;
	}

	r = LUKS2_disk_hdr_read(cd, &hdr_file, backup_device, 0, 0);
	device_read_unlock(cd, backup_device);
	device_free(cd, backup_device);

	if (r < 0) {
		log_err(cd, _("Backup file does not contain valid LUKS header."));
		goto out;
	}

	/* do not allow header restore from backup with unmet requirements */
	if (LUKS2_unmet_requirements(cd, &hdr_file, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 1)) {
		log_err(cd, _("Forbidden LUKS2 requirements detected in backup %s."),
			backup_file);
		r = -ETXTBSY;
		goto out;
	}

	buffer_size = LUKS2_hdr_and_areas_size(&hdr_file);
	buffer = malloc(buffer_size);
	if (!buffer) {
		r = -ENOMEM;
		goto out;
	}

	/* re-read the raw backup bytes that will be written to the device */
	fd = open(backup_file, O_RDONLY);
	if (fd == -1) {
		log_err(cd, _("Cannot open header backup file %s."), backup_file);
		r = -EINVAL;
		goto out;
	}

	ret = read_buffer(fd, buffer, buffer_size);
	close(fd);
	if (ret < buffer_size) {
		log_err(cd, _("Cannot read header backup file %s."), backup_file);
		r = -EIO;
		goto out;
	}

	/* compare the backup against any existing on-device header */
	r = LUKS2_hdr_read(cd, &tmp_hdr, 0);
	if (r == 0) {
		log_dbg(cd, "Device %s already contains LUKS2 header, checking UUID and requirements.", device_path(device));
		r = LUKS2_config_get_requirements(cd, &tmp_hdr, &reqs);
		if (r)
			goto out;

		if (memcmp(tmp_hdr.uuid, hdr_file.uuid, LUKS2_UUID_L))
			diff_uuid = 1;

		/* layout checks are skipped while offline reencryption is pending */
		if (!reqs_reencrypt(reqs)) {
			log_dbg(cd, "Checking LUKS2 header size and offsets.");
			if (LUKS2_get_data_offset(&tmp_hdr) != LUKS2_get_data_offset(&hdr_file)) {
				log_err(cd, _("Data offset differ on device and backup, restore failed."));
				r = -EINVAL;
				goto out;
			}
			/* FIXME: what could go wrong? Erase if we're fine with consequences */
			if (buffer_size != (ssize_t) LUKS2_hdr_and_areas_size(&tmp_hdr)) {
				log_err(cd, _("Binary header with keyslot areas size differ on device and backup, restore failed."));
				r = -EINVAL;
				goto out;
			}
		}
	}

	/* build the confirmation prompt; 'r' still signals whether a valid
	 * header was found on the device */
	r = snprintf(msg, sizeof(msg), _("Device %s %s%s%s%s"), device_path(device),
		 r ? _("does not contain LUKS2 header. Replacing header can destroy data on that device.") :
		     _("already contains LUKS2 header. Replacing header will destroy existing keyslots."),
		 diff_uuid ? _("\nWARNING: real device header has different UUID than backup!") : "",
		 reqs_unknown(reqs) ? _("\nWARNING: unknown LUKS2 requirements detected in real device header!"
					"\nReplacing header with backup may corrupt the data on that device!") : "",
		 reqs_reencrypt(reqs) ? _("\nWARNING: Unfinished offline reencryption detected on the device!"
					  "\nReplacing header with backup may corrupt data.") : "");
	if (r < 0 || (size_t) r >= sizeof(msg)) {
		r = -ENOMEM;
		goto out;
	}

	if (!crypt_confirm(cd, msg)) {
		r = -EINVAL;
		goto out;
	}

	log_dbg(cd, "Storing backup of header (%zu bytes) to device %s.", buffer_size, device_path(device));

	/* Do not use LUKS2_device_write lock for checking sequence id on restore */
	r = device_write_lock(cd, device);
	if (r < 0) {
		log_err(cd, _("Failed to acquire write lock on device %s."),
			device_path(device));
		goto out;
	}

	devfd = device_open_locked(cd, device, O_RDWR);
	if (devfd < 0) {
		if (errno == EACCES)
			log_err(cd, _("Cannot write to device %s, permission denied."),
				device_path(device));
		else
			log_err(cd, _("Cannot open device %s."), device_path(device));
		device_write_unlock(cd, device);
		r = -EINVAL;
		goto out;
	}

	if (write_lseek_blockwise(devfd, device_block_size(cd, device),
				  device_alignment(device), buffer, buffer_size, 0) < buffer_size)
		r = -EIO;
	else
		r = 0;

	device_write_unlock(cd, device);
out:
	/* headers and buffer may hold key material: free and wipe everything */
	LUKS2_hdr_free(cd, hdr);
	LUKS2_hdr_free(cd, &hdr_file);
	LUKS2_hdr_free(cd, &tmp_hdr);
	crypt_safe_memzero(&hdr_file, sizeof(hdr_file));
	crypt_safe_memzero(&tmp_hdr, sizeof(tmp_hdr));
	crypt_safe_memzero(buffer, buffer_size);
	free(buffer);
	device_sync(cd, device);
	return r;
}
+
+/*
+ * Persistent config flags
+ */
/* Mapping between activation flag bits and their persistent on-disk JSON
 * names in the config "flags" array; terminated by a NULL sentinel. */
static const struct {
	uint32_t flag;
	const char *description;
} persistent_flags[] = {
	{ CRYPT_ACTIVATE_ALLOW_DISCARDS, "allow-discards" },
	{ CRYPT_ACTIVATE_SAME_CPU_CRYPT, "same-cpu-crypt" },
	{ CRYPT_ACTIVATE_SUBMIT_FROM_CRYPT_CPUS, "submit-from-crypt-cpus" },
	{ CRYPT_ACTIVATE_NO_JOURNAL, "no-journal" },
	{ CRYPT_ACTIVATE_NO_READ_WORKQUEUE, "no-read-workqueue" },
	{ CRYPT_ACTIVATE_NO_WRITE_WORKQUEUE, "no-write-workqueue" },
	{ 0, NULL }
};
+
+int LUKS2_config_get_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *flags)
+{
+ json_object *jobj1, *jobj_config, *jobj_flags;
+ int i, j, found;
+
+ if (!hdr || !flags)
+ return -EINVAL;
+
+ *flags = 0;
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj_config, "flags", &jobj_flags))
+ return 0;
+
+ for (i = 0; i < (int) json_object_array_length(jobj_flags); i++) {
+ jobj1 = json_object_array_get_idx(jobj_flags, i);
+ found = 0;
+ for (j = 0; persistent_flags[j].description && !found; j++)
+ if (!strcmp(persistent_flags[j].description,
+ json_object_get_string(jobj1))) {
+ *flags |= persistent_flags[j].flag;
+ log_dbg(cd, "Using persistent flag %s.",
+ json_object_get_string(jobj1));
+ found = 1;
+ }
+ if (!found)
+ log_verbose(cd, _("Ignored unknown flag %s."),
+ json_object_get_string(jobj1));
+ }
+
+ return 0;
+}
+
+int LUKS2_config_set_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t flags)
+{
+ json_object *jobj_config, *jobj_flags;
+ int i;
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return 0;
+
+ jobj_flags = json_object_new_array();
+
+ for (i = 0; persistent_flags[i].description; i++) {
+ if (flags & persistent_flags[i].flag) {
+ log_dbg(cd, "Setting persistent flag: %s.", persistent_flags[i].description);
+ json_object_array_add(jobj_flags,
+ json_object_new_string(persistent_flags[i].description));
+ }
+ }
+
+ /* Replace or add new flags array */
+ json_object_object_add(jobj_config, "flags", jobj_flags);
+
+ return LUKS2_hdr_write(cd, hdr);
+}
+
+/*
+ * json format example (mandatory array must not be ignored,
+ * all other future fields may be added later)
+ *
+ * "requirements": {
+ * mandatory : [],
+ * optional0 : [],
+ * optional1 : "lala"
+ * }
+ */
+
+/* LUKS2 library requirements */
/* One known requirement: its flag bit, the reencryption metadata version
 * it denotes (used by LUKS2_config_get_reencrypt_version) and the on-disk
 * name stored in the "mandatory" requirements array. */
struct requirement_flag {
	uint32_t flag;
	uint8_t version;
	const char *description;
};

/* Returned by get_requirement_by_name() for names this library does not know. */
static const struct requirement_flag unknown_requirement_flag = { CRYPT_REQUIREMENT_UNKNOWN, 0, NULL };

/* Known requirement names; terminated by a NULL-description sentinel.
 * Note the unversioned "online-reencrypt" maps to version 1. */
static const struct requirement_flag requirements_flags[] = {
	{ CRYPT_REQUIREMENT_OFFLINE_REENCRYPT,1, "offline-reencrypt" },
	{ CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 2, "online-reencrypt-v2" },
	{ CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 3, "online-reencrypt-v3" },
	{ CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 1, "online-reencrypt" },
	{ 0, 0, NULL }
};
+
+static const struct requirement_flag *get_requirement_by_name(const char *requirement)
+{
+ int i;
+
+ for (i = 0; requirements_flags[i].description; i++)
+ if (!strcmp(requirement, requirements_flags[i].description))
+ return requirements_flags + i;
+
+ return &unknown_requirement_flag;
+}
+
+static json_object *mandatory_requirements_jobj(struct luks2_hdr *hdr)
+{
+ json_object *jobj_config, *jobj_requirements, *jobj_mandatory;
+
+ assert(hdr);
+
+ if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_requirements, "mandatory", &jobj_mandatory))
+ return NULL;
+
+ return jobj_mandatory;
+}
+
+bool LUKS2_reencrypt_requirement_candidate(struct luks2_hdr *hdr)
+{
+ json_object *jobj_mandatory;
+ int i, len;
+
+ assert(hdr);
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return false;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+ if (len <= 0)
+ return false;
+
+ for (i = 0; i < len; i++) {
+ if (reencrypt_candidate_flag(json_object_get_string(json_object_array_get_idx(jobj_mandatory, i))))
+ return true;
+ }
+
+ return false;
+}
+
/*
 * Find the reencryption metadata version implied by the first recognized
 * "online-reencrypt*" flag among mandatory requirements.
 * Returns 0 and stores the version, or -ENOENT when no (known) online
 * reencryption requirement is present.
 */
int LUKS2_config_get_reencrypt_version(struct luks2_hdr *hdr, uint8_t *version)
{
	json_object *jobj_mandatory, *jobj;
	int i, len;
	const struct requirement_flag *req;

	assert(hdr);
	assert(version);

	jobj_mandatory = mandatory_requirements_jobj(hdr);
	if (!jobj_mandatory)
		return -ENOENT;

	len = (int) json_object_array_length(jobj_mandatory);
	if (len <= 0)
		return -ENOENT;

	for (i = 0; i < len; i++) {
		jobj = json_object_array_get_idx(jobj_mandatory, i);

		/* search for requirements prefixed with "online-reencrypt" */
		/* (16 == strlen("online-reencrypt")) */
		if (strncmp(json_object_get_string(jobj), "online-reencrypt", 16))
			continue;

		/* check current library is aware of the requirement */
		req = get_requirement_by_name(json_object_get_string(jobj));
		if (req->flag == CRYPT_REQUIREMENT_UNKNOWN)
			continue;

		*version = req->version;

		return 0;
	}

	return -ENOENT;
}
+
+static const struct requirement_flag *stored_requirement_name_by_id(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t req_id)
+{
+ json_object *jobj_mandatory, *jobj;
+ int i, len;
+ const struct requirement_flag *req;
+
+ assert(hdr);
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return NULL;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+ if (len <= 0)
+ return NULL;
+
+ for (i = 0; i < len; i++) {
+ jobj = json_object_array_get_idx(jobj_mandatory, i);
+ req = get_requirement_by_name(json_object_get_string(jobj));
+ if (req->flag == req_id)
+ return req;
+ }
+
+ return NULL;
+}
+
/*
 * Fills *reqs with the bitmask of mandatory requirement flags stored in the
 * header (requirements were introduced after the cryptsetup 2.0 release);
 * returns 0.
 */
+int LUKS2_config_get_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t *reqs)
+{
+ json_object *jobj_mandatory, *jobj;
+ int i, len;
+ const struct requirement_flag *req;
+
+ assert(hdr);
+ assert(reqs);
+
+ *reqs = 0;
+
+ jobj_mandatory = mandatory_requirements_jobj(hdr);
+ if (!jobj_mandatory)
+ return 0;
+
+ len = (int) json_object_array_length(jobj_mandatory);
+ if (len <= 0)
+ return 0;
+
+ log_dbg(cd, "LUKS2 requirements detected:");
+
+ for (i = 0; i < len; i++) {
+ jobj = json_object_array_get_idx(jobj_mandatory, i);
+ req = get_requirement_by_name(json_object_get_string(jobj));
+ log_dbg(cd, "%s - %sknown", json_object_get_string(jobj),
+ reqs_unknown(req->flag) ? "un" : "");
+ *reqs |= req->flag;
+ }
+
+ return 0;
+}
+
/*
 * Replace the set of mandatory requirements stored in the header with the
 * flags in @reqs. Already stored requirement names are kept verbatim so a
 * specific stored version of a flag is retained. Returns 0 on success
 * (header is written out when @commit is set), -ENOMEM on allocation
 * failure, -EINVAL otherwise (including when @reqs contains a flag unknown
 * to this library).
 */
int LUKS2_config_set_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs, bool commit)
{
	json_object *jobj_config, *jobj_requirements, *jobj_mandatory, *jobj;
	int i, r = -EINVAL;
	const struct requirement_flag *req;
	uint32_t req_id;

	if (!hdr)
		return -EINVAL;

	jobj_mandatory = json_object_new_array();
	if (!jobj_mandatory)
		return -ENOMEM;

	for (i = 0; requirements_flags[i].description; i++) {
		req_id = reqs & requirements_flags[i].flag;
		if (req_id) {
			/* retain already stored version of requirement flag */
			req = stored_requirement_name_by_id(cd, hdr, req_id);
			if (req)
				jobj = json_object_new_string(req->description);
			else
				jobj = json_object_new_string(requirements_flags[i].description);
			if (!jobj) {
				r = -ENOMEM;
				goto err;
			}
			/* array takes ownership of the new string object */
			json_object_array_add(jobj_mandatory, jobj);
			/* erase processed flag from input set */
			reqs &= ~(requirements_flags[i].flag);
		}
	}

	/* any remaining bit in requirements is unknown therefore illegal */
	if (reqs) {
		log_dbg(cd, "Illegal requirement flag(s) requested");
		goto err;
	}

	if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
		goto err;

	if (!json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements)) {
		/* create the "requirements" object on demand */
		jobj_requirements = json_object_new_object();
		if (!jobj_requirements) {
			r = -ENOMEM;
			goto err;
		}
		json_object_object_add(jobj_config, "requirements", jobj_requirements);
	}

	if (json_object_array_length(jobj_mandatory) > 0) {
		/* replace mandatory field with new values */
		json_object_object_add(jobj_requirements, "mandatory", jobj_mandatory);
	} else {
		/* new mandatory field was empty, delete old one */
		json_object_object_del(jobj_requirements, "mandatory");
		json_object_put(jobj_mandatory);
	}

	/* remove empty requirements object */
	if (!json_object_object_length(jobj_requirements))
		json_object_object_del(jobj_config, "requirements");

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
err:
	json_object_put(jobj_mandatory);
	return r;
}
+
/*
 * Build a new JSON array holding a copy of the header's mandatory
 * requirement names, skipping names unknown to this library and any
 * whose flag is set in @filter_req_ids. Caller owns the returned
 * reference; returns NULL on allocation failure.
 */
static json_object *LUKS2_get_mandatory_requirements_filtered_jobj(struct luks2_hdr *hdr,
	uint32_t filter_req_ids)
{
	int i, len;
	const struct requirement_flag *req;
	json_object *jobj_mandatory, *jobj_mandatory_filtered, *jobj;

	jobj_mandatory_filtered = json_object_new_array();
	if (!jobj_mandatory_filtered)
		return NULL;

	/* no stored requirements at all: return the empty array */
	jobj_mandatory = mandatory_requirements_jobj(hdr);
	if (!jobj_mandatory)
		return jobj_mandatory_filtered;

	len = (int) json_object_array_length(jobj_mandatory);

	for (i = 0; i < len; i++) {
		jobj = json_object_array_get_idx(jobj_mandatory, i);
		req = get_requirement_by_name(json_object_get_string(jobj));
		if (req->flag == CRYPT_REQUIREMENT_UNKNOWN || req->flag & filter_req_ids)
			continue;
		/* NOTE(review): json_object_new_string() result is not checked;
		 * on allocation failure a JSON null element is appended here —
		 * confirm callers tolerate that. */
		json_object_array_add(jobj_mandatory_filtered,
			json_object_new_string(req->description));
	}

	return jobj_mandatory_filtered;
}
+
+/*
+ * The function looks for specific version of requirement id.
+ * If it can't be fulfilled function fails.
+ */
int LUKS2_config_set_requirement_version(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	uint32_t req_id,
	uint8_t req_version,
	bool commit)
{
	json_object *jobj_config, *jobj_requirements, *jobj_mandatory;
	const struct requirement_flag *req;
	int r = -EINVAL;

	if (!hdr || req_id == CRYPT_REQUIREMENT_UNKNOWN)
		return -EINVAL;

	req = requirements_flags;

	/* find the table entry matching both the flag and the exact version */
	while (req->description) {
		/* we have a match */
		if (req->flag == req_id && req->version == req_version)
			break;
		req++;
	}

	if (!req->description)
		return -EINVAL;

	/*
	 * Creates copy of mandatory requirements set without specific requirement
	 * (no matter the version) we want to set.
	 */
	jobj_mandatory = LUKS2_get_mandatory_requirements_filtered_jobj(hdr, req_id);
	if (!jobj_mandatory)
		return -ENOMEM;

	/* NOTE(review): json_object_new_string() result unchecked (OOM would
	 * append a JSON null) — confirm this is acceptable. */
	json_object_array_add(jobj_mandatory, json_object_new_string(req->description));

	if (!json_object_object_get_ex(hdr->jobj, "config", &jobj_config))
		goto err;

	if (!json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements)) {
		/* create the "requirements" object on demand */
		jobj_requirements = json_object_new_object();
		if (!jobj_requirements) {
			r = -ENOMEM;
			goto err;
		}
		json_object_object_add(jobj_config, "requirements", jobj_requirements);
	}

	/* ownership of jobj_mandatory is handed over to the requirements object */
	json_object_object_add(jobj_requirements, "mandatory", jobj_mandatory);

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
err:
	json_object_put(jobj_mandatory);
	return r;
}
+
+/*
+ * Header dump
+ */
/* Print persistent activation flags and mandatory requirements of the header. */
static void hdr_dump_config(struct crypt_device *cd, json_object *hdr_jobj)
{

	json_object *jobj1, *jobj_config, *jobj_flags, *jobj_requirements, *jobj_mandatory;
	int i = 0, flags = 0, reqs = 0;

	log_std(cd, "Flags: \t");

	if (json_object_object_get_ex(hdr_jobj, "config", &jobj_config)) {
		if (json_object_object_get_ex(jobj_config, "flags", &jobj_flags))
			flags = (int) json_object_array_length(jobj_flags);
		if (json_object_object_get_ex(jobj_config, "requirements", &jobj_requirements) &&
		    json_object_object_get_ex(jobj_requirements, "mandatory", &jobj_mandatory))
			reqs = (int) json_object_array_length(jobj_mandatory);
	}

	for (i = 0; i < flags; i++) {
		jobj1 = json_object_array_get_idx(jobj_flags, i);
		log_std(cd, "%s ", json_object_get_string(jobj1));
	}

	/* terminate the flags line; emit an extra newline when there is no
	 * requirements section so the dump spacing stays consistent */
	log_std(cd, "%s\n%s", flags > 0 ? "" : "(no flags)", reqs > 0 ? "" : "\n");

	if (reqs > 0) {
		log_std(cd, "Requirements:\t");
		for (i = 0; i < reqs; i++) {
			jobj1 = json_object_array_get_idx(jobj_mandatory, i);
			log_std(cd, "%s ", json_object_get_string(jobj1));
		}
		log_std(cd, "\n\n");
	}
}
+
+static const char *get_priority_desc(json_object *jobj)
+{
+ crypt_keyslot_priority priority;
+ json_object *jobj_priority;
+ const char *text;
+
+ if (json_object_object_get_ex(jobj, "priority", &jobj_priority))
+ priority = (crypt_keyslot_priority)(int)json_object_get_int(jobj_priority);
+ else
+ priority = CRYPT_SLOT_PRIORITY_NORMAL;
+
+ switch (priority) {
+ case CRYPT_SLOT_PRIORITY_IGNORE: text = "ignored"; break;
+ case CRYPT_SLOT_PRIORITY_PREFER: text = "preferred"; break;
+ case CRYPT_SLOT_PRIORITY_NORMAL: text = "normal"; break;
+ default: text = "invalid";
+ }
+
+ return text;
+}
+
/* Print a per-keyslot summary: type, bound/unbound state, key size,
 * priority, handler-specific details and owning digest(s). */
static void hdr_dump_keyslots(struct crypt_device *cd, json_object *hdr_jobj)
{
	char slot[16];
	json_object *keyslots_jobj, *digests_jobj, *jobj2, *jobj3, *val;
	const char *tmps;
	int i, j, r;

	log_std(cd, "Keyslots:\n");
	json_object_object_get_ex(hdr_jobj, "keyslots", &keyslots_jobj);

	for (j = 0; j < LUKS2_KEYSLOTS_MAX; j++) {
		if (snprintf(slot, sizeof(slot), "%i", j) < 0)
			slot[0] = '\0';
		json_object_object_get_ex(keyslots_jobj, slot, &val);
		if (!val)
			continue;

		json_object_object_get_ex(val, "type", &jobj2);
		tmps = json_object_get_string(jobj2);

		/* -ENOENT means the keyslot is not bound to any data segment */
		r = LUKS2_keyslot_for_segment(crypt_get_hdr(cd, CRYPT_LUKS2), j, CRYPT_ONE_SEGMENT);
		log_std(cd, "  %s: %s%s\n", slot, tmps, r == -ENOENT ? " (unbound)" : "");

		if (json_object_object_get_ex(val, "key_size", &jobj2))
			log_std(cd, "\tKey: %u bits\n", crypt_jobj_get_uint32(jobj2) * 8);

		log_std(cd, "\tPriority: %s\n", get_priority_desc(val));

		/* handler-specific (pbkdf, cipher, area) details */
		LUKS2_keyslot_dump(cd, j);

		/* list every digest whose "keyslots" array references this slot */
		json_object_object_get_ex(hdr_jobj, "digests", &digests_jobj);
		json_object_object_foreach(digests_jobj, key2, val2) {
			json_object_object_get_ex(val2, "keyslots", &jobj2);
			for (i = 0; i < (int) json_object_array_length(jobj2); i++) {
				jobj3 = json_object_array_get_idx(jobj2, i);
				if (!strcmp(slot, json_object_get_string(jobj3))) {
					log_std(cd, "\tDigest ID: %s\n", key2);
				}
			}
		}
	}
}
+
/* Print a per-token summary: type, handler-specific details and the
 * keyslots the token references. */
static void hdr_dump_tokens(struct crypt_device *cd, json_object *hdr_jobj)
{
	char token[16];
	json_object *tokens_jobj, *jobj2, *jobj3, *val;
	const char *tmps;
	int i, j;

	log_std(cd, "Tokens:\n");
	json_object_object_get_ex(hdr_jobj, "tokens", &tokens_jobj);

	for (j = 0; j < LUKS2_TOKENS_MAX; j++) {
		if (snprintf(token, sizeof(token), "%i", j) < 0)
			token[0] = '\0';
		json_object_object_get_ex(tokens_jobj, token, &val);
		if (!val)
			continue;

		json_object_object_get_ex(val, "type", &jobj2);
		tmps = json_object_get_string(jobj2);
		log_std(cd, "  %s: %s\n", token, tmps);

		/* handler-specific token fields */
		LUKS2_token_dump(cd, j);

		json_object_object_get_ex(val, "keyslots", &jobj2);
		for (i = 0; i < (int) json_object_array_length(jobj2); i++) {
			jobj3 = json_object_array_get_idx(jobj2, i);
			log_std(cd, "\tKeyslot: %s\n", json_object_get_string(jobj3));
		}
	}
}
+
/* Print every data segment: type, offset, size, cipher, sector size,
 * integrity type and per-segment flags. */
static void hdr_dump_segments(struct crypt_device *cd, json_object *hdr_jobj)
{
	char segment[16];
	json_object *jobj_segments, *jobj_segment, *jobj1, *jobj2;
	int i, j, flags;
	uint64_t value;

	log_std(cd, "Data segments:\n");
	json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments);

	for (i = 0; i < LUKS2_SEGMENT_MAX; i++) {
		if (snprintf(segment, sizeof(segment), "%i", i) < 0)
			segment[0] = '\0';
		if (!json_object_object_get_ex(jobj_segments, segment, &jobj_segment))
			continue;

		json_object_object_get_ex(jobj_segment, "type", &jobj1);
		log_std(cd, "  %s: %s\n", segment, json_object_get_string(jobj1));

		json_object_object_get_ex(jobj_segment, "offset", &jobj1);
		json_str_to_uint64(jobj1, &value);
		log_std(cd, "\toffset: %" PRIu64 " [bytes]\n", value);

		json_object_object_get_ex(jobj_segment, "size", &jobj1);
		/* "dynamic" size means the segment extends to the device end */
		if (!(strcmp(json_object_get_string(jobj1), "dynamic")))
			log_std(cd, "\tlength: (whole device)\n");
		else {
			json_str_to_uint64(jobj1, &value);
			log_std(cd, "\tlength: %" PRIu64 " [bytes]\n", value);
		}

		if (json_object_object_get_ex(jobj_segment, "encryption", &jobj1))
			log_std(cd, "\tcipher: %s\n", json_object_get_string(jobj1));

		if (json_object_object_get_ex(jobj_segment, "sector_size", &jobj1))
			log_std(cd, "\tsector: %" PRIu32 " [bytes]\n", crypt_jobj_get_uint32(jobj1));

		if (json_object_object_get_ex(jobj_segment, "integrity", &jobj1) &&
		    json_object_object_get_ex(jobj1, "type", &jobj2))
			log_std(cd, "\tintegrity: %s\n", json_object_get_string(jobj2));

		/* flags are printed comma-separated on one line */
		if (json_object_object_get_ex(jobj_segment, "flags", &jobj1) &&
		    (flags = (int)json_object_array_length(jobj1)) > 0) {
			jobj2 = json_object_array_get_idx(jobj1, 0);
			log_std(cd, "\tflags : %s", json_object_get_string(jobj2));
			for (j = 1; j < flags; j++) {
				jobj2 = json_object_array_get_idx(jobj1, j);
				log_std(cd, ", %s", json_object_get_string(jobj2));
			}
			log_std(cd, "\n");
		}

		log_std(cd, "\n");
	}
}
+
+static void hdr_dump_digests(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ char key[16];
+ json_object *jobj1, *jobj2, *val;
+ const char *tmps;
+ int i;
+
+ log_std(cd, "Digests:\n");
+ json_object_object_get_ex(hdr_jobj, "digests", &jobj1);
+
+ for (i = 0; i < LUKS2_DIGEST_MAX; i++) {
+ if (snprintf(key, sizeof(key), "%i", i) < 0)
+ key[0] = '\0';
+ json_object_object_get_ex(jobj1, key, &val);
+ if (!val)
+ continue;
+
+ json_object_object_get_ex(val, "type", &jobj2);
+ tmps = json_object_get_string(jobj2);
+ log_std(cd, " %s: %s\n", key, tmps);
+
+ LUKS2_digest_dump(cd, i);
+ }
+}
+
/* Print a human-readable summary of the LUKS2 header (luksDump output). */
int LUKS2_hdr_dump(struct crypt_device *cd, struct luks2_hdr *hdr)
{
	if (!hdr->jobj)
		return -EINVAL;

	/* full JSON metadata goes to the debug log only */
	JSON_DBG(cd, hdr->jobj, NULL);

	log_std(cd, "LUKS header information\n");
	log_std(cd, "Version: \t%u\n", hdr->version);
	log_std(cd, "Epoch: \t%" PRIu64 "\n", hdr->seqid);
	log_std(cd, "Metadata area: \t%" PRIu64 " [bytes]\n", LUKS2_metadata_size(hdr));
	log_std(cd, "Keyslots area: \t%" PRIu64 " [bytes]\n", LUKS2_keyslots_size(hdr));
	log_std(cd, "UUID: \t%s\n", *hdr->uuid ? hdr->uuid : "(no UUID)");
	log_std(cd, "Label: \t%s\n", *hdr->label ? hdr->label : "(no label)");
	log_std(cd, "Subsystem: \t%s\n", *hdr->subsystem ? hdr->subsystem : "(no subsystem)");

	hdr_dump_config(cd, hdr->jobj);
	hdr_dump_segments(cd, hdr->jobj);
	hdr_dump_keyslots(cd, hdr->jobj);
	hdr_dump_tokens(cd, hdr->jobj);
	hdr_dump_digests(cd, hdr->jobj);

	return 0;
}
+
+int LUKS2_hdr_dump_json(struct crypt_device *cd, struct luks2_hdr *hdr, const char **json)
+{
+ const char *json_buf;
+
+ json_buf = json_object_to_json_string_ext(hdr->jobj,
+ JSON_C_TO_STRING_PRETTY | JSON_C_TO_STRING_NOSLASHESCAPE);
+
+ if (!json_buf)
+ return -EINVAL;
+
+ if (json)
+ *json = json_buf;
+ else
+ crypt_log(cd, CRYPT_LOG_NORMAL, json_buf);
+
+ return 0;
+}
+
/*
 * Compute the total size in bytes covered by all non-backup data segments.
 * Sets *dynamic when the last segment has no fixed size ("dynamic"); in
 * that case *size is the minimal usable data size (0 when the dynamic
 * segment is the only one). Returns 0 on success, -EINVAL on malformed
 * metadata.
 */
int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size, bool *dynamic)
{
	int i, len, sector_size;
	json_object *jobj_segments, *jobj_segment, *jobj_size;
	uint64_t tmp = 0;

	if (!size || !json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
		return -EINVAL;

	len = json_object_object_length(jobj_segments);

	for (i = 0; i < len; i++) {
		if (!(jobj_segment = json_segments_get_segment(jobj_segments, i)))
			return -EINVAL;

		/* backup segments are not part of the usable data area */
		if (json_segment_is_backup(jobj_segment))
			break;

		json_object_object_get_ex(jobj_segment, "size", &jobj_size);
		if (!strcmp(json_object_get_string(jobj_size), "dynamic")) {
			sector_size = json_segment_get_sector_size(jobj_segment);
			/* last dynamic segment must have at least one sector in size */
			if (tmp)
				*size = tmp + (sector_size > 0 ? sector_size : SECTOR_SIZE);
			else
				*size = 0;
			if (dynamic)
				*dynamic = true;
			return 0;
		}

		tmp += crypt_jobj_get_uint64(jobj_size);
	}

	/* impossible, real device size must not be zero */
	if (!tmp)
		return -EINVAL;

	*size = tmp;
	if (dynamic)
		*dynamic = false;
	return 0;
}
+
+uint64_t LUKS2_get_data_offset(struct luks2_hdr *hdr)
+{
+ crypt_reencrypt_info ri;
+ json_object *jobj;
+
+ ri = LUKS2_reencrypt_status(hdr);
+ if (ri == CRYPT_REENCRYPT_CLEAN || ri == CRYPT_REENCRYPT_CRASH) {
+ jobj = LUKS2_get_segment_by_flag(hdr, "backup-final");
+ if (jobj)
+ return json_segment_get_offset(jobj, 1);
+ }
+
+ return json_segments_get_minimal_offset(LUKS2_get_segments_jobj(hdr), 1);
+}
+
+const char *LUKS2_get_cipher(struct luks2_hdr *hdr, int segment)
+{
+ json_object *jobj_segment;
+
+ if (!hdr)
+ return NULL;
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ jobj_segment = json_segments_get_segment(json_get_segments_jobj(hdr->jobj), segment);
+ if (!jobj_segment)
+ return NULL;
+
+ /* FIXME: default encryption (for other segment types) must be string here. */
+ return json_segment_get_cipher(jobj_segment) ?: "null";
+}
+
+crypt_reencrypt_info LUKS2_reencrypt_status(struct luks2_hdr *hdr)
+{
+ uint32_t reqs;
+
+ /*
+ * Any unknown requirement or offline reencryption should abort
+ * anything related to online-reencryption handling
+ */
+ if (LUKS2_config_get_requirements(NULL, hdr, &reqs))
+ return CRYPT_REENCRYPT_INVALID;
+
+ if (!reqs_reencrypt_online(reqs))
+ return CRYPT_REENCRYPT_NONE;
+
+ if (json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr)) < 0)
+ return CRYPT_REENCRYPT_CLEAN;
+
+ return CRYPT_REENCRYPT_CRASH;
+}
+
+const char *LUKS2_get_keyslot_cipher(struct luks2_hdr *hdr, int keyslot, size_t *key_size)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj1;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return NULL;
+
+ /* currently we only support raw length preserving area encryption */
+ json_object_object_get_ex(jobj_area, "type", &jobj1);
+ if (strcmp(json_object_get_string(jobj1), "raw"))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_area, "key_size", &jobj1))
+ return NULL;
+ *key_size = json_object_get_int(jobj1);
+
+ if (!json_object_object_get_ex(jobj_area, "encryption", &jobj1))
+ return NULL;
+
+ return json_object_get_string(jobj1);
+}
+
+const char *LUKS2_get_integrity(struct luks2_hdr *hdr, int segment)
+{
+ json_object *jobj1, *jobj2, *jobj3;
+
+ jobj1 = LUKS2_get_segment_jobj(hdr, segment);
+ if (!jobj1)
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, "integrity", &jobj2))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj2, "type", &jobj3))
+ return NULL;
+
+ return json_object_get_string(jobj3);
+}
+
+/* FIXME: this only ensures that once we have journal encryption, it is not ignored. */
+/* implement segment count and type restrictions (crypt and only single crypt) */
+static int LUKS2_integrity_compatible(struct luks2_hdr *hdr)
+{
+ json_object *jobj1, *jobj2, *jobj3, *jobj4;
+ const char *str;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj1))
+ return 0;
+
+ if (!(jobj2 = LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT)))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj2, "integrity", &jobj3))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj3, "journal_encryption", &jobj4) ||
+ !(str = json_object_get_string(jobj4)) ||
+ strcmp(str, "none"))
+ return 0;
+
+ if (!json_object_object_get_ex(jobj3, "journal_integrity", &jobj4) ||
+ !(str = json_object_get_string(jobj4)) ||
+ strcmp(str, "none"))
+ return 0;
+
+ return 1;
+}
+
+static int LUKS2_keyslot_get_volume_key_size(struct luks2_hdr *hdr, const char *keyslot)
+{
+ json_object *jobj1, *jobj2, *jobj3;
+
+ if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj1))
+ return -1;
+
+ if (!json_object_object_get_ex(jobj1, keyslot, &jobj2))
+ return -1;
+
+ if (!json_object_object_get_ex(jobj2, "key_size", &jobj3))
+ return -1;
+
+ return json_object_get_int(jobj3);
+}
+
/* Key size used for encryption of keyslot */
int LUKS2_get_keyslot_stored_key_size(struct luks2_hdr *hdr, int keyslot)
{
	char slot_name[16];
	int r;

	r = snprintf(slot_name, sizeof(slot_name), "%u", keyslot);
	if (r < 1)
		return -1;

	return LUKS2_keyslot_get_volume_key_size(hdr, slot_name);
}
+
/*
 * Return the volume key size (bytes) for given segment: find a digest
 * referencing the segment and read the key size from the first keyslot
 * assigned to that digest. Returns -1 when no such digest/keyslot exists.
 */
int LUKS2_get_volume_key_size(struct luks2_hdr *hdr, int segment)
{
	json_object *jobj_digests, *jobj_digest_segments, *jobj_digest_keyslots, *jobj1;
	char buf[16];

	if (segment == CRYPT_DEFAULT_SEGMENT)
		segment = LUKS2_get_default_segment(hdr);

	if (snprintf(buf, sizeof(buf), "%u", segment) < 1)
		return -1;

	json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);

	json_object_object_foreach(jobj_digests, key, val) {
		UNUSED(key);
		json_object_object_get_ex(val, "segments", &jobj_digest_segments);
		json_object_object_get_ex(val, "keyslots", &jobj_digest_keyslots);

		/* digest must reference this segment and at least one keyslot */
		if (!LUKS2_array_jobj(jobj_digest_segments, buf))
			continue;
		if (json_object_array_length(jobj_digest_keyslots) <= 0)
			continue;

		jobj1 = json_object_array_get_idx(jobj_digest_keyslots, 0);

		return LUKS2_keyslot_get_volume_key_size(hdr, json_object_get_string(jobj1));
	}

	return -1;
}
+
+uint32_t LUKS2_get_sector_size(struct luks2_hdr *hdr)
+{
+ return json_segment_get_sector_size(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT));
+}
+
/*
 * Build a multi-segment dm table (crypt and linear targets) matching
 * @jobj_segments into @dmd. Volume keys for crypt segments are looked up
 * in @vks by the digest id of each segment. On error all allocated
 * targets are freed. Returns 0 on success, negative errno otherwise.
 */
int LUKS2_assembly_multisegment_dmd(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct volume_key *vks,
	json_object *jobj_segments,
	struct crypt_dm_active_device *dmd)
{
	struct volume_key *vk;
	json_object *jobj;
	enum devcheck device_check;
	int r;
	unsigned s = 0;
	uint64_t data_offset, segment_size, segment_offset, segment_start = 0;
	struct dm_target *t = &dmd->segment;

	/* shared activation must not demand exclusive access to the data device */
	if (dmd->flags & CRYPT_ACTIVATE_SHARED)
		device_check = DEV_OK;
	else
		device_check = DEV_EXCL;

	data_offset = LUKS2_reencrypt_data_offset(hdr, true);

	r = device_block_adjust(cd, crypt_data_device(cd), device_check,
		data_offset, &dmd->size, &dmd->flags);
	if (r)
		return r;

	r = dm_targets_allocate(&dmd->segment, json_segments_count(jobj_segments));
	if (r)
		goto err;

	r = -EINVAL;

	/* walk the preallocated target list; one target per JSON segment */
	while (t) {
		jobj = json_segments_get_segment(jobj_segments, s);
		if (!jobj) {
			log_dbg(cd, "Internal error. Segment %u is null.", s);
			r = -EINVAL;
			goto err;
		}

		segment_offset = json_segment_get_offset(jobj, 1);
		segment_size = json_segment_get_size(jobj, 1);
		/* 'dynamic' length allowed in last segment only */
		if (!segment_size && !t->next)
			segment_size = dmd->size - segment_start;
		if (!segment_size) {
			log_dbg(cd, "Internal error. Wrong segment size %u", s);
			r = -EINVAL;
			goto err;
		}

		if (!strcmp(json_segment_type(jobj), "crypt")) {
			vk = crypt_volume_key_by_id(vks, LUKS2_digest_by_segment(hdr, s));
			if (!vk) {
				log_err(cd, _("Missing key for dm-crypt segment %u"), s);
				r = -EINVAL;
				goto err;
			}

			r = dm_crypt_target_set(t, segment_start, segment_size,
					crypt_data_device(cd), vk,
					json_segment_get_cipher(jobj),
					json_segment_get_iv_offset(jobj),
					segment_offset, "none", 0,
					json_segment_get_sector_size(jobj));
			if (r) {
				log_err(cd, _("Failed to set dm-crypt segment."));
				goto err;
			}
		} else if (!strcmp(json_segment_type(jobj), "linear")) {
			r = dm_linear_target_set(t, segment_start, segment_size, crypt_data_device(cd), segment_offset);
			if (r) {
				log_err(cd, _("Failed to set dm-linear segment."));
				goto err;
			}
		} else {
			/* any other segment type cannot be mapped */
			r = -EINVAL;
			goto err;
		}

		segment_start += segment_size;
		t = t->next;
		s++;
	}

	return r;
err:
	dm_targets_free(cd, dmd);
	return r;
}
+
+/* FIXME: This shares almost all code with activate_multi_custom */
/* Reload the active device @name with a table built from @jobj_segments
 * (used while online reencryption moves segment boundaries). */
static int _reload_custom_multi(struct crypt_device *cd,
	const char *name,
	struct volume_key *vks,
	json_object *jobj_segments,
	uint64_t device_size,
	uint32_t flags)
{
	int r;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	struct crypt_dm_active_device dmd = {
		.uuid = crypt_get_uuid(cd),
		.size = device_size >> SECTOR_SHIFT /* bytes converted to 512B sectors */
	};

	/* do not allow activation when particular requirements detected */
	if ((r = LUKS2_unmet_requirements(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 0)))
		return r;

	/* Add persistent activation flags */
	if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
		LUKS2_config_get_flags(cd, hdr, &dmd.flags);

	/* a reload shares the data device with the mapping being replaced */
	dmd.flags |= (flags | CRYPT_ACTIVATE_SHARED);

	r = LUKS2_assembly_multisegment_dmd(cd, hdr, vks, jobj_segments, &dmd);
	if (!r)
		r = dm_reload_device(cd, name, &dmd, 0, 0);

	dm_targets_free(cd, &dmd);
	return r;
}
+
+int LUKS2_reload(struct crypt_device *cd,
+ const char *name,
+ struct volume_key *vks,
+ uint64_t device_size,
+ uint32_t flags)
+{
+ if (crypt_get_integrity_tag_size(cd))
+ return -ENOTSUP;
+
+ return _reload_custom_multi(cd, name, vks,
+ LUKS2_get_segments_jobj(crypt_get_hdr(cd, CRYPT_LUKS2)), device_size, flags);
+}
+
/* Activate a device mapped from all current header segments (used by
 * online reencryption). */
int LUKS2_activate_multi(struct crypt_device *cd,
	const char *name,
	struct volume_key *vks,
	uint64_t device_size,
	uint32_t flags)
{
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	json_object *jobj_segments = LUKS2_get_segments_jobj(hdr);
	int r;
	struct crypt_dm_active_device dmd = {
		/* NOTE(review): unlike _reload_custom_multi() no SECTOR_SHIFT is
		 * applied here, so device_size appears to be expected in 512B
		 * sectors already — confirm against callers. */
		.size = device_size,
		.uuid = crypt_get_uuid(cd)
	};

	/* do not allow activation when particular requirements detected */
	if ((r = LUKS2_unmet_requirements(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 0)))
		return r;

	/* Add persistent activation flags */
	if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
		LUKS2_config_get_flags(cd, hdr, &dmd.flags);

	dmd.flags |= flags;

	r = LUKS2_assembly_multisegment_dmd(cd, hdr, vks, jobj_segments, &dmd);
	if (!r)
		r = dm_create_device(cd, name, CRYPT_LUKS2, &dmd);

	dm_targets_free(cd, &dmd);
	return r;
}
+
/*
 * Activate a regular single-segment LUKS2 device. When the segment uses
 * integrity tags the dm-crypt mapping is stacked on top of a dm-integrity
 * device created here.
 */
int LUKS2_activate(struct crypt_device *cd,
	const char *name,
	struct volume_key *vk,
	uint32_t flags)
{
	int r;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	struct crypt_dm_active_device dmdi = {}, dmd = {
		.uuid   = crypt_get_uuid(cd)
	};

	/* do not allow activation when particular requirements detected */
	if ((r = LUKS2_unmet_requirements(cd, hdr, 0, 0)))
		return r;

	r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
			vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
			crypt_get_data_offset(cd), crypt_get_integrity(cd) ?: "none",
			crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
	if (r < 0)
		return r;

	/* Add persistent activation flags */
	if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
		LUKS2_config_get_flags(cd, hdr, &dmd.flags);

	dmd.flags |= flags;

	if (crypt_get_integrity_tag_size(cd)) {
		if (!LUKS2_integrity_compatible(hdr)) {
			log_err(cd, _("Unsupported device integrity configuration."));
			return -EINVAL;
		}

		if (dmd.flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) {
			log_err(cd, _("Discard/TRIM is not supported."));
			return -EINVAL;
		}

		r = INTEGRITY_create_dmd_device(cd, NULL, NULL, NULL, NULL, &dmdi, dmd.flags, 0);
		if (r)
			return r;

		/* the crypt mapping sits at offset 0 of the integrity device
		 * and inherits its size */
		dmdi.flags |= CRYPT_ACTIVATE_PRIVATE;
		dmdi.uuid = dmd.uuid;
		dmd.segment.u.crypt.offset = 0;
		dmd.segment.size = dmdi.segment.size;

		r = create_or_reload_device_with_integrity(cd, name, CRYPT_LUKS2, &dmd, &dmdi);
	} else
		r = create_or_reload_device(cd, name, CRYPT_LUKS2, &dmd);

	dm_targets_free(cd, &dmd);
	dm_targets_free(cd, &dmdi);

	return r;
}
+
/*
 * Check whether a dm device name looks like an auxiliary device created by
 * LUKS2 online reencryption (the "...-hotzone-..." and "...-overlay" helpers).
 *
 * Bug fix: the hotzone comparison previously started 8 bytes before the
 * terminating NUL while comparing 9 bytes, so its 9th compared byte was
 * always the NUL terminator against '-' and the test could never succeed
 * (the branch was dead). Anchor the compare 9 bytes from the end so the
 * 9-character "-hotzone-" marker aligns exactly.
 */
static bool is_reencryption_helper(const char *name)
{
	size_t len;

	if (!name)
		return false;

	len = strlen(name);
	return (len >= 9 && (!strncmp(name + len - 9, "-hotzone-", 9) ||
		!strcmp(name + len - 8, "-overlay")));
}
+
/* True when any name in the NULL-terminated list is a reencryption helper. */
static bool contains_reencryption_helper(char **names)
{
	char **p;

	for (p = names; *p; p++)
		if (is_reencryption_helper(*p))
			return true;

	return false;
}
+
/*
 * Deactivate a LUKS2 device including its LUKS2 subdevice dependencies
 * (online reencryption helper devices, dm-integrity subdevice). Keyring
 * keys loaded for dm-crypt targets are dropped after each removal.
 * Returns 0 on success (or when dependent devices were already inactive),
 * negative errno otherwise.
 */
int LUKS2_deactivate(struct crypt_device *cd, const char *name, struct luks2_hdr *hdr, struct crypt_dm_active_device *dmd, uint32_t flags)
{
	int r, ret;
	struct dm_target *tgt;
	crypt_status_info ci;
	struct crypt_dm_active_device dmdc;
	char **dep, deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = { 0 };
	const char *namei = NULL;
	struct crypt_lock_handle *reencrypt_lock = NULL;

	if (!dmd || !dmd->uuid || strncmp(CRYPT_LUKS2, dmd->uuid, sizeof(CRYPT_LUKS2)-1))
		return -EINVAL;

	/* uuid mismatch with metadata (if available) */
	if (hdr && crypt_uuid_cmp(dmd->uuid, hdr->uuid))
		return -EINVAL;

	/* LUKS2 subdevices share a CRYPT_SUBDEV uuid prefix derived from this
	 * device's uuid; it is used below to enumerate dependent devices */
	r = snprintf(deps_uuid_prefix, sizeof(deps_uuid_prefix), CRYPT_SUBDEV "-%.32s", dmd->uuid + 6);
	if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1))
		return -EINVAL;

	tgt = &dmd->segment;

	/* TODO: We have LUKS2 dependencies now */
	if (single_segment(dmd) && tgt->type == DM_CRYPT && tgt->u.crypt.tag_size)
		namei = device_dm_name(tgt->data_device);

	r = dm_device_deps(cd, name, deps_uuid_prefix, deps, ARRAY_SIZE(deps));
	if (r < 0)
		goto out;

	/* reencryption helpers present: take the reencryption lock first */
	if (contains_reencryption_helper(deps)) {
		r = LUKS2_reencrypt_lock_by_dm_uuid(cd, dmd->uuid, &reencrypt_lock);
		if (r) {
			if (r == -EBUSY)
				log_err(cd, _("Reencryption in-progress. Cannot deactivate device."));
			else
				log_err(cd, _("Failed to get reencryption lock."));
			goto out;
		}
	}

	/* replace suspended reencryption helpers with dm-error before removal */
	dep = deps;
	while (*dep) {
		if (is_reencryption_helper(*dep) && (dm_status_suspended(cd, *dep) > 0)) {
			if (dm_error_device(cd, *dep))
				log_err(cd, _("Failed to replace suspended device %s with dm-error target."), *dep);
		}
		dep++;
	}

	r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmdc);
	if (r < 0) {
		memset(&dmdc, 0, sizeof(dmdc));
		dmdc.segment.type = DM_UNKNOWN;
	}

	/* Remove top level device first */
	r = dm_remove_device(cd, name, flags);
	if (!r) {
		/* drop keyring keys referenced by the removed crypt targets */
		tgt = &dmdc.segment;
		while (tgt) {
			if (tgt->type == DM_CRYPT)
				crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description, LOGON_KEY);
			tgt = tgt->next;
		}
	}
	dm_targets_free(cd, &dmdc);

	/* TODO: We have LUKS2 dependencies now */
	if (r >= 0 && namei) {
		log_dbg(cd, "Deactivating integrity device %s.", namei);
		r = dm_remove_device(cd, namei, 0);
	}

	if (!r) {
		/* remove all dependent subdevices, remembering the first error */
		ret = 0;
		dep = deps;
		while (*dep) {
			log_dbg(cd, "Deactivating LUKS2 dependent device %s.", *dep);
			r = dm_query_device(cd, *dep, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmdc);
			if (r < 0) {
				memset(&dmdc, 0, sizeof(dmdc));
				dmdc.segment.type = DM_UNKNOWN;
			}

			r = dm_remove_device(cd, *dep, flags);
			if (r < 0) {
				ci = crypt_status(cd, *dep);
				if (ci == CRYPT_BUSY)
					log_err(cd, _("Device %s is still in use."), *dep);
				/* an already inactive dependency is not an error */
				if (ci == CRYPT_INACTIVE)
					r = 0;
			}
			if (!r) {
				tgt = &dmdc.segment;
				while (tgt) {
					if (tgt->type == DM_CRYPT)
						crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description, LOGON_KEY);
					tgt = tgt->next;
				}
			}
			dm_targets_free(cd, &dmdc);
			if (r && !ret)
				ret = r;
			dep++;
		}
		r = ret;
	}

out:
	LUKS2_reencrypt_unlock(cd, reencrypt_lock);
	dep = deps;
	while (*dep)
		free(*dep++);

	return r;
}
+
/* Verify header requirements against the permitted mask @reqs_mask.
 * Returns 0 when all requirements are met/masked, -ETXTBSY for unknown
 * requirements, -EINVAL for known-but-unmasked ones, or the error from
 * reading the requirements. */
int LUKS2_unmet_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs_mask, int quiet)
{
	uint32_t reqs;
	int r;

	r = LUKS2_config_get_requirements(cd, hdr, &reqs);
	if (r) {
		if (!quiet)
			log_err(cd, _("Failed to read LUKS2 requirements."));
		return r;
	}

	/* do not mask unknown requirements check */
	if (reqs_unknown(reqs)) {
		if (!quiet)
			log_err(cd, _("Unmet LUKS2 requirements detected."));
		return -ETXTBSY;
	}

	/* mask out permitted requirements */
	reqs &= ~reqs_mask;

	if (!quiet && reqs_reencrypt(reqs))
		log_err(cd, _("Operation incompatible with device marked for legacy reencryption. Aborting."));
	if (!quiet && reqs_reencrypt_online(reqs))
		log_err(cd, _("Operation incompatible with device marked for LUKS2 reencryption. Aborting."));

	/* any remaining unmasked requirement fails the check */
	return reqs ? -EINVAL : 0;
}
+
+/*
+ * NOTE: this routine is called on json object that failed validation.
+ * Proceed with caution :)
+ *
+ * known glitches so far:
+ *
+ * any version < 2.0.3:
+ * - luks2 keyslot pbkdf params change via crypt_keyslot_change_by_passphrase()
+ * could leave previous type parameters behind. Correct this by purging
+ * all params not needed by current type.
+ */
+void LUKS2_hdr_repair(struct crypt_device *cd, json_object *hdr_jobj)
+{
+ json_object *jobj_keyslots;
+
+ if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
+ return;
+ if (!json_object_is_type(jobj_keyslots, json_type_object))
+ return;
+
+ LUKS2_keyslots_repair(cd, jobj_keyslots);
+}
+
+void json_object_object_del_by_uint(json_object *jobj, unsigned key)
+{
+ char key_name[16];
+
+ if (snprintf(key_name, sizeof(key_name), "%u", key) < 1)
+ return;
+ json_object_object_del(jobj, key_name);
+}
+
/* Add @jobj_val to @jobj under the decimal string form of @key (the
 * reference is taken over by @jobj). Returns 0, -EINVAL on key formatting
 * failure, or -ENOMEM. */
int json_object_object_add_by_uint(json_object *jobj, unsigned key, json_object *jobj_val)
{
	char key_name[16];

	if (snprintf(key_name, sizeof(key_name), "%u", key) < 1)
		return -EINVAL;

#if HAVE_DECL_JSON_OBJECT_OBJECT_ADD_EX
	/* _add_ex reports failure; plain json_object_object_add in older
	 * json-c returns void, so no error can be detected there */
	return json_object_object_add_ex(jobj, key_name, jobj_val, 0) ? -ENOMEM : 0;
#else
	json_object_object_add(jobj, key_name, jobj_val);
	return 0;
#endif
}
+
+/* jobj_dst must contain pointer initialized to NULL (see json-c json_object_deep_copy API) */
/* Deep-copy @jobj_src into *@jobj_dst. Returns 0 on success, -1 on failure
 * or invalid arguments. */
int json_object_copy(json_object *jobj_src, json_object **jobj_dst)
{
	if (!jobj_src || !jobj_dst || *jobj_dst)
		return -1;

#if HAVE_DECL_JSON_OBJECT_DEEP_COPY
	return json_object_deep_copy(jobj_src, jobj_dst, NULL);
#else
	/* fallback for old json-c: serialize and re-parse to get a deep copy */
	*jobj_dst = json_tokener_parse(json_object_get_string(jobj_src));
	return *jobj_dst ? 0 : -1;
#endif
}
diff --git a/lib/luks2/luks2_keyslot.c b/lib/luks2/luks2_keyslot.c
new file mode 100644
index 0000000..5cf4b83
--- /dev/null
+++ b/lib/luks2/luks2_keyslot.c
@@ -0,0 +1,977 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, keyslot handling
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
/* Internal implementations */
extern const keyslot_handler luks2_keyslot;
extern const keyslot_handler reenc_keyslot;

/*
 * Registry of built-in keyslot type handlers.
 * NULL-terminated; sized LUKS2_KEYSLOTS_MAX so lookups stay bounded
 * even if the table ever fills up (see LUKS2_keyslot_handler_type).
 */
static const keyslot_handler *keyslot_handlers[LUKS2_KEYSLOTS_MAX] = {
	&luks2_keyslot,
#if USE_LUKS2_REENCRYPTION
	&reenc_keyslot,
#endif
	NULL
};
+
+static const keyslot_handler
+*LUKS2_keyslot_handler_type(const char *type)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX && keyslot_handlers[i]; i++) {
+ if (!strcmp(keyslot_handlers[i]->name, type))
+ return keyslot_handlers[i];
+ }
+
+ return NULL;
+}
+
+static const keyslot_handler
+*LUKS2_keyslot_handler(struct crypt_device *cd, int keyslot)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj1, *jobj2;
+
+ if (keyslot < 0)
+ return NULL;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return NULL;
+
+ if (!(jobj1 = LUKS2_get_keyslot_jobj(hdr, keyslot)))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, "type", &jobj2))
+ return NULL;
+
+ return LUKS2_keyslot_handler_type(json_object_get_string(jobj2));
+}
+
+int LUKS2_keyslot_find_empty(struct crypt_device *cd, struct luks2_hdr *hdr, size_t keylength)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
+ if (!LUKS2_get_keyslot_jobj(hdr, i))
+ break;
+
+ if (i == LUKS2_KEYSLOTS_MAX)
+ return -EINVAL;
+
+ /* Check also there is a space for the key in keyslots area */
+ if (keylength && LUKS2_find_area_gap(cd, hdr, keylength, NULL, NULL) < 0)
+ return -ENOSPC;
+
+ return i;
+}
+
/*
 * Check if a keyslot is assigned to specific segment.
 * For segment >= 0 returns 1 (assigned) or 0; for negative segment returns
 * the number of segments sharing the keyslot's digest; negative errno on error.
 */
static int _keyslot_for_segment(struct luks2_hdr *hdr, int keyslot, int segment)
{
	int keyslot_digest, count = 0;
	unsigned s, segments_count;

	keyslot_digest = LUKS2_digest_by_keyslot(hdr, keyslot);
	if (keyslot_digest < 0)
		return keyslot_digest;

	if (segment >= 0)
		return keyslot_digest == LUKS2_digest_by_segment(hdr, segment);

	/* Hoist the loop-invariant segment count out of the loop condition */
	segments_count = json_segments_count(LUKS2_get_segments_jobj(hdr));
	for (s = 0; s < segments_count; s++) {
		if (keyslot_digest == LUKS2_digest_by_segment(hdr, s))
			count++;
	}

	return count;
}
+
/*
 * Check the keyslot is bound to the given digest.
 * Returns 0 on match, -ENOENT on mismatch, other negative errno on lookup error.
 */
static int _keyslot_for_digest(struct luks2_hdr *hdr, int keyslot, int digest)
{
	int r;

	/* Digest lookup doubles as the keyslot existence/binding check */
	r = LUKS2_digest_by_keyslot(hdr, keyslot);
	if (r < 0)
		return r;

	return r == digest ? 0 : -ENOENT;
}
+
+int LUKS2_keyslot_for_segment(struct luks2_hdr *hdr, int keyslot, int segment)
+{
+ int r = -EINVAL;
+
+ /* no need to check anything */
+ if (segment == CRYPT_ANY_SEGMENT)
+ return 0; /* ok */
+ if (segment == CRYPT_DEFAULT_SEGMENT) {
+ segment = LUKS2_get_default_segment(hdr);
+ if (segment < 0)
+ return segment;
+ }
+
+ r = _keyslot_for_segment(hdr, keyslot, segment);
+ if (r < 0)
+ return r;
+
+ return r >= 1 ? 0 : -ENOENT;
+}
+
+/* Number of keyslots assigned to a segment or all keyslots for CRYPT_ANY_SEGMENT */
+int LUKS2_keyslot_active_count(struct luks2_hdr *hdr, int segment)
+{
+ int num = 0;
+ json_object *jobj_keyslots;
+
+ json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);
+
+ json_object_object_foreach(jobj_keyslots, slot, val) {
+ UNUSED(val);
+ if (!LUKS2_keyslot_for_segment(hdr, atoi(slot), segment))
+ num++;
+ }
+
+ return num;
+}
+
+int LUKS2_keyslot_cipher_incompatible(struct crypt_device *cd, const char *cipher_spec)
+{
+ char cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
+
+ if (!cipher_spec || crypt_is_cipher_null(cipher_spec))
+ return 1;
+
+ if (crypt_parse_name_and_mode(cipher_spec, cipher, NULL, cipher_mode) < 0)
+ return 1;
+
+ /* Keyslot is already authenticated; we cannot use integrity tags here */
+ if (crypt_get_integrity_tag_size(cd))
+ return 1;
+
+ /* Wrapped key schemes cannot be used for keyslot encryption */
+ if (crypt_cipher_wrapped_key(cipher, cipher_mode))
+ return 1;
+
+ /* Check if crypto backend can use the cipher */
+ if (crypt_cipher_ivsize(cipher, cipher_mode) < 0)
+ return 1;
+
+ return 0;
+}
+
+int LUKS2_keyslot_params_default(struct crypt_device *cd, struct luks2_hdr *hdr,
+ struct luks2_keyslot_params *params)
+{
+ const struct crypt_pbkdf_type *pbkdf = crypt_get_pbkdf_type(cd);
+ const char *cipher_spec;
+ size_t key_size;
+ int r;
+
+ if (!hdr || !pbkdf || !params)
+ return -EINVAL;
+
+ /*
+ * set keyslot area encryption parameters
+ */
+ params->area_type = LUKS2_KEYSLOT_AREA_RAW;
+ cipher_spec = crypt_keyslot_get_encryption(cd, CRYPT_ANY_SLOT, &key_size);
+ if (!cipher_spec || !key_size)
+ return -EINVAL;
+
+ params->area.raw.key_size = key_size;
+ r = snprintf(params->area.raw.encryption, sizeof(params->area.raw.encryption), "%s", cipher_spec);
+ if (r < 0 || (size_t)r >= sizeof(params->area.raw.encryption))
+ return -EINVAL;
+
+ /*
+ * set keyslot AF parameters
+ */
+ params->af_type = LUKS2_KEYSLOT_AF_LUKS1;
+ /* currently we use hash for AF from pbkdf settings */
+ r = snprintf(params->af.luks1.hash, sizeof(params->af.luks1.hash), "%s", pbkdf->hash ?: DEFAULT_LUKS1_HASH);
+ if (r < 0 || (size_t)r >= sizeof(params->af.luks1.hash))
+ return -EINVAL;
+ params->af.luks1.stripes = 4000;
+
+ return 0;
+}
+
/*
 * Extract the PBKDF parameters stored in a keyslot's "kdf" object.
 * Returns 0 on success, -EINVAL on invalid slot or malformed metadata,
 * -ENOENT when the keyslot does not exist.
 * NOTE: pbkdf->type/hash point into the header's json objects; they stay
 * valid only as long as hdr's json tree is alive.
 */
int LUKS2_keyslot_pbkdf(struct luks2_hdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf)
{
	json_object *jobj_keyslot, *jobj_kdf, *jobj;

	if (!hdr || !pbkdf)
		return -EINVAL;

	if (LUKS2_keyslot_info(hdr, keyslot) == CRYPT_SLOT_INVALID)
		return -EINVAL;

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
	if (!jobj_keyslot)
		return -ENOENT;

	if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
		return -EINVAL;

	if (!json_object_object_get_ex(jobj_kdf, "type", &jobj))
		return -EINVAL;

	memset(pbkdf, 0, sizeof(*pbkdf));

	pbkdf->type = json_object_get_string(jobj);
	if (json_object_object_get_ex(jobj_kdf, "hash", &jobj))
		pbkdf->hash = json_object_get_string(jobj);
	if (json_object_object_get_ex(jobj_kdf, "iterations", &jobj))
		pbkdf->iterations = json_object_get_int(jobj);
	/* Both cost keys map to iterations: presumably "iterations" is used by
	 * pbkdf2 and "time" by argon2 kdf types — only one should be present.
	 * TODO(review): confirm against the LUKS2 on-disk format spec. */
	if (json_object_object_get_ex(jobj_kdf, "time", &jobj))
		pbkdf->iterations = json_object_get_int(jobj);
	if (json_object_object_get_ex(jobj_kdf, "memory", &jobj))
		pbkdf->max_memory_kb = json_object_get_int(jobj);
	if (json_object_object_get_ex(jobj_kdf, "cpus", &jobj))
		pbkdf->parallel_threads = json_object_get_int(jobj);

	return 0;
}
+
+static int LUKS2_keyslot_unbound(struct luks2_hdr *hdr, int keyslot)
+{
+ json_object *jobj_digest, *jobj_segments;
+ int digest = LUKS2_digest_by_keyslot(hdr, keyslot);
+
+ if (digest < 0)
+ return 0;
+
+ if (!(jobj_digest = LUKS2_get_digest_jobj(hdr, digest)))
+ return 0;
+
+ json_object_object_get_ex(jobj_digest, "segments", &jobj_segments);
+ if (!jobj_segments || !json_object_is_type(jobj_segments, json_type_array) ||
+ json_object_array_length(jobj_segments) == 0)
+ return 1;
+
+ return 0;
+}
+
+crypt_keyslot_info LUKS2_keyslot_info(struct luks2_hdr *hdr, int keyslot)
+{
+ if(keyslot >= LUKS2_KEYSLOTS_MAX || keyslot < 0)
+ return CRYPT_SLOT_INVALID;
+
+ if (!LUKS2_get_keyslot_jobj(hdr, keyslot))
+ return CRYPT_SLOT_INACTIVE;
+
+ if (LUKS2_digest_by_keyslot(hdr, keyslot) < 0 ||
+ LUKS2_keyslot_unbound(hdr, keyslot))
+ return CRYPT_SLOT_UNBOUND;
+
+ if (LUKS2_keyslot_active_count(hdr, CRYPT_DEFAULT_SEGMENT) == 1 &&
+ !LUKS2_keyslot_for_segment(hdr, keyslot, CRYPT_DEFAULT_SEGMENT))
+ return CRYPT_SLOT_ACTIVE_LAST;
+
+ return CRYPT_SLOT_ACTIVE;
+}
+
+int LUKS2_keyslot_jobj_area(json_object *jobj_keyslot, uint64_t *offset, uint64_t *length)
+{
+ json_object *jobj_area, *jobj;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_area, "offset", &jobj))
+ return -EINVAL;
+ *offset = crypt_jobj_get_uint64(jobj);
+
+ if (!json_object_object_get_ex(jobj_area, "size", &jobj))
+ return -EINVAL;
+ *length = crypt_jobj_get_uint64(jobj);
+
+ return 0;
+}
+
+int LUKS2_keyslot_area(struct luks2_hdr *hdr,
+ int keyslot,
+ uint64_t *offset,
+ uint64_t *length)
+{
+ json_object *jobj_keyslot;
+
+ if (LUKS2_keyslot_info(hdr, keyslot) == CRYPT_SLOT_INVALID)
+ return -EINVAL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -ENOENT;
+
+ return LUKS2_keyslot_jobj_area(jobj_keyslot, offset, length);
+}
+
+static int _open_and_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const keyslot_handler *h,
+ int keyslot,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ int r, key_size = LUKS2_get_keyslot_stored_key_size(hdr, keyslot);
+
+ if (key_size < 0)
+ return -EINVAL;
+
+ *vk = crypt_alloc_volume_key(key_size, NULL);
+ if (!*vk)
+ return -ENOMEM;
+
+ r = h->open(cd, keyslot, password, password_len, (*vk)->key, (*vk)->keylength);
+ if (r < 0)
+ log_dbg(cd, "Keyslot %d (%s) open failed with %d.", keyslot, h->name, r);
+ else
+ r = LUKS2_digest_verify(cd, hdr, *vk, keyslot);
+
+ if (r < 0) {
+ crypt_free_volume_key(*vk);
+ *vk = NULL;
+ }
+
+ crypt_volume_key_set_id(*vk, r);
+
+ return r < 0 ? r : keyslot;
+}
+
+static int LUKS2_open_and_verify_by_digest(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int digest,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ const keyslot_handler *h;
+ int r;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
+ return -ENOENT;
+
+ r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot %d validation failed.", keyslot);
+ return r;
+ }
+
+ r = _keyslot_for_digest(hdr, keyslot, digest);
+ if (r) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Keyslot %d unusable for digest %d.", keyslot, digest);
+ return r;
+ }
+
+ return _open_and_verify(cd, hdr, h, keyslot, password, password_len, vk);
+}
+
+static int LUKS2_open_and_verify(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int segment,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ const keyslot_handler *h;
+ int r;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
+ return -ENOENT;
+
+ r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot %d validation failed.", keyslot);
+ return r;
+ }
+
+ r = LUKS2_keyslot_for_segment(hdr, keyslot, segment);
+ if (r) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Keyslot %d unusable for segment %d.", keyslot, segment);
+ return r;
+ }
+
+ return _open_and_verify(cd, hdr, h, keyslot, password, password_len, vk);
+}
+
/*
 * Try to unlock the key bound to 'digest' using every keyslot with the given
 * priority. Returns the winning keyslot number, or the last error seen
 * (-ENOENT when no slot of this priority exists).
 */
static int LUKS2_keyslot_open_priority_digest(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	crypt_keyslot_priority priority,
	const char *password,
	size_t password_len,
	int digest,
	struct volume_key **vk)
{
	json_object *jobj_keyslots, *jobj;
	crypt_keyslot_priority slot_priority;
	int keyslot, r = -ENOENT;

	json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);

	json_object_object_foreach(jobj_keyslots, slot, val) {
		/* Missing "priority" member means normal priority */
		if (!json_object_object_get_ex(val, "priority", &jobj))
			slot_priority = CRYPT_SLOT_PRIORITY_NORMAL;
		else
			slot_priority = json_object_get_int(jobj);

		keyslot = atoi(slot);
		if (slot_priority != priority) {
			log_dbg(cd, "Keyslot %d priority %d != %d (required), skipped.",
				keyslot, slot_priority, priority);
			continue;
		}

		r = LUKS2_open_and_verify_by_digest(cd, hdr, keyslot, digest, password, password_len, vk);

		/* Do not retry for errors that are not -EPERM or -ENOENT:
		   -EPERM means wrong password, -ENOENT means the slot is
		   unusable for this digest; anything else (success included)
		   ends the scan. */
		if ((r != -EPERM) && (r != -ENOENT))
			break;
	}

	return r;
}
+
/*
 * Try to unlock 'segment' using every keyslot with the given priority.
 * Returns the winning keyslot number, or the last error seen
 * (-ENOENT when no slot of this priority exists).
 */
static int LUKS2_keyslot_open_priority(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	crypt_keyslot_priority priority,
	const char *password,
	size_t password_len,
	int segment,
	struct volume_key **vk)
{
	json_object *jobj_keyslots, *jobj;
	crypt_keyslot_priority slot_priority;
	int keyslot, r = -ENOENT;

	json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);

	json_object_object_foreach(jobj_keyslots, slot, val) {
		/* Missing "priority" member means normal priority */
		if (!json_object_object_get_ex(val, "priority", &jobj))
			slot_priority = CRYPT_SLOT_PRIORITY_NORMAL;
		else
			slot_priority = json_object_get_int(jobj);

		keyslot = atoi(slot);
		if (slot_priority != priority) {
			log_dbg(cd, "Keyslot %d priority %d != %d (required), skipped.",
				keyslot, slot_priority, priority);
			continue;
		}

		r = LUKS2_open_and_verify(cd, hdr, keyslot, segment, password, password_len, vk);

		/* Do not retry for errors that are not -EPERM or -ENOENT:
		   -EPERM means wrong password, -ENOENT means the slot is
		   unusable for this segment; anything else (success included)
		   ends the scan. */
		if ((r != -EPERM) && (r != -ENOENT))
			break;
	}

	return r;
}
+
+static int LUKS2_keyslot_open_by_digest(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int digest,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ int r_prio, r = -EINVAL;
+
+ if (digest < 0)
+ return r;
+
+ if (keyslot == CRYPT_ANY_SLOT) {
+ r_prio = LUKS2_keyslot_open_priority_digest(cd, hdr, CRYPT_SLOT_PRIORITY_PREFER,
+ password, password_len, digest, vk);
+ if (r_prio >= 0)
+ r = r_prio;
+ else if (r_prio != -EPERM && r_prio != -ENOENT)
+ r = r_prio;
+ else
+ r = LUKS2_keyslot_open_priority_digest(cd, hdr, CRYPT_SLOT_PRIORITY_NORMAL,
+ password, password_len, digest, vk);
+ /* Prefer password wrong to no entry from priority slot */
+ if (r_prio == -EPERM && r == -ENOENT)
+ r = r_prio;
+ } else
+ r = LUKS2_open_and_verify_by_digest(cd, hdr, keyslot, digest, password, password_len, vk);
+
+ return r;
+}
+
/*
 * During reencryption two volume keys may exist (old and new digest).
 * Unlock whichever are present with the same passphrase and chain them
 * into *vks. On any failure the whole chain is freed and *vks is NULLed.
 * Returns the last successful keyslot number or negative errno.
 */
int LUKS2_keyslot_open_all_segments(struct crypt_device *cd,
	int keyslot_old,
	int keyslot_new,
	const char *password,
	size_t password_len,
	struct volume_key **vks)
{
	struct volume_key *vk = NULL;
	/* r stays -EINVAL if neither reencrypt digest exists */
	int digest_old, digest_new, r = -EINVAL;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	digest_old = LUKS2_reencrypt_digest_old(hdr);
	if (digest_old >= 0) {
		log_dbg(cd, "Trying to unlock volume key (digest: %d) using keyslot %d.", digest_old, keyslot_old);
		r = LUKS2_keyslot_open_by_digest(cd, hdr, keyslot_old, digest_old, password, password_len, &vk);
		if (r < 0)
			goto out;
		crypt_volume_key_add_next(vks, vk);
	}

	digest_new = LUKS2_reencrypt_digest_new(hdr);
	/* Equal digests mean the same key serves both segments; unlock once */
	if (digest_new >= 0 && digest_old != digest_new) {
		log_dbg(cd, "Trying to unlock volume key (digest: %d) using keyslot %d.", digest_new, keyslot_new);
		r = LUKS2_keyslot_open_by_digest(cd, hdr, keyslot_new, digest_new, password, password_len, &vk);
		if (r < 0)
			goto out;
		crypt_volume_key_add_next(vks, vk);
	}
out:
	if (r < 0) {
		/* NOTE(review): assumes crypt_free_volume_key releases the
		 * whole chained list headed at *vks — confirm in volume_key.c */
		crypt_free_volume_key(*vks);
		*vks = NULL;

		if (r == -ENOMEM)
			log_err(cd, _("Not enough available memory to open a keyslot."));
		else if (r != -EPERM && r != -ENOENT)
			log_err(cd, _("Keyslot open failed."));
	}
	return r;
}
+
+int LUKS2_keyslot_open(struct crypt_device *cd,
+ int keyslot,
+ int segment,
+ const char *password,
+ size_t password_len,
+ struct volume_key **vk)
+{
+ struct luks2_hdr *hdr;
+ int r_prio, r = -EINVAL;
+
+ hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+ if (keyslot == CRYPT_ANY_SLOT) {
+ r_prio = LUKS2_keyslot_open_priority(cd, hdr, CRYPT_SLOT_PRIORITY_PREFER,
+ password, password_len, segment, vk);
+ if (r_prio >= 0)
+ r = r_prio;
+ else if (r_prio != -EPERM && r_prio != -ENOENT)
+ r = r_prio;
+ else
+ r = LUKS2_keyslot_open_priority(cd, hdr, CRYPT_SLOT_PRIORITY_NORMAL,
+ password, password_len, segment, vk);
+ /* Prefer password wrong to no entry from priority slot */
+ if (r_prio == -EPERM && r == -ENOENT)
+ r = r_prio;
+ } else
+ r = LUKS2_open_and_verify(cd, hdr, keyslot, segment, password, password_len, vk);
+
+ if (r < 0) {
+ if (r == -ENOMEM)
+ log_err(cd, _("Not enough available memory to open a keyslot."));
+ else if (r != -EPERM && r != -ENOENT)
+ log_err(cd, _("Keyslot open failed."));
+ }
+
+ return r;
+}
+
+int LUKS2_keyslot_reencrypt_store(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const void *buffer,
+ size_t buffer_length)
+{
+ const keyslot_handler *h;
+ int r;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)) || strcmp(h->name, "reencrypt"))
+ return -EINVAL;
+
+ r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot validation failed.");
+ return r;
+ }
+
+ return h->store(cd, keyslot, NULL, 0,
+ buffer, buffer_length);
+}
+
/*
 * Store a volume key protected by a passphrase into a keyslot.
 * An unused slot number gets a freshly allocated "luks2" type slot;
 * an existing slot is updated in place via its own handler.
 * Returns the handler's store() result or negative errno.
 */
int LUKS2_keyslot_store(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int keyslot,
	const char *password,
	size_t password_len,
	const struct volume_key *vk,
	const struct luks2_keyslot_params *params)
{
	const keyslot_handler *h;
	int r;

	if (keyslot == CRYPT_ANY_SLOT)
		return -EINVAL;

	if (!LUKS2_get_keyslot_jobj(hdr, keyslot)) {
		/* Try to allocate default and empty keyslot type */
		h = LUKS2_keyslot_handler_type("luks2");
		if (!h)
			return -EINVAL;

		r = h->alloc(cd, keyslot, vk->keylength, params);
		if (r)
			return r;
	} else {
		/* Existing slot: its stored "type" selects the handler */
		if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
			return -EINVAL;

		r = h->update(cd, keyslot, params);
		if (r) {
			log_dbg(cd, "Failed to update keyslot %d json.", keyslot);
			return r;
		}
	}

	/* Re-validate the slot json after alloc/update before touching disk */
	r = h->validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
	if (r) {
		log_dbg(cd, "Keyslot validation failed.");
		return r;
	}

	/* Whole-header validation guards against writing a corrupted header */
	if (LUKS2_hdr_validate(cd, hdr->jobj, hdr->hdr_size - LUKS2_HDR_BIN_LEN))
		return -EINVAL;

	return h->store(cd, keyslot, password, password_len,
			vk->key, vk->keylength);
}
+
/*
 * Securely destroy a keyslot: wipe its binary area on disk and (unless
 * wipe_area_only) remove its json object and write the header.
 * Runs under the metadata device write lock.
 */
int LUKS2_keyslot_wipe(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int keyslot,
	int wipe_area_only)
{
	struct device *device = crypt_metadata_device(cd);
	uint64_t area_offset, area_length;
	int r;
	json_object *jobj_keyslot, *jobj_keyslots;
	const keyslot_handler *h;

	/* Handler may be NULL (unknown type); wipe still proceeds below */
	h = LUKS2_keyslot_handler(cd, keyslot);

	if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
		return -EINVAL;

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
	if (!jobj_keyslot)
		return -ENOENT;

	if (wipe_area_only)
		log_dbg(cd, "Wiping keyslot %d area only.", keyslot);

	r = LUKS2_device_write_lock(cd, hdr, device);
	if (r)
		return r;

	/* secure deletion of possible key material in keyslot area */
	r = crypt_keyslot_area(cd, keyslot, &area_offset, &area_length);
	/* -ENOENT: no binary area for this slot — not an error here */
	if (r && r != -ENOENT)
		goto out;

	if (!r) {
		r = crypt_wipe_device(cd, device, CRYPT_WIPE_SPECIAL, area_offset,
				area_length, area_length, NULL, NULL);
		if (r) {
			if (r == -EACCES) {
				log_err(cd, _("Cannot write to device %s, permission denied."),
					device_path(device));
				r = -EINVAL;
			} else
				log_err(cd, _("Cannot wipe device %s."), device_path(device));
			goto out;
		}
	}

	if (wipe_area_only)
		goto out;

	/* Slot specific wipe */
	if (h) {
		r = h->wipe(cd, keyslot);
		if (r < 0)
			goto out;
	} else
		log_dbg(cd, "Wiping keyslot %d without specific-slot handler loaded.", keyslot);

	/* Drop the slot from metadata and persist the header */
	json_object_object_del_by_uint(jobj_keyslots, keyslot);

	r = LUKS2_hdr_write(cd, hdr);
out:
	device_write_unlock(cd, crypt_metadata_device(cd));
	return r;
}
+
+int LUKS2_keyslot_dump(struct crypt_device *cd, int keyslot)
+{
+ const keyslot_handler *h;
+
+ if (!(h = LUKS2_keyslot_handler(cd, keyslot)))
+ return -EINVAL;
+
+ return h->dump(cd, keyslot);
+}
+
+crypt_keyslot_priority LUKS2_keyslot_priority_get(struct luks2_hdr *hdr, int keyslot)
+{
+ json_object *jobj_keyslot, *jobj_priority;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return CRYPT_SLOT_PRIORITY_INVALID;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "priority", &jobj_priority))
+ return CRYPT_SLOT_PRIORITY_NORMAL;
+
+ return json_object_get_int(jobj_priority);
+}
+
+int LUKS2_keyslot_priority_set(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, crypt_keyslot_priority priority, int commit)
+{
+ json_object *jobj_keyslot;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ if (priority == CRYPT_SLOT_PRIORITY_NORMAL)
+ json_object_object_del(jobj_keyslot, "priority");
+ else
+ json_object_object_add(jobj_keyslot, "priority", json_object_new_int(priority));
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
/*
 * Create a temporary "placeholder" keyslot object used while converting
 * a LUKS2 header down to LUKS1. Returns 0 or -EINVAL.
 */
int placeholder_keyslot_alloc(struct crypt_device *cd,
	int keyslot,
	uint64_t area_offset,
	uint64_t area_length)
{
	struct luks2_hdr *hdr;
	json_object *jobj_keyslots, *jobj_keyslot, *jobj_area;

	log_dbg(cd, "Allocating placeholder keyslot %d for LUKS1 down conversion.", keyslot);

	if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
		return -EINVAL;

	if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX)
		return -EINVAL;

	/* The target slot must not already exist */
	if (LUKS2_get_keyslot_jobj(hdr, keyslot))
		return -EINVAL;

	if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
		return -EINVAL;

	jobj_keyslot = json_object_new_object();
	json_object_object_add(jobj_keyslot, "type", json_object_new_string("placeholder"));
	/*
	 * key_size = -1 makes placeholder keyslot impossible to pass validation.
	 * It's a safeguard against accidentally storing temporary conversion
	 * LUKS2 header.
	 */
	json_object_object_add(jobj_keyslot, "key_size", json_object_new_int(-1));

	/* Area object */
	jobj_area = json_object_new_object();
	json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
	json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
	json_object_object_add(jobj_keyslot, "area", jobj_area);

	/* NOTE(review): return value ignored; an -ENOMEM here would leave the
	 * slot silently missing — consider propagating it. */
	json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);

	return 0;
}
+
+static unsigned LUKS2_get_keyslot_digests_count(json_object *hdr_jobj, int keyslot)
+{
+ char num[16];
+ json_object *jobj_digests, *jobj_keyslots;
+ unsigned count = 0;
+
+ if (!json_object_object_get_ex(hdr_jobj, "digests", &jobj_digests))
+ return 0;
+
+ if (snprintf(num, sizeof(num), "%u", keyslot) < 0)
+ return 0;
+
+ json_object_object_foreach(jobj_digests, key, val) {
+ UNUSED(key);
+ json_object_object_get_ex(val, "keyslots", &jobj_keyslots);
+ if (LUKS2_array_jobj(jobj_keyslots, num))
+ count++;
+ }
+
+ return count;
+}
+
/* run only on header that passed basic format validation */
int LUKS2_keyslots_validate(struct crypt_device *cd, json_object *hdr_jobj)
{
	const keyslot_handler *h;
	int keyslot;
	json_object *jobj_keyslots, *jobj_type;
	uint32_t reqs, reencrypt_count = 0;
	/* Wrap the raw json in a minimal hdr so helper APIs can be reused */
	struct luks2_hdr dummy = {
		.jobj = hdr_jobj
	};

	if (!json_object_object_get_ex(hdr_jobj, "keyslots", &jobj_keyslots))
		return -EINVAL;

	if (LUKS2_config_get_requirements(cd, &dummy, &reqs))
		return -EINVAL;

	json_object_object_foreach(jobj_keyslots, slot, val) {
		keyslot = atoi(slot);
		json_object_object_get_ex(val, "type", &jobj_type);
		h = LUKS2_keyslot_handler_type(json_object_get_string(jobj_type));
		/* Unknown keyslot types are tolerated (forward compatibility) */
		if (!h)
			continue;
		if (h->validate && h->validate(cd, val)) {
			log_dbg(cd, "Keyslot type %s validation failed on keyslot %d.", h->name, keyslot);
			return -EINVAL;
		}

		/* Every luks2 keyslot must belong to exactly one digest */
		if (!strcmp(h->name, "luks2") && LUKS2_get_keyslot_digests_count(hdr_jobj, keyslot) != 1) {
			log_dbg(cd, "Keyslot %d is not assigned to exactly 1 digest.", keyslot);
			return -EINVAL;
		}

		if (!strcmp(h->name, "reencrypt"))
			reencrypt_count++;
	}

	/* Reencrypt requirement flag and reencrypt keyslot must agree */
	if ((reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT) && reencrypt_count == 0) {
		log_dbg(cd, "Missing reencryption keyslot.");
		return -EINVAL;
	}

	if (reencrypt_count && !LUKS2_reencrypt_requirement_candidate(&dummy)) {
		log_dbg(cd, "Missing reencryption requirement flag.");
		return -EINVAL;
	}

	if (reencrypt_count > 1) {
		log_dbg(cd, "Too many reencryption keyslots.");
		return -EINVAL;
	}

	return 0;
}
+
/*
 * Run each handler's repair hook over all keyslot objects.
 * Called on json that failed validation (see LUKS2_hdr_repair), so
 * malformed entries are skipped rather than treated as errors.
 */
void LUKS2_keyslots_repair(struct crypt_device *cd, json_object *jobj_keyslots)
{
	const keyslot_handler *h;
	json_object *jobj_type;

	json_object_object_foreach(jobj_keyslots, slot, val) {
		UNUSED(slot);
		/* Repair only entries with a recognizable string "type" */
		if (!json_object_is_type(val, json_type_object) ||
		    !json_object_object_get_ex(val, "type", &jobj_type) ||
		    !json_object_is_type(jobj_type, json_type_string))
			continue;

		h = LUKS2_keyslot_handler_type(json_object_get_string(jobj_type));
		if (h && h->repair)
			h->repair(val);
	}
}
+
+/* assumes valid header */
+int LUKS2_find_keyslot(struct luks2_hdr *hdr, const char *type)
+{
+ int i;
+ json_object *jobj_keyslot, *jobj_type;
+
+ if (!type)
+ return -EINVAL;
+
+ for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, i);
+ if (!jobj_keyslot)
+ continue;
+
+ json_object_object_get_ex(jobj_keyslot, "type", &jobj_type);
+ if (!strcmp(json_object_get_string(jobj_type), type))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
/* assumes valid header, it does not move references in tokens/digests etc! */
int LUKS2_keyslot_swap(struct crypt_device *cd, struct luks2_hdr *hdr,
	int keyslot, int keyslot2)
{
	json_object *jobj_keyslots, *jobj_keyslot, *jobj_keyslot2;
	int r;

	if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
		return -EINVAL;

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
	if (!jobj_keyslot)
		return -EINVAL;

	jobj_keyslot2 = LUKS2_get_keyslot_jobj(hdr, keyslot2);
	if (!jobj_keyslot2)
		return -EINVAL;

	/*
	 * Bump refcounts so the del calls below do not free the objects;
	 * the add calls then take over these references, so no explicit
	 * json_object_put is needed afterwards.
	 */
	json_object_get(jobj_keyslot);
	json_object_get(jobj_keyslot2);

	json_object_object_del_by_uint(jobj_keyslots, keyslot);
	r = json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot2);
	if (r < 0) {
		/* NOTE(review): on this path jobj_keyslot still holds an extra
		 * reference and the header has lost slot 'keyslot' — callers
		 * must treat the header as unusable. */
		log_dbg(cd, "Failed to swap keyslot %d.", keyslot);
		return r;
	}

	json_object_object_del_by_uint(jobj_keyslots, keyslot2);
	r = json_object_object_add_by_uint(jobj_keyslots, keyslot2, jobj_keyslot);
	if (r < 0)
		log_dbg(cd, "Failed to swap keyslot2 %d.", keyslot2);

	return r;
}
diff --git a/lib/luks2/luks2_keyslot_luks2.c b/lib/luks2/luks2_keyslot_luks2.c
new file mode 100644
index 0000000..491dcad
--- /dev/null
+++ b/lib/luks2/luks2_keyslot_luks2.c
@@ -0,0 +1,821 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, LUKS2 type keyslot handler
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <limits.h>
+#include "luks2_internal.h"
+
+/* FIXME: move keyslot encryption to crypto backend */
+#include "../luks1/af.h"
+
+#define LUKS_SALTSIZE 32
+#define LUKS_SLOT_ITERATIONS_MIN 1000
+#define LUKS_STRIPES 4000
+
+/* Serialize memory-hard keyslot access: optional workaround for parallel processing */
+#define MIN_MEMORY_FOR_SERIALIZE_LOCK_KB 32*1024 /* 32MB */
+
+/* coverity[ -taint_source : arg-0 ] */
+/*
+ * Encrypt keyslot material 'src' (length must be a whole multiple of 512
+ * bytes) with the derived key 'vk' using cipher/cipher_mode and write the
+ * result to the metadata device at sector 'sector'.
+ * Returns 0 on success, negative errno on failure.
+ *
+ * Uses device_open_locked(), so the caller must already hold the device
+ * write lock (see luks2_keyslot_store, which takes it before calling in).
+ */
+static int luks2_encrypt_to_storage(char *src, size_t srcLength,
+	const char *cipher, const char *cipher_mode,
+	struct volume_key *vk, unsigned int sector,
+	struct crypt_device *cd)
+{
+#ifndef ENABLE_AF_ALG /* Support for old kernel without Crypto API */
+	/* Fallback: delegate to the LUKS1 userspace crypto path. */
+	return LUKS_encrypt_to_storage(src, srcLength, cipher, cipher_mode, vk, sector, cd);
+#else
+	struct crypt_storage *s;
+	int devfd, r;
+	struct device *device = crypt_metadata_device(cd);
+
+	/* Only whole sector writes supported */
+	if (MISALIGNED_512(srcLength))
+		return -EINVAL;
+
+	/* Encrypt buffer in place before writing it out. */
+	r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, vk->key, vk->keylength, false);
+	if (r) {
+		log_err(cd, _("Cannot use %s-%s cipher for keyslot encryption."), cipher, cipher_mode);
+		return r;
+	}
+
+	r = crypt_storage_encrypt(s, 0, srcLength, src);
+	crypt_storage_destroy(s);
+	if (r) {
+		log_err(cd, _("IO error while encrypting keyslot."));
+		return r;
+	}
+
+	devfd = device_open_locked(cd, device, O_RDWR);
+	if (devfd >= 0) {
+		if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+					  device_alignment(device), src,
+					  srcLength, sector * SECTOR_SIZE) < 0)
+			r = -EIO;
+		else
+			r = 0;
+
+		/* Make sure keyslot data hits stable storage. */
+		device_sync(cd, device);
+	} else
+		r = -EIO;
+
+	if (r)
+		log_err(cd, _("IO error while encrypting keyslot."));
+
+	return r;
+#endif
+}
+
+/*
+ * Read 'dstLength' bytes (whole multiple of 512) of encrypted keyslot
+ * material from the metadata device at sector 'sector' into 'dst' and
+ * decrypt it in place with the derived key 'vk'.
+ * Returns 0 on success, negative errno on failure.
+ *
+ * Unlike the encrypt path, this takes the device read lock itself.
+ */
+static int luks2_decrypt_from_storage(char *dst, size_t dstLength,
+	const char *cipher, const char *cipher_mode, struct volume_key *vk,
+	unsigned int sector, struct crypt_device *cd)
+{
+	struct device *device = crypt_metadata_device(cd);
+#ifndef ENABLE_AF_ALG /* Support for old kernel without Crypto API */
+	/* Fallback: delegate to the LUKS1 userspace crypto path under read lock. */
+	int r = device_read_lock(cd, device);
+	if (r) {
+		log_err(cd, _("Failed to acquire read lock on device %s."), device_path(device));
+		return r;
+	}
+	r = LUKS_decrypt_from_storage(dst, dstLength, cipher, cipher_mode, vk, sector, cd);
+	device_read_unlock(cd, crypt_metadata_device(cd));
+	return r;
+#else
+	struct crypt_storage *s;
+	int devfd, r;
+
+	/* Only whole sector writes supported */
+	if (MISALIGNED_512(dstLength))
+		return -EINVAL;
+
+	r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, vk->key, vk->keylength, false);
+	if (r) {
+		log_err(cd, _("Cannot use %s-%s cipher for keyslot encryption."), cipher, cipher_mode);
+		return r;
+	}
+
+	r = device_read_lock(cd, device);
+	if (r) {
+		log_err(cd, _("Failed to acquire read lock on device %s."),
+			device_path(device));
+		crypt_storage_destroy(s);
+		return r;
+	}
+
+	devfd = device_open_locked(cd, device, O_RDONLY);
+	if (devfd >= 0) {
+		if (read_lseek_blockwise(devfd, device_block_size(cd, device),
+					 device_alignment(device), dst,
+					 dstLength, sector * SECTOR_SIZE) < 0)
+			r = -EIO;
+		else
+			r = 0;
+	} else
+		r = -EIO;
+
+	/* Lock only covers the raw read; decryption is local. */
+	device_read_unlock(cd, device);
+
+	/* Decrypt buffer */
+	if (!r)
+		r = crypt_storage_decrypt(s, 0, dstLength, dst);
+	else
+		log_err(cd, _("IO error while decrypting keyslot."));
+
+	crypt_storage_destroy(s);
+	return r;
+#endif
+}
+
+/*
+ * Parse the PBKDF parameters and salt from the keyslot "kdf" JSON object.
+ *
+ * On success fills *pbkdf (string members are borrowed references into
+ * jobj_keyslot, valid only while the JSON object lives) and stores a newly
+ * allocated salt buffer of exactly LUKS_SALTSIZE bytes in *salt; the caller
+ * owns and must free it. Returns 0 on success, negative errno on failure;
+ * on failure *salt is never left pointing to freed memory.
+ */
+static int luks2_keyslot_get_pbkdf_params(json_object *jobj_keyslot,
+	struct crypt_pbkdf_type *pbkdf, char **salt)
+{
+	json_object *jobj_kdf, *jobj1, *jobj2;
+	size_t salt_len;
+	int r;
+
+	if (!jobj_keyslot || !pbkdf || !salt)
+		return -EINVAL;
+
+	memset(pbkdf, 0, sizeof(*pbkdf));
+
+	if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
+		return -EINVAL;
+
+	if (!json_object_object_get_ex(jobj_kdf, "type", &jobj1))
+		return -EINVAL;
+	pbkdf->type = json_object_get_string(jobj1);
+	if (!strcmp(pbkdf->type, CRYPT_KDF_PBKDF2)) {
+		/* PBKDF2 uses hash + iterations; memory/thread costs do not apply. */
+		if (!json_object_object_get_ex(jobj_kdf, "hash", &jobj2))
+			return -EINVAL;
+		pbkdf->hash = json_object_get_string(jobj2);
+		if (!json_object_object_get_ex(jobj_kdf, "iterations", &jobj2))
+			return -EINVAL;
+		pbkdf->iterations = json_object_get_int(jobj2);
+		pbkdf->max_memory_kb = 0;
+		pbkdf->parallel_threads = 0;
+	} else {
+		/* Argon2 family: "time" maps to the iterations cost. */
+		if (!json_object_object_get_ex(jobj_kdf, "time", &jobj2))
+			return -EINVAL;
+		pbkdf->iterations = json_object_get_int(jobj2);
+		if (!json_object_object_get_ex(jobj_kdf, "memory", &jobj2))
+			return -EINVAL;
+		pbkdf->max_memory_kb = json_object_get_int(jobj2);
+		if (!json_object_object_get_ex(jobj_kdf, "cpus", &jobj2))
+			return -EINVAL;
+		pbkdf->parallel_threads = json_object_get_int(jobj2);
+	}
+
+	if (!json_object_object_get_ex(jobj_kdf, "salt", &jobj2))
+		return -EINVAL;
+
+	r = crypt_base64_decode(salt, &salt_len, json_object_get_string(jobj2),
+				json_object_get_string_len(jobj2));
+	if (r < 0)
+		return r;
+
+	if (salt_len != LUKS_SALTSIZE) {
+		free(*salt);
+		*salt = NULL; /* never hand a dangling pointer back to the caller */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Derive the keyslot encryption key from 'password' using the keyslot's
+ * configured PBKDF, anti-forensically split the volume key (AF_split) and
+ * store the encrypted split material into the keyslot binary area.
+ * Returns 0 on success, negative errno on failure.
+ * The keyslot JSON must already describe kdf/af/area and the expected
+ * volume key size.
+ */
+static int luks2_keyslot_set_key(struct crypt_device *cd,
+	json_object *jobj_keyslot,
+	const char *password, size_t passwordLen,
+	const char *volume_key, size_t volume_key_len)
+{
+	struct volume_key *derived_key;
+	char *salt = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
+	char *AfKey = NULL;
+	const char *af_hash = NULL;
+	size_t AFEKSize, keyslot_key_len;
+	json_object *jobj2, *jobj_kdf, *jobj_af, *jobj_area;
+	uint64_t area_offset;
+	struct crypt_pbkdf_type pbkdf;
+	int r;
+
+	if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
+	    !json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+	    !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+		return -EINVAL;
+
+	/* prevent accidental volume key size change after allocation */
+	if (!json_object_object_get_ex(jobj_keyslot, "key_size", &jobj2))
+		return -EINVAL;
+	if (json_object_get_int(jobj2) != (int)volume_key_len)
+		return -EINVAL;
+
+	/* Keyslot binary area location on device (bytes). */
+	if (!json_object_object_get_ex(jobj_area, "offset", &jobj2))
+		return -EINVAL;
+	area_offset = crypt_jobj_get_uint64(jobj2);
+
+	/* Area cipher specification, e.g. "aes-xts-plain64". */
+	if (!json_object_object_get_ex(jobj_area, "encryption", &jobj2))
+		return -EINVAL;
+	r = crypt_parse_name_and_mode(json_object_get_string(jobj2), cipher, NULL, cipher_mode);
+	if (r < 0)
+		return r;
+
+	if (!json_object_object_get_ex(jobj_area, "key_size", &jobj2))
+		return -EINVAL;
+	keyslot_key_len = json_object_get_int(jobj2);
+
+	if (!json_object_object_get_ex(jobj_af, "hash", &jobj2))
+		return -EINVAL;
+	af_hash = json_object_get_string(jobj2);
+
+	/* Read PBKDF type/costs; 'salt' is newly allocated and freed below. */
+	r = luks2_keyslot_get_pbkdf_params(jobj_keyslot, &pbkdf, &salt);
+	if (r < 0)
+		return r;
+
+	/*
+	 * Allocate derived key storage.
+	 */
+	derived_key = crypt_alloc_volume_key(keyslot_key_len, NULL);
+	if (!derived_key) {
+		free(salt);
+		return -ENOMEM;
+	}
+	/*
+	 * Calculate keyslot content, split and store it to keyslot area.
+	 */
+	log_dbg(cd, "Running keyslot key derivation.");
+	r = crypt_pbkdf(pbkdf.type, pbkdf.hash, password, passwordLen,
+			salt, LUKS_SALTSIZE,
+			derived_key->key, derived_key->keylength,
+			pbkdf.iterations, pbkdf.max_memory_kb,
+			pbkdf.parallel_threads);
+	free(salt);
+	if (r < 0) {
+		/* Give a better hint when the backend limits PBKDF2 to INT_MAX iterations. */
+		if ((crypt_backend_flags() & CRYPT_BACKEND_PBKDF2_INT) &&
+		    pbkdf.iterations > INT_MAX)
+			log_err(cd, _("PBKDF2 iteration value overflow."));
+		crypt_free_volume_key(derived_key);
+		return r;
+	}
+
+	// FIXME: verify key_size to AFEKSize
+	AFEKSize = AF_split_sectors(volume_key_len, LUKS_STRIPES) * SECTOR_SIZE;
+	AfKey = crypt_safe_alloc(AFEKSize);
+	if (!AfKey) {
+		crypt_free_volume_key(derived_key);
+		return -ENOMEM;
+	}
+
+	r = crypt_hash_size(af_hash);
+	if (r < 0)
+		log_err(cd, _("Hash algorithm %s is not available."), af_hash);
+	else
+		r = AF_split(cd, volume_key, AfKey, volume_key_len, LUKS_STRIPES, af_hash);
+
+	if (r == 0) {
+		log_dbg(cd, "Updating keyslot area [0x%04" PRIx64 "].", area_offset);
+		/* FIXME: sector_offset should be size_t, fix LUKS_encrypt... accordingly */
+		r = luks2_encrypt_to_storage(AfKey, AFEKSize, cipher, cipher_mode,
+					     derived_key, (unsigned)(area_offset / SECTOR_SIZE), cd);
+	}
+
+	crypt_safe_free(AfKey);
+	crypt_free_volume_key(derived_key);
+	if (r < 0)
+		return r;
+
+	return 0;
+}
+
+/*
+ * Reverse of luks2_keyslot_set_key: derive the keyslot encryption key from
+ * 'password', read and decrypt the keyslot binary area, and AF-merge the
+ * split material back into 'volume_key'.
+ * Returns 0 on success, negative errno on failure (including a wrong
+ * passphrase, which surfaces from the digest check done by the caller).
+ */
+static int luks2_keyslot_get_key(struct crypt_device *cd,
+	json_object *jobj_keyslot,
+	const char *password, size_t passwordLen,
+	char *volume_key, size_t volume_key_len)
+{
+	struct volume_key *derived_key = NULL;
+	struct crypt_pbkdf_type pbkdf;
+	char *AfKey = NULL;
+	size_t AFEKSize;
+	const char *af_hash = NULL;
+	char *salt = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
+	json_object *jobj2, *jobj_af, *jobj_area;
+	uint64_t area_offset;
+	size_t keyslot_key_len;
+	bool try_serialize_lock = false;
+	int r;
+
+	if (!json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+	    !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+		return -EINVAL;
+
+	if (!json_object_object_get_ex(jobj_af, "hash", &jobj2))
+		return -EINVAL;
+	af_hash = json_object_get_string(jobj2);
+
+	/* Keyslot binary area location on device (bytes). */
+	if (!json_object_object_get_ex(jobj_area, "offset", &jobj2))
+		return -EINVAL;
+	area_offset = crypt_jobj_get_uint64(jobj2);
+
+	if (!json_object_object_get_ex(jobj_area, "encryption", &jobj2))
+		return -EINVAL;
+	r = crypt_parse_name_and_mode(json_object_get_string(jobj2), cipher, NULL, cipher_mode);
+	if (r < 0)
+		return r;
+
+	if (!json_object_object_get_ex(jobj_area, "key_size", &jobj2))
+		return -EINVAL;
+	keyslot_key_len = json_object_get_int(jobj2);
+
+	/* Read PBKDF type/costs; 'salt' is newly allocated, freed at 'out'. */
+	r = luks2_keyslot_get_pbkdf_params(jobj_keyslot, &pbkdf, &salt);
+	if (r < 0)
+		return r;
+
+	/*
+	 * Allocate derived key storage space.
+	 */
+	derived_key = crypt_alloc_volume_key(keyslot_key_len, NULL);
+	if (!derived_key) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+	AFEKSize = AF_split_sectors(volume_key_len, LUKS_STRIPES) * SECTOR_SIZE;
+	AfKey = crypt_safe_alloc(AFEKSize);
+	if (!AfKey) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * If requested, serialize unlocking for memory-hard KDF. Usually NOOP.
+	 */
+	if (pbkdf.max_memory_kb > MIN_MEMORY_FOR_SERIALIZE_LOCK_KB)
+		try_serialize_lock = true;
+	if (try_serialize_lock && (r = crypt_serialize_lock(cd)))
+		goto out;
+
+	/*
+	 * Calculate derived key, decrypt keyslot content and merge it.
+	 */
+	log_dbg(cd, "Running keyslot key derivation.");
+	r = crypt_pbkdf(pbkdf.type, pbkdf.hash, password, passwordLen,
+			salt, LUKS_SALTSIZE,
+			derived_key->key, derived_key->keylength,
+			pbkdf.iterations, pbkdf.max_memory_kb,
+			pbkdf.parallel_threads);
+
+	/* Serialize lock only guards the memory-hard derivation above. */
+	if (try_serialize_lock)
+		crypt_serialize_unlock(cd);
+
+	if (r == 0) {
+		log_dbg(cd, "Reading keyslot area [0x%04" PRIx64 "].", area_offset);
+		/* FIXME: sector_offset should be size_t, fix LUKS_decrypt... accordingly */
+		r = luks2_decrypt_from_storage(AfKey, AFEKSize, cipher, cipher_mode,
+					       derived_key, (unsigned)(area_offset / SECTOR_SIZE), cd);
+	}
+
+	if (r == 0) {
+		r = crypt_hash_size(af_hash);
+		if (r < 0)
+			log_err(cd, _("Hash algorithm %s is not available."), af_hash);
+		else
+			r = AF_merge(AfKey, volume_key, volume_key_len, LUKS_STRIPES, af_hash);
+	}
+out:
+	free(salt);
+	crypt_free_volume_key(derived_key);
+	crypt_safe_free(AfKey);
+
+	return r;
+}
+
+/*
+ * Refresh the mutable parts of an existing keyslot JSON object:
+ *
+ * - af hash function
+ * - kdf params (whole "kdf" object is rebuilt, including a fresh salt)
+ * - area encryption/key_size
+ *
+ * Other keyslot attributes (offsets, type, key_size) are left untouched.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int luks2_keyslot_update_json(struct crypt_device *cd,
+	json_object *jobj_keyslot,
+	const struct luks2_keyslot_params *params)
+{
+	const struct crypt_pbkdf_type *pbkdf;
+	json_object *jobj_af, *jobj_area, *jobj_kdf;
+	char salt[LUKS_SALTSIZE], *salt_base64 = NULL;
+	int r;
+
+	/* jobj_keyslot is not yet validated */
+
+	if (!json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+	    !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+		return -EINVAL;
+
+	/* update area encryption parameters */
+	json_object_object_add(jobj_area, "encryption", json_object_new_string(params->area.raw.encryption));
+	json_object_object_add(jobj_area, "key_size", json_object_new_int(params->area.raw.key_size));
+
+	pbkdf = crypt_get_pbkdf_type(cd);
+	if (!pbkdf)
+		return -EINVAL;
+
+	/* Benchmark fills in missing/auto PBKDF cost values. */
+	r = crypt_benchmark_pbkdf_internal(cd, CONST_CAST(struct crypt_pbkdf_type *)pbkdf, params->area.raw.key_size);
+	if (r < 0)
+		return r;
+
+	/* refresh whole 'kdf' object */
+	jobj_kdf = json_object_new_object();
+	if (!jobj_kdf)
+		return -ENOMEM;
+	json_object_object_add(jobj_kdf, "type", json_object_new_string(pbkdf->type));
+	if (!strcmp(pbkdf->type, CRYPT_KDF_PBKDF2)) {
+		json_object_object_add(jobj_kdf, "hash", json_object_new_string(pbkdf->hash));
+		json_object_object_add(jobj_kdf, "iterations", json_object_new_int(pbkdf->iterations));
+	} else {
+		json_object_object_add(jobj_kdf, "time", json_object_new_int(pbkdf->iterations));
+		json_object_object_add(jobj_kdf, "memory", json_object_new_int(pbkdf->max_memory_kb));
+		json_object_object_add(jobj_kdf, "cpus", json_object_new_int(pbkdf->parallel_threads));
+	}
+	/* Ownership of jobj_kdf transfers to jobj_keyslot here. */
+	json_object_object_add(jobj_keyslot, "kdf", jobj_kdf);
+
+	/*
+	 * Regenerate salt and add it in 'kdf' object
+	 */
+	r = crypt_random_get(cd, salt, LUKS_SALTSIZE, CRYPT_RND_SALT);
+	if (r < 0)
+		return r;
+	r = crypt_base64_encode(&salt_base64, NULL, salt, LUKS_SALTSIZE);
+	if (r < 0)
+		return r;
+	json_object_object_add(jobj_kdf, "salt", json_object_new_string(salt_base64));
+	free(salt_base64);
+
+	/* update 'af' hash */
+	json_object_object_add(jobj_af, "hash", json_object_new_string(params->af.luks1.hash));
+
+	JSON_DBG(cd, jobj_keyslot, "Keyslot JSON:");
+	return 0;
+}
+
+/*
+ * Allocate a new "luks2" type keyslot JSON object in the header: pick a
+ * slot number (or honor the requested one), find a free binary area gap
+ * and fill in type/key_size/af/area/kdf attributes.
+ * Returns 0 on success, negative errno on failure; on failure the keyslot
+ * entry is removed from the header again. Does not write to disk.
+ */
+static int luks2_keyslot_alloc(struct crypt_device *cd,
+	int keyslot,
+	size_t volume_key_len,
+	const struct luks2_keyslot_params *params)
+{
+	struct luks2_hdr *hdr;
+	uint64_t area_offset, area_length;
+	json_object *jobj_keyslots, *jobj_keyslot, *jobj_af, *jobj_area;
+	int r;
+
+	log_dbg(cd, "Trying to allocate LUKS2 keyslot %d.", keyslot);
+
+	if (!params || params->area_type != LUKS2_KEYSLOT_AREA_RAW ||
+	    params->af_type != LUKS2_KEYSLOT_AF_LUKS1) {
+		log_dbg(cd, "Invalid LUKS2 keyslot parameters.");
+		return -EINVAL;
+	}
+
+	if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+		return -EINVAL;
+
+	if (keyslot == CRYPT_ANY_SLOT)
+		keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
+
+	if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX)
+		return -ENOMEM;
+
+	if (LUKS2_get_keyslot_jobj(hdr, keyslot)) {
+		log_dbg(cd, "Cannot modify already active keyslot %d.", keyslot);
+		return -EINVAL;
+	}
+
+	if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
+		return -EINVAL;
+
+	r = LUKS2_find_area_gap(cd, hdr, volume_key_len, &area_offset, &area_length);
+	if (r < 0) {
+		log_err(cd, _("No space for new keyslot."));
+		return r;
+	}
+
+	/* Check each allocation: json_object_new_object() returns NULL on OOM
+	 * and json_object_object_add() would dereference it. */
+	jobj_keyslot = json_object_new_object();
+	if (!jobj_keyslot)
+		return -ENOMEM;
+	json_object_object_add(jobj_keyslot, "type", json_object_new_string("luks2"));
+	json_object_object_add(jobj_keyslot, "key_size", json_object_new_int(volume_key_len));
+
+	/* AF object */
+	jobj_af = json_object_new_object();
+	if (!jobj_af) {
+		json_object_put(jobj_keyslot);
+		return -ENOMEM;
+	}
+	json_object_object_add(jobj_af, "type", json_object_new_string("luks1"));
+	json_object_object_add(jobj_af, "stripes", json_object_new_int(params->af.luks1.stripes));
+	json_object_object_add(jobj_keyslot, "af", jobj_af);
+
+	/* Area object */
+	jobj_area = json_object_new_object();
+	if (!jobj_area) {
+		json_object_put(jobj_keyslot);
+		return -ENOMEM;
+	}
+	json_object_object_add(jobj_area, "type", json_object_new_string("raw"));
+	json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
+	json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
+	json_object_object_add(jobj_keyslot, "area", jobj_area);
+
+	/* Ownership of jobj_keyslot transfers to the keyslots object. */
+	json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+
+	r = luks2_keyslot_update_json(cd, jobj_keyslot, params);
+
+	if (!r && LUKS2_check_json_size(cd, hdr)) {
+		log_dbg(cd, "Not enough space in header json area for new keyslot.");
+		r = -ENOSPC;
+	}
+
+	/* Roll back the new entry on any failure. */
+	if (r)
+		json_object_object_del_by_uint(jobj_keyslots, keyslot);
+
+	return r;
+}
+
+/*
+ * Handler 'open' entry point: look up the keyslot JSON object and recover
+ * the volume key from it using the supplied passphrase.
+ */
+static int luks2_keyslot_open(struct crypt_device *cd,
+	int keyslot,
+	const char *password,
+	size_t password_len,
+	char *volume_key,
+	size_t volume_key_len)
+{
+	json_object *jobj_slot;
+	struct luks2_hdr *hdr;
+
+	log_dbg(cd, "Trying to open LUKS2 keyslot %d.", keyslot);
+
+	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+	if (!hdr)
+		return -EINVAL;
+
+	jobj_slot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+	if (!jobj_slot)
+		return -EINVAL;
+
+	return luks2_keyslot_get_key(cd, jobj_slot, password, password_len,
+				     volume_key, volume_key_len);
+}
+
+/*
+ * Handler 'store' entry point.
+ * This function must not modify json; it is called after luks2 keyslot
+ * validation. Takes the device write lock, stores the wrapped volume key
+ * into the keyslot area and writes the updated header.
+ * Returns the keyslot number on success, negative errno on failure.
+ */
+static int luks2_keyslot_store(struct crypt_device *cd,
+	int keyslot,
+	const char *password,
+	size_t password_len,
+	const char *volume_key,
+	size_t volume_key_len)
+{
+	json_object *jobj_slot;
+	struct luks2_hdr *hdr;
+	int r;
+
+	log_dbg(cd, "Calculating attributes for LUKS2 keyslot %d.", keyslot);
+
+	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+	if (!hdr)
+		return -EINVAL;
+
+	jobj_slot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+	if (!jobj_slot)
+		return -EINVAL;
+
+	r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
+	if (r)
+		return r;
+
+	r = luks2_keyslot_set_key(cd, jobj_slot, password, password_len,
+				  volume_key, volume_key_len);
+	if (!r)
+		r = LUKS2_hdr_write(cd, hdr);
+
+	device_write_unlock(cd, crypt_metadata_device(cd));
+
+	return r < 0 ? r : keyslot;
+}
+
+/*
+ * Handler 'wipe' entry point: the binary area wipe is done by generic
+ * code; here we only drop references to the removed keyslot from digests
+ * and tokens (best effort, return values intentionally ignored).
+ */
+static int luks2_keyslot_wipe(struct crypt_device *cd, int keyslot)
+{
+	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+	if (!hdr)
+		return -EINVAL;
+
+	/* Remove any reference of deleted keyslot from digests and tokens */
+	LUKS2_digest_assign(cd, hdr, keyslot, CRYPT_ANY_DIGEST, 0, 0);
+	LUKS2_token_assign(cd, hdr, keyslot, CRYPT_ANY_TOKEN, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Handler 'dump' entry point: print human-readable keyslot parameters
+ * (cipher, PBKDF costs, salt, AF parameters, binary area layout) via
+ * log_std. Assumes a validated keyslot JSON object.
+ */
+static int luks2_keyslot_dump(struct crypt_device *cd, int keyslot)
+{
+	json_object *jobj_keyslot, *jobj1, *jobj_kdf, *jobj_af, *jobj_area;
+
+	jobj_keyslot = LUKS2_get_keyslot_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), keyslot);
+	if (!jobj_keyslot)
+		return -EINVAL;
+
+	if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
+	    !json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
+	    !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+		return -EINVAL;
+
+	json_object_object_get_ex(jobj_area, "encryption", &jobj1);
+	log_std(cd, "\tCipher: %s\n", json_object_get_string(jobj1));
+
+	json_object_object_get_ex(jobj_area, "key_size", &jobj1);
+	log_std(cd, "\tCipher key: %u bits\n", crypt_jobj_get_uint32(jobj1) * 8);
+
+	json_object_object_get_ex(jobj_kdf, "type", &jobj1);
+	log_std(cd, "\tPBKDF: %s\n", json_object_get_string(jobj1));
+
+	if (!strcmp(json_object_get_string(jobj1), CRYPT_KDF_PBKDF2)) {
+		json_object_object_get_ex(jobj_kdf, "hash", &jobj1);
+		log_std(cd, "\tHash: %s\n", json_object_get_string(jobj1));
+
+		json_object_object_get_ex(jobj_kdf, "iterations", &jobj1);
+		log_std(cd, "\tIterations: %" PRIu64 "\n", crypt_jobj_get_uint64(jobj1));
+	} else {
+		/* json_object_get_int64() returns int64_t, so the signed
+		 * PRIi64 specifier matches the argument type exactly
+		 * (PRIu64 would expect uint64_t). */
+		json_object_object_get_ex(jobj_kdf, "time", &jobj1);
+		log_std(cd, "\tTime cost: %" PRIi64 "\n", json_object_get_int64(jobj1));
+
+		json_object_object_get_ex(jobj_kdf, "memory", &jobj1);
+		log_std(cd, "\tMemory: %" PRIi64 "\n", json_object_get_int64(jobj1));
+
+		json_object_object_get_ex(jobj_kdf, "cpus", &jobj1);
+		log_std(cd, "\tThreads: %" PRIi64 "\n", json_object_get_int64(jobj1));
+	}
+
+	json_object_object_get_ex(jobj_kdf, "salt", &jobj1);
+	log_std(cd, "\tSalt: ");
+	hexprint_base64(cd, jobj1, " ", " ");
+
+	json_object_object_get_ex(jobj_af, "stripes", &jobj1);
+	log_std(cd, "\tAF stripes: %u\n", json_object_get_int(jobj1));
+
+	json_object_object_get_ex(jobj_af, "hash", &jobj1);
+	log_std(cd, "\tAF hash: %s\n", json_object_get_string(jobj1));
+
+	json_object_object_get_ex(jobj_area, "offset", &jobj1);
+	log_std(cd, "\tArea offset:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+	json_object_object_get_ex(jobj_area, "size", &jobj1);
+	log_std(cd, "\tArea length:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+	return 0;
+}
+
+/*
+ * Handler 'validate' entry point: verify that a not-yet-trusted keyslot
+ * JSON object has exactly the structure this handler expects (kdf, af and
+ * area sections with the mandatory members for their declared types).
+ * Returns 0 when valid, -EINVAL otherwise.
+ */
+static int luks2_keyslot_validate(struct crypt_device *cd, json_object *jobj_keyslot)
+{
+	json_object *jobj_kdf, *jobj_af, *jobj_area, *jobj1;
+	const char *type;
+	int count;
+
+	if (!jobj_keyslot)
+		return -EINVAL;
+
+	if (!(jobj_kdf = json_contains(cd, jobj_keyslot, "", "keyslot", "kdf", json_type_object)) ||
+	    !(jobj_af = json_contains(cd, jobj_keyslot, "", "keyslot", "af", json_type_object)) ||
+	    !(jobj_area = json_contains(cd, jobj_keyslot, "", "keyslot", "area", json_type_object)))
+		return -EINVAL;
+
+	/* Member count is checked to reject unknown extra attributes. */
+	count = json_object_object_length(jobj_kdf);
+
+	jobj1 = json_contains_string(cd, jobj_kdf, "", "kdf section", "type");
+	if (!jobj1)
+		return -EINVAL;
+	type = json_object_get_string(jobj1);
+
+	if (!strcmp(type, CRYPT_KDF_PBKDF2)) {
+		if (count != 4 || /* type, salt, hash, iterations only */
+		    !json_contains_string(cd, jobj_kdf, "kdf type", type, "hash") ||
+		    !json_contains(cd, jobj_kdf, "kdf type", type, "iterations", json_type_int) ||
+		    !json_contains_string(cd, jobj_kdf, "kdf type", type, "salt"))
+			return -EINVAL;
+	} else if (!strcmp(type, CRYPT_KDF_ARGON2I) || !strcmp(type, CRYPT_KDF_ARGON2ID)) {
+		if (count != 5 || /* type, salt, time, memory, cpus only */
+		    !json_contains(cd, jobj_kdf, "kdf type", type, "time", json_type_int) ||
+		    !json_contains(cd, jobj_kdf, "kdf type", type, "memory", json_type_int) ||
+		    !json_contains(cd, jobj_kdf, "kdf type", type, "cpus", json_type_int) ||
+		    !json_contains_string(cd, jobj_kdf, "kdf type", type, "salt"))
+			return -EINVAL;
+	}
+
+	/* AF section: only the "luks1" splitter is recognized. */
+	jobj1 = json_contains_string(cd, jobj_af, "", "af section", "type");
+	if (!jobj1)
+		return -EINVAL;
+	type = json_object_get_string(jobj1);
+
+	if (!strcmp(type, "luks1")) {
+		if (!json_contains_string(cd, jobj_af, "", "luks1 af", "hash") ||
+		    !json_contains(cd, jobj_af, "", "luks1 af", "stripes", json_type_int))
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	/* Area section: only "raw" areas are recognized.
+	 * Note: offset/size are stored as JSON strings holding numbers. */
+	// FIXME check numbered
+	jobj1 = json_contains_string(cd, jobj_area, "", "area section", "type");
+	if (!jobj1)
+		return -EINVAL;
+	type = json_object_get_string(jobj1);
+
+	if (!strcmp(type, "raw")) {
+		if (!json_contains_string(cd, jobj_area, "area", "raw type", "encryption") ||
+		    !json_contains(cd, jobj_area, "area", "raw type", "key_size", json_type_int) ||
+		    !json_contains_string(cd, jobj_area, "area", "raw type", "offset") ||
+		    !json_contains_string(cd, jobj_area, "area", "raw type", "size"))
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Handler 'update' entry point: refresh the mutable parameters of an
+ * existing keyslot (AF hash, KDF settings) and check the result still
+ * fits into the header JSON area. Does not write to disk.
+ */
+static int luks2_keyslot_update(struct crypt_device *cd,
+	int keyslot,
+	const struct luks2_keyslot_params *params)
+{
+	json_object *jobj_slot;
+	struct luks2_hdr *hdr;
+	int r;
+
+	log_dbg(cd, "Updating LUKS2 keyslot %d.", keyslot);
+
+	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+	if (!hdr)
+		return -EINVAL;
+
+	jobj_slot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+	if (!jobj_slot)
+		return -EINVAL;
+
+	r = luks2_keyslot_update_json(cd, jobj_slot, params);
+	if (!r && LUKS2_check_json_size(cd, hdr)) {
+		log_dbg(cd, "Not enough space in header json area for updated keyslot %d.", keyslot);
+		r = -ENOSPC;
+	}
+
+	return r;
+}
+
+/*
+ * Handler 'repair' entry point: drop unexpected members from the keyslot
+ * 'kdf' object so the keyslot passes validation again (validate rejects
+ * objects with extra attributes via a member count check).
+ *
+ * NOTE(review): entries are deleted inside json_object_object_foreach;
+ * presumably the json-c iteration macro tolerates removing the current
+ * entry — verify against the json-c version in use.
+ */
+static void luks2_keyslot_repair(json_object *jobj_keyslot)
+{
+	const char *type;
+	json_object *jobj_kdf, *jobj_type;
+
+	if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
+	    !json_object_is_type(jobj_kdf, json_type_object))
+		return;
+
+	if (!json_object_object_get_ex(jobj_kdf, "type", &jobj_type) ||
+	    !json_object_is_type(jobj_type, json_type_string))
+		return;
+
+	type = json_object_get_string(jobj_type);
+
+	if (!strcmp(type, CRYPT_KDF_PBKDF2)) {
+		/* type, salt, hash, iterations only */
+		json_object_object_foreach(jobj_kdf, key, val) {
+			UNUSED(val);
+			if (!strcmp(key, "type") || !strcmp(key, "salt") ||
+			    !strcmp(key, "hash") || !strcmp(key, "iterations"))
+				continue;
+			json_object_object_del(jobj_kdf, key);
+		}
+	} else if (!strcmp(type, CRYPT_KDF_ARGON2I) || !strcmp(type, CRYPT_KDF_ARGON2ID)) {
+		/* type, salt, time, memory, cpus only */
+		json_object_object_foreach(jobj_kdf, key, val) {
+			UNUSED(val);
+			if (!strcmp(key, "type") || !strcmp(key, "salt") ||
+			    !strcmp(key, "time") || !strcmp(key, "memory") ||
+			    !strcmp(key, "cpus"))
+				continue;
+			json_object_object_del(jobj_kdf, key);
+		}
+	}
+}
+
+/* Handler registration for the standard "luks2" keyslot type. */
+const keyslot_handler luks2_keyslot = {
+	.name  = "luks2",
+	.alloc  = luks2_keyslot_alloc,
+	.update = luks2_keyslot_update,
+	.open  = luks2_keyslot_open,
+	.store = luks2_keyslot_store,
+	.wipe  = luks2_keyslot_wipe,
+	.dump  = luks2_keyslot_dump,
+	.validate  = luks2_keyslot_validate,
+	.repair = luks2_keyslot_repair
+};
diff --git a/lib/luks2/luks2_keyslot_reenc.c b/lib/luks2/luks2_keyslot_reenc.c
new file mode 100644
index 0000000..4291d0c
--- /dev/null
+++ b/lib/luks2/luks2_keyslot_reenc.c
@@ -0,0 +1,752 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, reencryption keyslot handler
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
/*
 * A reencrypt keyslot holds no passphrase-protected volume key, so it can
 * never be "opened"; always reports -ENOENT to the keyslot framework.
 */
static int reenc_keyslot_open(struct crypt_device *cd __attribute__((unused)),
	int keyslot __attribute__((unused)),
	const char *password __attribute__((unused)),
	size_t password_len __attribute__((unused)),
	char *volume_key __attribute__((unused)),
	size_t volume_key_len __attribute__((unused)))
{
	return -ENOENT;
}
+
+static json_object *reencrypt_keyslot_area_jobj(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment,
+ uint64_t area_offset,
+ uint64_t area_length)
+{
+ json_object *jobj_area = json_object_new_object();
+
+ if (!jobj_area || !params || !params->resilience)
+ return NULL;
+
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
+ json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
+ json_object_object_add(jobj_area, "type", json_object_new_string(params->resilience));
+
+ if (!strcmp(params->resilience, "checksum")) {
+ log_dbg(cd, "Setting reencrypt keyslot for checksum protection.");
+ json_object_object_add(jobj_area, "hash", json_object_new_string(params->hash));
+ json_object_object_add(jobj_area, "sector_size", json_object_new_int64(alignment));
+ } else if (!strcmp(params->resilience, "journal")) {
+ log_dbg(cd, "Setting reencrypt keyslot for journal protection.");
+ } else if (!strcmp(params->resilience, "none")) {
+ log_dbg(cd, "Setting reencrypt keyslot for none protection.");
+ } else if (!strcmp(params->resilience, "datashift")) {
+ log_dbg(cd, "Setting reencrypt keyslot for datashift protection.");
+ json_object_object_add(jobj_area, "shift_size",
+ crypt_jobj_new_uint64(params->data_shift << SECTOR_SHIFT));
+ } else if (!strcmp(params->resilience, "datashift-checksum")) {
+ log_dbg(cd, "Setting reencrypt keyslot for datashift and checksum protection.");
+ json_object_object_add(jobj_area, "hash", json_object_new_string(params->hash));
+ json_object_object_add(jobj_area, "sector_size", json_object_new_int64(alignment));
+ json_object_object_add(jobj_area, "shift_size",
+ crypt_jobj_new_uint64(params->data_shift << SECTOR_SHIFT));
+ } else if (!strcmp(params->resilience, "datashift-journal")) {
+ log_dbg(cd, "Setting reencrypt keyslot for datashift and journal protection.");
+ json_object_object_add(jobj_area, "shift_size",
+ crypt_jobj_new_uint64(params->data_shift << SECTOR_SHIFT));
+ } else {
+ json_object_put(jobj_area);
+ return NULL;
+ }
+
+ return jobj_area;
+}
+
+static json_object *reencrypt_keyslot_area_jobj_update_block_size(struct crypt_device *cd,
+ json_object *jobj_area, size_t alignment)
+{
+ json_object *jobj_type, *jobj_area_new = NULL;
+
+ if (!jobj_area ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_type) ||
+ (strcmp(json_object_get_string(jobj_type), "checksum") &&
+ strcmp(json_object_get_string(jobj_type), "datashift-checksum")))
+ return NULL;
+
+ if (json_object_copy(jobj_area, &jobj_area_new))
+ return NULL;
+
+ log_dbg(cd, "Updating reencrypt resilience checksum block size.");
+
+ json_object_object_add(jobj_area_new, "sector_size", json_object_new_int64(alignment));
+
+ return jobj_area_new;
+}
+
/*
 * Allocate a new reencrypt keyslot object in the LUKS2 JSON metadata.
 *
 * Finds a free binary keyslot area (the largest available gap, or any
 * single-unit gap for plain "datashift" resilience which stores no
 * checksums), builds the keyslot JSON and inserts it into "keyslots".
 *
 * Returns 0 on success, -EINVAL on invalid parameters or metadata,
 * -ENOMEM on allocation failure or out-of-range keyslot id,
 * -ENOSPC when the enlarged JSON no longer fits the metadata area
 * (the keyslot is removed again in that case).
 */
static int reenc_keyslot_alloc(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int keyslot,
	const struct crypt_params_reencrypt *params,
	size_t alignment)
{
	int r;
	json_object *jobj_keyslots, *jobj_keyslot, *jobj_area;
	uint64_t area_offset, area_length;

	log_dbg(cd, "Allocating reencrypt keyslot %d.", keyslot);

	if (!params || !params->resilience || params->direction > CRYPT_REENCRYPT_BACKWARD)
		return -EINVAL;

	if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX)
		return -ENOMEM;

	if (!json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots))
		return -EINVAL;

	/* only plain datashift resilience mode does not require additional storage */
	if (!strcmp(params->resilience, "datashift"))
		r = LUKS2_find_area_gap(cd, hdr, 1, &area_offset, &area_length);
	else
		r = LUKS2_find_area_max_gap(cd, hdr, &area_offset, &area_length);
	if (r < 0)
		return r;

	jobj_area = reencrypt_keyslot_area_jobj(cd, params, alignment, area_offset, area_length);
	if (!jobj_area)
		return -EINVAL;

	jobj_keyslot = json_object_new_object();
	if (!jobj_keyslot) {
		json_object_put(jobj_area);
		return -ENOMEM;
	}
	/* jobj_area ownership moves to jobj_keyslot here */
	json_object_object_add(jobj_keyslot, "area", jobj_area);

	json_object_object_add(jobj_keyslot, "type", json_object_new_string("reencrypt"));
	json_object_object_add(jobj_keyslot, "key_size", json_object_new_int(1)); /* useless but mandatory */
	json_object_object_add(jobj_keyslot, "mode", json_object_new_string(crypt_reencrypt_mode_to_str(params->mode)));
	if (params->direction == CRYPT_REENCRYPT_FORWARD)
		json_object_object_add(jobj_keyslot, "direction", json_object_new_string("forward"));
	else
		json_object_object_add(jobj_keyslot, "direction", json_object_new_string("backward"));

	/* keyslot ownership moves to the header's keyslots map */
	json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
	if (LUKS2_check_json_size(cd, hdr)) {
		log_dbg(cd, "New keyslot too large to fit in free metadata space.");
		json_object_object_del_by_uint(jobj_keyslots, keyslot);
		return -ENOSPC;
	}

	JSON_DBG(cd, hdr->jobj, "JSON:");

	return 0;
}
+
+static int reenc_keyslot_store_data(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const void *buffer, size_t buffer_len)
+{
+ int devfd, r;
+ json_object *jobj_area, *jobj_offset, *jobj_length;
+ uint64_t area_offset, area_length;
+ struct device *device = crypt_metadata_device(cd);
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "offset", &jobj_offset) ||
+ !json_object_object_get_ex(jobj_area, "size", &jobj_length))
+ return -EINVAL;
+
+ area_offset = crypt_jobj_get_uint64(jobj_offset);
+ area_length = crypt_jobj_get_uint64(jobj_length);
+
+ if (!area_offset || !area_length || ((uint64_t)buffer_len > area_length))
+ return -EINVAL;
+
+ devfd = device_open_locked(cd, device, O_RDWR);
+ if (devfd >= 0) {
+ if (write_lseek_blockwise(devfd, device_block_size(cd, device),
+ device_alignment(device), CONST_CAST(void *)buffer,
+ buffer_len, area_offset) < 0)
+ r = -EIO;
+ else
+ r = 0;
+ } else
+ r = -EINVAL;
+
+ if (r)
+ log_err(cd, _("IO error while encrypting keyslot."));
+
+ return r;
+}
+
/*
 * Store reencryption resilience data (e.g. hotzone checksums) in the
 * keyslot binary area and persist the header.
 *
 * Takes the metadata device write lock for the whole area-write +
 * header-write sequence; the lock is released on every exit path below.
 *
 * Returns the keyslot number on success, negative errno otherwise.
 */
static int reenc_keyslot_store(struct crypt_device *cd,
	int keyslot,
	const char *password __attribute__((unused)),
	size_t password_len __attribute__((unused)),
	const char *buffer,
	size_t buffer_len)
{
	struct luks2_hdr *hdr;
	json_object *jobj_keyslot;
	int r = 0;

	if (!cd || !buffer || !buffer_len)
		return -EINVAL;

	if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
		return -EINVAL;

	log_dbg(cd, "Reencrypt keyslot %d store.", keyslot);

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
	if (!jobj_keyslot)
		return -EINVAL;

	/* exclusive lock; device_open_locked() in the helper relies on it */
	r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
	if (r)
		return r;

	r = reenc_keyslot_store_data(cd, jobj_keyslot, buffer, buffer_len);
	if (r < 0) {
		device_write_unlock(cd, crypt_metadata_device(cd));
		return r;
	}

	/* persist the header while still holding the write lock */
	r = LUKS2_hdr_write(cd, hdr);

	device_write_unlock(cd, crypt_metadata_device(cd));

	return r < 0 ? r : keyslot;
}
+
+static int reenc_keyslot_wipe(struct crypt_device *cd,
+ int keyslot)
+{
+ struct luks2_hdr *hdr;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ /* remove reencryption verification data */
+ LUKS2_digest_assign(cd, hdr, keyslot, CRYPT_ANY_DIGEST, 0, 0);
+
+ return 0;
+}
+
+static int reenc_keyslot_dump(struct crypt_device *cd, int keyslot)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_direction, *jobj_mode, *jobj_resilience,
+ *jobj1;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), keyslot);
+ if (!jobj_keyslot)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "direction", &jobj_direction) ||
+ !json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode) ||
+ !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_resilience))
+ return -EINVAL;
+
+ log_std(cd, "\t%-12s%s\n", "Mode:", json_object_get_string(jobj_mode));
+ log_std(cd, "\t%-12s%s\n", "Direction:", json_object_get_string(jobj_direction));
+ log_std(cd, "\t%-12s%s\n", "Resilience:", json_object_get_string(jobj_resilience));
+
+ if (!strcmp(json_object_get_string(jobj_resilience), "checksum")) {
+ json_object_object_get_ex(jobj_area, "hash", &jobj1);
+ log_std(cd, "\t%-12s%s\n", "Hash:", json_object_get_string(jobj1));
+ json_object_object_get_ex(jobj_area, "sector_size", &jobj1);
+ log_std(cd, "\t%-12s%d [bytes]\n", "Hash data:", json_object_get_int(jobj1));
+ } else if (!strcmp(json_object_get_string(jobj_resilience), "datashift")) {
+ json_object_object_get_ex(jobj_area, "shift_size", &jobj1);
+ log_std(cd, "\t%-12s%" PRIu64 "[bytes]\n", "Shift size:", crypt_jobj_get_uint64(jobj1));
+ }
+
+ json_object_object_get_ex(jobj_area, "offset", &jobj1);
+ log_std(cd, "\tArea offset:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+ json_object_object_get_ex(jobj_area, "size", &jobj1);
+ log_std(cd, "\tArea length:%" PRIu64 " [bytes]\n", crypt_jobj_get_uint64(jobj1));
+
+ return 0;
+}
+
/*
 * Validate reencrypt keyslot JSON schema.
 *
 * Checks mandatory fields (key_size, mode, direction), allowed values of
 * mode and direction, and resilience-type specific area fields: checksum
 * based types require hash + power-of-two sector_size >= SECTOR_SIZE,
 * datashift based types require a non-zero, 512-byte aligned shift_size.
 *
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int reenc_keyslot_validate(struct crypt_device *cd, json_object *jobj_keyslot)
{
	json_object *jobj_mode, *jobj_area, *jobj_type, *jobj_shift_size, *jobj_hash,
		    *jobj_sector_size, *jobj_direction, *jobj_key_size;
	const char *mode, *type, *direction;
	uint32_t sector_size;
	uint64_t shift_size;

	/* mode (string: encrypt,reencrypt,decrypt)
	 * direction (string:)
	 * area {
	 *   type: (string: datashift, journal, checksum, none, datashift-journal, datashift-checksum)
	 *   	hash: (string: checksum and datashift-checksum types)
	 *   	sector_size (uint32:  checksum and datashift-checksum types)
	 *   	shift_size (uint64: all datashift based types)
	 * }
	 */

	/* area and area type are validated in general validation code */
	if (!jobj_keyslot || !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
	    !json_object_object_get_ex(jobj_area, "type", &jobj_type))
		return -EINVAL;

	jobj_key_size = json_contains(cd, jobj_keyslot, "", "reencrypt keyslot", "key_size", json_type_int);
	jobj_mode = json_contains_string(cd, jobj_keyslot, "", "reencrypt keyslot", "mode");
	jobj_direction = json_contains_string(cd, jobj_keyslot, "", "reencrypt keyslot", "direction");

	if (!jobj_mode || !jobj_direction || !jobj_key_size)
		return -EINVAL;

	/* key_size is fixed to 1 for reencrypt keyslots (mandatory but unused) */
	if (!validate_json_uint32(jobj_key_size) || crypt_jobj_get_uint32(jobj_key_size) != 1) {
		log_dbg(cd, "Illegal reencrypt key size.");
		return -EINVAL;
	}

	mode = json_object_get_string(jobj_mode);
	type = json_object_get_string(jobj_type);
	direction = json_object_get_string(jobj_direction);

	if (strcmp(mode, "reencrypt") && strcmp(mode, "encrypt") &&
	    strcmp(mode, "decrypt")) {
		log_dbg(cd, "Illegal reencrypt mode %s.", mode);
		return -EINVAL;
	}

	if (strcmp(direction, "forward") && strcmp(direction, "backward")) {
		log_dbg(cd, "Illegal reencrypt direction %s.", direction);
		return -EINVAL;
	}

	/* checksum based resilience: hash + sane checksum block size required */
	if (!strcmp(type, "checksum") || !strcmp(type, "datashift-checksum")) {
		jobj_hash = json_contains_string(cd, jobj_area, "type:checksum",
						 "Keyslot area", "hash");
		jobj_sector_size = json_contains(cd, jobj_area, "type:checksum",
						 "Keyslot area", "sector_size", json_type_int);
		if (!jobj_hash || !jobj_sector_size)
			return -EINVAL;
		if (!validate_json_uint32(jobj_sector_size))
			return -EINVAL;
		sector_size = crypt_jobj_get_uint32(jobj_sector_size);
		if (sector_size < SECTOR_SIZE || NOTPOW2(sector_size)) {
			log_dbg(cd, "Invalid sector_size (%" PRIu32 ") for checksum resilience mode.",
				sector_size);
			return -EINVAL;
		}
	} else if (!strcmp(type, "datashift") ||
		   !strcmp(type, "datashift-checksum") ||
		   !strcmp(type, "datashift-journal")) {
		/* shift_size is stored as a string-encoded uint64 */
		if (!(jobj_shift_size = json_contains_string(cd, jobj_area, "type:datashift",
							     "Keyslot area", "shift_size")))
			return -EINVAL;

		shift_size = crypt_jobj_get_uint64(jobj_shift_size);
		if (!shift_size)
			return -EINVAL;

		if (MISALIGNED_512(shift_size)) {
			log_dbg(cd, "Shift size field has to be aligned to 512 bytes.");
			return -EINVAL;
		}
	}

	return 0;
}
+
+static int reenc_keyslot_update_needed(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ const char *type;
+ json_object *jobj_area, *jobj_type, *jobj;
+
+ if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_type) ||
+ !(type = json_object_get_string(jobj_type)))
+ return -EINVAL;
+
+ /*
+ * If no resilience mode change is requested and effective
+ * resilience mode is 'checksum' then check alignment matches
+ * stored checksum block size.
+ */
+ if (!params || !params->resilience) {
+ if (!strcmp(json_object_get_string(jobj_type), "checksum") ||
+ !strcmp(json_object_get_string(jobj_type), "datashift-checksum"))
+ return (json_object_object_get_ex(jobj_area, "sector_size", &jobj) ||
+ alignment != crypt_jobj_get_uint32(jobj));
+ return 0;
+ }
+
+ if (strcmp(params->resilience, type))
+ return 1;
+
+ if (!strcmp(type, "checksum") ||
+ !strcmp(type, "datashift-checksum")) {
+ if (!params->hash)
+ return -EINVAL;
+ if (!json_object_object_get_ex(jobj_area, "hash", &jobj) ||
+ strcmp(json_object_get_string(jobj), params->hash) ||
+ !json_object_object_get_ex(jobj_area, "sector_size", &jobj) ||
+ crypt_jobj_get_uint32(jobj) != alignment)
+ return 1;
+ }
+
+ if (!strncmp(type, "datashift", 9)) {
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj))
+ return -EINVAL;
+ if ((params->data_shift << SECTOR_SHIFT) != crypt_jobj_get_uint64(jobj))
+ return 1;
+ }
+
+ /* nothing to compare with 'none' and 'journal' */
+ return 0;
+}
+
/*
 * Initialize checksum resilience protection state from the keyslot "area"
 * JSON object: copies the hash name, opens a hash context, and records
 * checksum block size and total checksum area length.
 *
 * On success sets rp->type to REENC_PROTECTION_CHECKSUM and returns 0.
 * On any failure returns -EINVAL and leaves no open hash context behind.
 */
static int load_checksum_protection(struct crypt_device *cd,
	json_object *jobj_area,
	uint64_t area_length,
	struct reenc_protection *rp)
{
	int r;
	json_object *jobj_hash, *jobj_block_size;

	if (!jobj_area || !rp ||
	    !json_object_object_get_ex(jobj_area, "hash", &jobj_hash) ||
	    !json_object_object_get_ex(jobj_area, "sector_size", &jobj_block_size))
		return -EINVAL;

	/* bounded copy of the hash name; reject truncation */
	r = snprintf(rp->p.csum.hash, sizeof(rp->p.csum.hash), "%s", json_object_get_string(jobj_hash));
	if (r < 0 || (size_t)r >= sizeof(rp->p.csum.hash))
		return -EINVAL;

	if (crypt_hash_init(&rp->p.csum.ch, rp->p.csum.hash)) {
		log_err(cd, _("Hash algorithm %s is not available."), rp->p.csum.hash);
		return -EINVAL;
	}

	r = crypt_hash_size(rp->p.csum.hash);
	if (r <= 0) {
		/* release the context opened above before failing */
		crypt_hash_destroy(rp->p.csum.ch);
		rp->p.csum.ch = NULL;
		log_dbg(cd, "Invalid hash size");
		return -EINVAL;
	}

	rp->p.csum.hash_size = r;
	rp->p.csum.block_size = crypt_jobj_get_uint32(jobj_block_size);
	rp->p.csum.checksums_len = area_length;

	rp->type = REENC_PROTECTION_CHECKSUM;
	return 0;
}
+
+static int reenc_keyslot_load_resilience_primary(struct crypt_device *cd,
+ const char *type,
+ json_object *jobj_area,
+ uint64_t area_length,
+ struct reenc_protection *rp)
+{
+ json_object *jobj;
+
+ if (!strcmp(type, "checksum")) {
+ log_dbg(cd, "Initializing checksum resilience mode.");
+ return load_checksum_protection(cd, jobj_area, area_length, rp);
+ } else if (!strcmp(type, "journal")) {
+ log_dbg(cd, "Initializing journal resilience mode.");
+ rp->type = REENC_PROTECTION_JOURNAL;
+ } else if (!strcmp(type, "none")) {
+ log_dbg(cd, "Initializing none resilience mode.");
+ rp->type = REENC_PROTECTION_NONE;
+ } else if (!strcmp(type, "datashift") ||
+ !strcmp(type, "datashift-checksum") ||
+ !strcmp(type, "datashift-journal")) {
+ log_dbg(cd, "Initializing datashift resilience mode.");
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj))
+ return -EINVAL;
+ rp->type = REENC_PROTECTION_DATASHIFT;
+ rp->p.ds.data_shift = crypt_jobj_get_uint64(jobj);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int reenc_keyslot_load_resilience_secondary(struct crypt_device *cd,
+ const char *type,
+ json_object *jobj_area,
+ uint64_t area_length,
+ struct reenc_protection *rp)
+{
+ if (!strcmp(type, "datashift-checksum")) {
+ log_dbg(cd, "Initializing checksum resilience mode.");
+ return load_checksum_protection(cd, jobj_area, area_length, rp);
+ } else if (!strcmp(type, "datashift-journal")) {
+ log_dbg(cd, "Initializing journal resilience mode.");
+ rp->type = REENC_PROTECTION_JOURNAL;
+ } else
+ rp->type = REENC_PROTECTION_NOT_SET;
+
+ return 0;
+}
+
+static int reenc_keyslot_load_resilience(struct crypt_device *cd,
+ json_object *jobj_keyslot,
+ struct reenc_protection *rp,
+ bool primary)
+{
+ const char *type;
+ int r;
+ json_object *jobj_area, *jobj_type;
+ uint64_t dummy, area_length;
+
+ if (!rp || !json_object_object_get_ex(jobj_keyslot, "area", &jobj_area) ||
+ !json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return -EINVAL;
+
+ r = LUKS2_keyslot_jobj_area(jobj_keyslot, &dummy, &area_length);
+ if (r < 0)
+ return r;
+
+ type = json_object_get_string(jobj_type);
+ if (!type)
+ return -EINVAL;
+
+ if (primary)
+ return reenc_keyslot_load_resilience_primary(cd, type, jobj_area, area_length, rp);
+ else
+ return reenc_keyslot_load_resilience_secondary(cd, type, jobj_area, area_length, rp);
+}
+
+static bool reenc_keyslot_update_is_valid(struct crypt_device *cd,
+ json_object *jobj_area,
+ const struct crypt_params_reencrypt *params)
+{
+ const char *type;
+ json_object *jobj_type, *jobj;
+
+ if (!json_object_object_get_ex(jobj_area, "type", &jobj_type) ||
+ !(type = json_object_get_string(jobj_type)))
+ return false;
+
+ /* do not allow switch to/away from datashift resilience type */
+ if ((strcmp(params->resilience, "datashift") && !strcmp(type, "datashift")) ||
+ (!strcmp(params->resilience, "datashift") && strcmp(type, "datashift")))
+ return false;
+
+ /* do not allow switch to/away from datashift- resilience subvariants */
+ if ((strncmp(params->resilience, "datashift-", 10) &&
+ !strncmp(type, "datashift-", 10)) ||
+ (!strncmp(params->resilience, "datashift-", 10) &&
+ strncmp(type, "datashift-", 10)))
+ return false;
+
+ /* datashift value is also immutable */
+ if (!strncmp(type, "datashift", 9)) {
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj))
+ return false;
+ return (params->data_shift << SECTOR_SHIFT) == crypt_jobj_get_uint64(jobj);
+ }
+
+ return true;
+}
+
/*
 * Replace the keyslot "area" object according to the requested parameters
 * (or only refresh the checksum block size when no params were given).
 *
 * The old area object is swapped out atomically with respect to the
 * keyslot JSON: if the new object fails validation, the original is put
 * back. Reference counting below keeps the old object alive across the
 * swap so it can be restored.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reenc_keyslot_update(struct crypt_device *cd,
	json_object *jobj_keyslot,
	const struct crypt_params_reencrypt *params,
	size_t alignment)
{
	int r;
	json_object *jobj_area, *jobj_area_new;
	uint64_t area_offset, area_length;

	if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
		return -EINVAL;

	r = LUKS2_keyslot_jobj_area(jobj_keyslot, &area_offset, &area_length);
	if (r < 0)
		return r;

	if (!params || !params->resilience)
		/* no mode change: only refresh checksum block size (if applicable) */
		jobj_area_new = reencrypt_keyslot_area_jobj_update_block_size(cd, jobj_area, alignment);
	else {
		if (!reenc_keyslot_update_is_valid(cd, jobj_area, params)) {
			log_err(cd, _("Invalid reencryption resilience mode change requested."));
			return -EINVAL;
		}

		/* rebuild the area object with the same offset/length */
		jobj_area_new = reencrypt_keyslot_area_jobj(cd, params, alignment,
							    area_offset, area_length);
	}

	if (!jobj_area_new)
		return -EINVAL;

	/* increase refcount for validation purposes */
	json_object_get(jobj_area);

	/* drops the keyslot's reference to the old area object */
	json_object_object_add(jobj_keyslot, "area", jobj_area_new);

	r = reenc_keyslot_validate(cd, jobj_keyslot);
	if (r) {
		/* replace invalid object with previous valid one */
		json_object_object_add(jobj_keyslot, "area", jobj_area);
		return -EINVAL;
	}

	/* previous area object is no longer needed */
	json_object_put(jobj_area);

	return 0;
}
+
+int LUKS2_keyslot_reencrypt_allocate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ int r;
+
+ if (keyslot == CRYPT_ANY_SLOT)
+ return -EINVAL;
+
+ r = reenc_keyslot_alloc(cd, hdr, keyslot, params, alignment);
+ if (r < 0)
+ return r;
+
+ r = LUKS2_keyslot_priority_set(cd, hdr, keyslot, CRYPT_SLOT_PRIORITY_IGNORE, 0);
+ if (r < 0)
+ return r;
+
+ r = reenc_keyslot_validate(cd, LUKS2_get_keyslot_jobj(hdr, keyslot));
+ if (r) {
+ log_dbg(cd, "Keyslot validation failed.");
+ return r;
+ }
+
+ return 0;
+}
+
+int LUKS2_keyslot_reencrypt_update_needed(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ const struct crypt_params_reencrypt *params,
+ size_t alignment)
+{
+ int r;
+ json_object *jobj_type, *jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+
+ if (!jobj_keyslot ||
+ !json_object_object_get_ex(jobj_keyslot, "type", &jobj_type) ||
+ strcmp(json_object_get_string(jobj_type), "reencrypt"))
+ return -EINVAL;
+
+ r = reenc_keyslot_update_needed(cd, jobj_keyslot, params, alignment);
+ if (!r)
+ log_dbg(cd, "No update of reencrypt keyslot needed.");
+
+ return r;
+}
+
/*
 * Public entry: update the reencrypt keyslot resilience metadata.
 *
 * Sequence: verify existing reencryption digest against @vks, rewrite the
 * keyslot area object, then (for combined datashift-* modes) check that
 * the backup moved segment still fits in the maximum hotzone the new
 * secondary protection allows. Finally the reencryption verification
 * digest is recreated and the header is written out.
 *
 * Returns 0 on success, negative errno otherwise. Note the header is
 * written only when digest refresh succeeded.
 */
int LUKS2_keyslot_reencrypt_update(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int keyslot,
	const struct crypt_params_reencrypt *params,
	size_t alignment,
	struct volume_key *vks)
{
	int r;
	uint8_t version;
	uint64_t max_size, moved_segment_size;
	json_object *jobj_type, *jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
	struct reenc_protection check_rp = {};

	if (!jobj_keyslot ||
	    !json_object_object_get_ex(jobj_keyslot, "type", &jobj_type) ||
	    strcmp(json_object_get_string(jobj_type), "reencrypt"))
		return -EINVAL;

	if (LUKS2_config_get_reencrypt_version(hdr, &version))
		return -EINVAL;

	/* verify existing reencryption metadata before updating */
	r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
	if (r < 0)
		return r;

	r = reenc_keyslot_update(cd, jobj_keyslot, params, alignment);
	if (r < 0)
		return r;

	/* load the (possibly new) secondary protection to re-check sizing */
	r = reenc_keyslot_load_resilience(cd, jobj_keyslot, &check_rp, false);
	if (r < 0)
		return r;

	if (check_rp.type != REENC_PROTECTION_NOT_SET) {
		r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp, keyslot, &max_size);
		LUKS2_reencrypt_protection_erase(&check_rp);
		if (r < 0)
			return r;
		/* moved segment must fit in a single hotzone of the new mode */
		moved_segment_size = json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), 0);
		if (!moved_segment_size)
			return -EINVAL;
		if (moved_segment_size > max_size) {
			log_err(cd, _("Can not update resilience type. "
				      "New type only provides %" PRIu64 " bytes, "
				      "required space is: %" PRIu64 " bytes."),
				max_size, moved_segment_size);
			return -EINVAL;
		}
	}

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, version, vks);
	if (r < 0)
		log_err(cd, _("Failed to refresh reencryption verification digest."));

	return r ?: LUKS2_hdr_write(cd, hdr);
}
+
+int LUKS2_keyslot_reencrypt_load(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ struct reenc_protection *rp,
+ bool primary)
+{
+ json_object *jobj_type, *jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
+
+ if (!jobj_keyslot ||
+ !json_object_object_get_ex(jobj_keyslot, "type", &jobj_type) ||
+ strcmp(json_object_get_string(jobj_type), "reencrypt"))
+ return -EINVAL;
+
+ return reenc_keyslot_load_resilience(cd, jobj_keyslot, rp, primary);
+}
+
/*
 * Handler table for the internal "reencrypt" keyslot tracking online
 * reencryption state. It stores no encrypted volume key (open always
 * fails with -ENOENT); allocation and updates go through the dedicated
 * LUKS2_keyslot_reencrypt_* API above, hence no alloc/update callbacks.
 */
const keyslot_handler reenc_keyslot = {
	.name = "reencrypt",
	.open = reenc_keyslot_open,
	.store = reenc_keyslot_store, /* initialization only or also per every chunk write */
	.wipe = reenc_keyslot_wipe,
	.dump = reenc_keyslot_dump,
	.validate = reenc_keyslot_validate
};
diff --git a/lib/luks2/luks2_luks1_convert.c b/lib/luks2/luks2_luks1_convert.c
new file mode 100644
index 0000000..6d3fa1e
--- /dev/null
+++ b/lib/luks2/luks2_luks1_convert.c
@@ -0,0 +1,945 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, LUKS1 conversion code
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Ondrej Kozina
+ * Copyright (C) 2015-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include "../luks1/luks.h"
+#include "../luks1/af.h"
+
/* This differs from LUKS_check_cipher() that it does not check dm-crypt fallback. */
/*
 * Probe that cipher/cipher_mode with a key of @keylength bytes is usable
 * through the userspace crypto storage wrapper (no device-mapper needed).
 * A random throwaway key is used to avoid known weak-key rejections.
 *
 * Returns a non-negative value on success, negative errno on failure.
 */
int LUKS2_check_cipher(struct crypt_device *cd,
		      size_t keylength,
		      const char *cipher,
		      const char *cipher_mode)
{
	int r;
	struct crypt_storage *s;
	char buf[SECTOR_SIZE], *empty_key;

	log_dbg(cd, "Checking if cipher %s-%s is usable (storage wrapper).", cipher, cipher_mode);

	empty_key = crypt_safe_alloc(keylength);
	if (!empty_key)
		return -ENOMEM;

	/* No need to get KEY quality random but it must avoid known weak keys. */
	r = crypt_random_get(cd, empty_key, keylength, CRYPT_RND_NORMAL);
	if (r < 0)
		goto out;

	r = crypt_storage_init(&s, SECTOR_SIZE, cipher, cipher_mode, empty_key, keylength, false);
	if (r < 0)
		goto out;

	/* the decrypted content is irrelevant; only the call's success matters */
	memset(buf, 0, sizeof(buf));
	r = crypt_storage_decrypt(s, 0, sizeof(buf), buf);
	crypt_storage_destroy(s);
out:
	/* always scrub key material and the work buffer before returning */
	crypt_safe_free(empty_key);
	crypt_safe_memzero(buf, sizeof(buf));
	return r;
}
+
+static int json_luks1_keyslot(const struct luks_phdr *hdr_v1, int keyslot, struct json_object **keyslot_object)
+{
+ char *base64_str, cipher[LUKS_CIPHERNAME_L+LUKS_CIPHERMODE_L];
+ size_t base64_len;
+ struct json_object *keyslot_obj, *field, *jobj_kdf, *jobj_af, *jobj_area;
+ uint64_t offset, area_size, length;
+ int r;
+
+ keyslot_obj = json_object_new_object();
+ json_object_object_add(keyslot_obj, "type", json_object_new_string("luks2"));
+ json_object_object_add(keyslot_obj, "key_size", json_object_new_int64(hdr_v1->keyBytes));
+
+ /* KDF */
+ jobj_kdf = json_object_new_object();
+ json_object_object_add(jobj_kdf, "type", json_object_new_string(CRYPT_KDF_PBKDF2));
+ json_object_object_add(jobj_kdf, "hash", json_object_new_string(hdr_v1->hashSpec));
+ json_object_object_add(jobj_kdf, "iterations", json_object_new_int64(hdr_v1->keyblock[keyslot].passwordIterations));
+ /* salt field */
+ r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->keyblock[keyslot].passwordSalt, LUKS_SALTSIZE);
+ if (r < 0) {
+ json_object_put(keyslot_obj);
+ json_object_put(jobj_kdf);
+ return r;
+ }
+ field = json_object_new_string_len(base64_str, base64_len);
+ free(base64_str);
+ json_object_object_add(jobj_kdf, "salt", field);
+ json_object_object_add(keyslot_obj, "kdf", jobj_kdf);
+
+ /* AF */
+ jobj_af = json_object_new_object();
+ json_object_object_add(jobj_af, "type", json_object_new_string("luks1"));
+ json_object_object_add(jobj_af, "hash", json_object_new_string(hdr_v1->hashSpec));
+ /* stripes field ignored, fixed to LUKS_STRIPES (4000) */
+ json_object_object_add(jobj_af, "stripes", json_object_new_int(LUKS_STRIPES));
+ json_object_object_add(keyslot_obj, "af", jobj_af);
+
+ /* Area */
+ jobj_area = json_object_new_object();
+ json_object_object_add(jobj_area, "type", json_object_new_string("raw"));
+
+ /* encryption algorithm field */
+ if (*hdr_v1->cipherMode != '\0') {
+ if (snprintf(cipher, sizeof(cipher), "%s-%s", hdr_v1->cipherName, hdr_v1->cipherMode) < 0) {
+ json_object_put(keyslot_obj);
+ json_object_put(jobj_area);
+ return -EINVAL;
+ }
+ json_object_object_add(jobj_area, "encryption", json_object_new_string(cipher));
+ } else
+ json_object_object_add(jobj_area, "encryption", json_object_new_string(hdr_v1->cipherName));
+
+ /* area */
+ if (LUKS_keyslot_area(hdr_v1, keyslot, &offset, &length)) {
+ json_object_put(keyslot_obj);
+ json_object_put(jobj_area);
+ return -EINVAL;
+ }
+ area_size = size_round_up(length, 4096);
+ json_object_object_add(jobj_area, "key_size", json_object_new_int(hdr_v1->keyBytes));
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(offset));
+ json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_size));
+ json_object_object_add(keyslot_obj, "area", jobj_area);
+
+ *keyslot_object = keyslot_obj;
+ return 0;
+}
+
+static int json_luks1_keyslots(const struct luks_phdr *hdr_v1, struct json_object **keyslots_object)
+{
+ int keyslot, r;
+ struct json_object *keyslot_obj, *field;
+
+ keyslot_obj = json_object_new_object();
+ if (!keyslot_obj)
+ return -ENOMEM;
+
+ for (keyslot = 0; keyslot < LUKS_NUMKEYS; keyslot++) {
+ if (hdr_v1->keyblock[keyslot].active != LUKS_KEY_ENABLED)
+ continue;
+ r = json_luks1_keyslot(hdr_v1, keyslot, &field);
+ if (r) {
+ json_object_put(keyslot_obj);
+ return r;
+ }
+ json_object_object_add_by_uint(keyslot_obj, keyslot, field);
+ }
+
+ *keyslots_object = keyslot_obj;
+ return 0;
+}
+
+static int json_luks1_segment(const struct luks_phdr *hdr_v1, struct json_object **segment_object)
+{
+ const char *c;
+ char cipher[LUKS_CIPHERNAME_L+LUKS_CIPHERMODE_L];
+ struct json_object *segment_obj, *field;
+ uint64_t number;
+
+ segment_obj = json_object_new_object();
+ if (!segment_obj)
+ return -ENOMEM;
+
+ /* type field */
+ field = json_object_new_string("crypt");
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "type", field);
+
+ /* offset field */
+ number = (uint64_t)hdr_v1->payloadOffset * SECTOR_SIZE;
+
+ field = crypt_jobj_new_uint64(number);
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "offset", field);
+
+ /* iv_tweak field */
+ field = json_object_new_string("0");
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "iv_tweak", field);
+
+ /* length field */
+ field = json_object_new_string("dynamic");
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "size", field);
+
+ /* cipher field */
+ if (*hdr_v1->cipherMode != '\0') {
+ if (snprintf(cipher, sizeof(cipher), "%s-%s", hdr_v1->cipherName, hdr_v1->cipherMode) < 0) {
+ json_object_put(segment_obj);
+ return -EINVAL;
+ }
+ c = cipher;
+ } else
+ c = hdr_v1->cipherName;
+
+ field = json_object_new_string(c);
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "encryption", field);
+
+ /* block field */
+ field = json_object_new_int(SECTOR_SIZE);
+ if (!field) {
+ json_object_put(segment_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(segment_obj, "sector_size", field);
+
+ *segment_object = segment_obj;
+ return 0;
+}
+
/*
 * Wrap the converted LUKS1 segment into the LUKS2 "segments" object.
 * LUKS1 has exactly one data segment, stored under index 0.
 */
static int json_luks1_segments(const struct luks_phdr *hdr_v1, struct json_object **segments_object)
{
	struct json_object *jobj_segments, *jobj_segment;
	int r;

	jobj_segments = json_object_new_object();
	if (!jobj_segments)
		return -ENOMEM;

	r = json_luks1_segment(hdr_v1, &jobj_segment);
	if (r) {
		json_object_put(jobj_segments);
		return r;
	}
	json_object_object_add_by_uint(jobj_segments, 0, jobj_segment);

	*segments_object = jobj_segments;
	return 0;
}
+
+static int json_luks1_digest(const struct luks_phdr *hdr_v1, struct json_object **digest_object)
+{
+ char keyslot_str[16], *base64_str;
+ int r, ks;
+ size_t base64_len;
+ struct json_object *digest_obj, *array, *field;
+
+ digest_obj = json_object_new_object();
+ if (!digest_obj)
+ return -ENOMEM;
+
+ /* type field */
+ field = json_object_new_string("pbkdf2");
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "type", field);
+
+ /* keyslots array */
+ array = json_object_new_array();
+ if (!array) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "keyslots", json_object_get(array));
+
+ for (ks = 0; ks < LUKS_NUMKEYS; ks++) {
+ if (hdr_v1->keyblock[ks].active != LUKS_KEY_ENABLED)
+ continue;
+ if (snprintf(keyslot_str, sizeof(keyslot_str), "%d", ks) < 0) {
+ json_object_put(field);
+ json_object_put(array);
+ json_object_put(digest_obj);
+ return -EINVAL;
+ }
+
+ field = json_object_new_string(keyslot_str);
+ if (!field || json_object_array_add(array, field) < 0) {
+ json_object_put(field);
+ json_object_put(array);
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ }
+
+ json_object_put(array);
+
+ /* segments array */
+ array = json_object_new_array();
+ if (!array) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "segments", json_object_get(array));
+
+ field = json_object_new_string("0");
+ if (!field || json_object_array_add(array, field) < 0) {
+ json_object_put(field);
+ json_object_put(array);
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+
+ json_object_put(array);
+
+ /* hash field */
+ field = json_object_new_string(hdr_v1->hashSpec);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "hash", field);
+
+ /* salt field */
+ r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->mkDigestSalt, LUKS_SALTSIZE);
+ if (r < 0) {
+ json_object_put(digest_obj);
+ return r;
+ }
+
+ field = json_object_new_string_len(base64_str, base64_len);
+ free(base64_str);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "salt", field);
+
+ /* digest field */
+ r = crypt_base64_encode(&base64_str, &base64_len, hdr_v1->mkDigest, LUKS_DIGESTSIZE);
+ if (r < 0) {
+ json_object_put(digest_obj);
+ return r;
+ }
+
+ field = json_object_new_string_len(base64_str, base64_len);
+ free(base64_str);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "digest", field);
+
+ /* iterations field */
+ field = json_object_new_int64(hdr_v1->mkDigestIterations);
+ if (!field) {
+ json_object_put(digest_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(digest_obj, "iterations", field);
+
+ *digest_object = digest_obj;
+ return 0;
+}
+
/*
 * Wrap the converted LUKS1 digest into the LUKS2 "digests" object.
 * LUKS1 has a single master-key digest, stored under id "0".
 */
static int json_luks1_digests(const struct luks_phdr *hdr_v1, struct json_object **digests_object)
{
	struct json_object *jobj_digests, *jobj_digest;
	int r;

	jobj_digests = json_object_new_object();
	if (!jobj_digests)
		return -ENOMEM;

	r = json_luks1_digest(hdr_v1, &jobj_digest);
	if (r) {
		json_object_put(jobj_digests);
		return r;
	}
	json_object_object_add(jobj_digests, "0", jobj_digest);

	*digests_object = jobj_digests;
	return 0;
}
+
+static int json_luks1_object(struct luks_phdr *hdr_v1, struct json_object **luks1_object, uint64_t keyslots_size)
+{
+ int r;
+ struct json_object *luks1_obj, *field;
+ uint64_t json_size;
+
+ luks1_obj = json_object_new_object();
+ if (!luks1_obj)
+ return -ENOMEM;
+
+ /* keyslots field */
+ r = json_luks1_keyslots(hdr_v1, &field);
+ if (r) {
+ json_object_put(luks1_obj);
+ return r;
+ }
+ json_object_object_add(luks1_obj, "keyslots", field);
+
+ /* tokens field */
+ field = json_object_new_object();
+ if (!field) {
+ json_object_put(luks1_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(luks1_obj, "tokens", field);
+
+ /* segments field */
+ r = json_luks1_segments(hdr_v1, &field);
+ if (r) {
+ json_object_put(luks1_obj);
+ return r;
+ }
+ json_object_object_add(luks1_obj, "segments", field);
+
+ /* digests field */
+ r = json_luks1_digests(hdr_v1, &field);
+ if (r) {
+ json_object_put(luks1_obj);
+ return r;
+ }
+ json_object_object_add(luks1_obj, "digests", field);
+
+ /* config field */
+ /* anything else? */
+ field = json_object_new_object();
+ if (!field) {
+ json_object_put(luks1_obj);
+ return -ENOMEM;
+ }
+ json_object_object_add(luks1_obj, "config", field);
+
+ json_size = LUKS2_HDR_16K_LEN - LUKS2_HDR_BIN_LEN;
+ json_object_object_add(field, "json_size", crypt_jobj_new_uint64(json_size));
+ keyslots_size -= (keyslots_size % 4096);
+ json_object_object_add(field, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));
+
+ *luks1_object = luks1_obj;
+ return 0;
+}
+
+static void move_keyslot_offset(json_object *jobj, int offset_add)
+{
+ json_object *jobj1, *jobj2, *jobj_area;
+ uint64_t offset = 0;
+
+ json_object_object_get_ex(jobj, "keyslots", &jobj1);
+ json_object_object_foreach(jobj1, key, val) {
+ UNUSED(key);
+ json_object_object_get_ex(val, "area", &jobj_area);
+ json_object_object_get_ex(jobj_area, "offset", &jobj2);
+ offset = crypt_jobj_get_uint64(jobj2) + offset_add;
+ json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(offset));
+ }
+}
+
/*
 * Copy @buf_size bytes of keyslot material from @offset_from to
 * @offset_to on the metadata device. The target area is probed by a
 * read first so a trimmed backup fails cleanly. Returns 0 or -EIO/-ENOMEM.
 */
static int move_keyslot_areas(struct crypt_device *cd, off_t offset_from,
			      off_t offset_to, size_t buf_size)
{
	struct device *device = crypt_metadata_device(cd);
	void *buf = NULL;
	int devfd, r = -EIO;

	log_dbg(cd, "Moving keyslot areas of size %zu from %jd to %jd.",
		buf_size, (intmax_t)offset_from, (intmax_t)offset_to);

	if (posix_memalign(&buf, crypt_getpagesize(), buf_size))
		return -ENOMEM;

	devfd = device_open(cd, device, O_RDWR);
	if (devfd < 0) {
		free(buf);
		return -EIO;
	}

	/* This can safely fail (for block devices). It only allocates space if it is possible. */
	if (posix_fallocate(devfd, offset_to, buf_size))
		log_dbg(cd, "Preallocation (fallocate) of new keyslot area not available.");

	/* Try to read *new* area to check that area is there (trimmed backup). */
	if (read_lseek_blockwise(devfd, device_block_size(cd, device),
				 device_alignment(device), buf, buf_size,
				 offset_to) != (ssize_t)buf_size)
		goto out;

	/* Read the original keyslot material... */
	if (read_lseek_blockwise(devfd, device_block_size(cd, device),
				 device_alignment(device), buf, buf_size,
				 offset_from) != (ssize_t)buf_size)
		goto out;

	/* ...and write it to the new location. */
	if (write_lseek_blockwise(devfd, device_block_size(cd, device),
				  device_alignment(device), buf, buf_size,
				  offset_to) != (ssize_t)buf_size)
		goto out;

	r = 0;
out:
	device_sync(cd, device);
	/* buffer held keyslot (key-derived) material, wipe it */
	crypt_safe_memzero(buf, buf_size);
	free(buf);

	return r;
}
+
/*
 * Check whether the header UUID is backed by an active device-mapper
 * device. Returns > 0 when in use, 0 when unused, negative on lookup error.
 */
static int luks_header_in_use(struct crypt_device *cd)
{
	int r = lookup_dm_dev_by_uuid(cd, crypt_get_uuid(cd), crypt_get_type(cd));

	if (r < 0)
		log_err(cd, _("Cannot check status of device with uuid: %s."), crypt_get_uuid(cd));

	return r;
}
+
+/* Check if there is a luksmeta area (foreign metadata created by the luksmeta package) */
+static int luksmeta_header_present(struct crypt_device *cd, off_t luks1_size)
+{
+ int devfd, r = 0;
+ static const uint8_t LM_MAGIC[] = { 'L', 'U', 'K', 'S', 'M', 'E', 'T', 'A' };
+ struct device *device = crypt_metadata_device(cd);
+ void *buf = NULL;
+
+ if (posix_memalign(&buf, crypt_getpagesize(), sizeof(LM_MAGIC)))
+ return -ENOMEM;
+
+ devfd = device_open(cd, device, O_RDONLY);
+ if (devfd < 0) {
+ free(buf);
+ return -EIO;
+ }
+
+ /* Note: we must not detect failure as problem here, header can be trimmed. */
+ if (read_lseek_blockwise(devfd, device_block_size(cd, device), device_alignment(device),
+ buf, sizeof(LM_MAGIC), luks1_size) == (ssize_t)sizeof(LM_MAGIC) &&
+ !memcmp(LM_MAGIC, buf, sizeof(LM_MAGIC))) {
+ log_err(cd, _("Unable to convert header with LUKSMETA additional metadata."));
+ r = -EBUSY;
+ }
+
+ free(buf);
+ return r;
+}
+
+/* Convert LUKS1 -> LUKS2 */
+int LUKS2_luks1_to_luks2(struct crypt_device *cd, struct luks_phdr *hdr1, struct luks2_hdr *hdr2)
+{
+ int r;
+ json_object *jobj = NULL;
+ size_t buf_size, buf_offset, luks1_size, luks1_shift = 2 * LUKS2_HDR_16K_LEN - LUKS_ALIGN_KEYSLOTS;
+ uint64_t required_size, max_size = crypt_get_data_offset(cd) * SECTOR_SIZE;
+
+ /* for detached headers max size == device size */
+ if (!max_size && (r = device_size(crypt_metadata_device(cd), &max_size)))
+ return r;
+
+ luks1_size = LUKS_device_sectors(hdr1) << SECTOR_SHIFT;
+ luks1_size = size_round_up(luks1_size, LUKS_ALIGN_KEYSLOTS);
+ if (!luks1_size)
+ return -EINVAL;
+
+ if (LUKS_keyslots_offset(hdr1) != (LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE)) {
+ log_dbg(cd, "Unsupported keyslots material offset: %zu.", LUKS_keyslots_offset(hdr1));
+ return -EINVAL;
+ }
+
+ if (LUKS2_check_cipher(cd, hdr1->keyBytes, hdr1->cipherName, hdr1->cipherMode)) {
+ log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."),
+ hdr1->cipherName, hdr1->cipherMode);
+ return -EINVAL;
+ }
+
+ if (luksmeta_header_present(cd, luks1_size))
+ return -EINVAL;
+
+ log_dbg(cd, "Max size: %" PRIu64 ", LUKS1 (full) header size %zu , required shift: %zu",
+ max_size, luks1_size, luks1_shift);
+
+ required_size = luks1_size + luks1_shift;
+
+ if ((max_size < required_size) &&
+ device_fallocate(crypt_metadata_device(cd), required_size)) {
+ log_err(cd, _("Unable to move keyslot area. Not enough space."));
+ return -EINVAL;
+ }
+
+ if (max_size < required_size)
+ max_size = required_size;
+
+ r = json_luks1_object(hdr1, &jobj, max_size - 2 * LUKS2_HDR_16K_LEN);
+ if (r < 0)
+ return r;
+
+ move_keyslot_offset(jobj, luks1_shift);
+
+ /* Create and fill LUKS2 hdr */
+ memset(hdr2, 0, sizeof(*hdr2));
+ hdr2->hdr_size = LUKS2_HDR_16K_LEN;
+ hdr2->seqid = 1;
+ hdr2->version = 2;
+ strncpy(hdr2->checksum_alg, "sha256", LUKS2_CHECKSUM_ALG_L);
+ crypt_random_get(cd, (char*)hdr2->salt1, sizeof(hdr2->salt1), CRYPT_RND_SALT);
+ crypt_random_get(cd, (char*)hdr2->salt2, sizeof(hdr2->salt2), CRYPT_RND_SALT);
+ strncpy(hdr2->uuid, crypt_get_uuid(cd), LUKS2_UUID_L-1); /* UUID should be max 36 chars */
+ hdr2->jobj = jobj;
+
+ /*
+ * It duplicates check in LUKS2_hdr_write() but we don't want to move
+ * keyslot areas in case it would fail later
+ */
+ if (max_size < LUKS2_hdr_and_areas_size(hdr2)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* check future LUKS2 metadata before moving keyslots area */
+ if (LUKS2_hdr_validate(cd, hdr2->jobj, hdr2->hdr_size - LUKS2_HDR_BIN_LEN)) {
+ log_err(cd, _("Cannot convert to LUKS2 format - invalid metadata."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if ((r = luks_header_in_use(cd))) {
+ if (r > 0)
+ r = -EBUSY;
+ goto out;
+ }
+
+ /* move keyslots 4k -> 32k offset */
+ buf_offset = 2 * LUKS2_HDR_16K_LEN;
+ buf_size = luks1_size - LUKS_ALIGN_KEYSLOTS;
+
+ /* check future LUKS2 keyslots area is at least as large as LUKS1 keyslots area */
+ if (buf_size > LUKS2_keyslots_size(hdr2)) {
+ log_err(cd, _("Unable to move keyslot area. LUKS2 keyslots area too small."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ if ((r = move_keyslot_areas(cd, 8 * SECTOR_SIZE, buf_offset, buf_size)) < 0) {
+ log_err(cd, _("Unable to move keyslot area."));
+ goto out;
+ }
+
+ /* Write new LUKS2 JSON */
+ r = LUKS2_hdr_write(cd, hdr2);
+out:
+ LUKS2_hdr_free(cd, hdr2);
+
+ return r;
+}
+
/*
 * Decide whether LUKS2 keyslot @keyslot can be represented as a LUKS1
 * keyslot: type "luks2", PBKDF2 with @hash, AF with LUKS_STRIPES stripes
 * and the same hash, keyslot cipher/key size equal to the data segment's,
 * and a binary area of exactly the LUKS1-computed size.
 *
 * Returns 1 when compatible (a missing keyslot is compatible - it maps to
 * an inactive LUKS1 slot), 0 otherwise.
 */
static int keyslot_LUKS1_compatible(struct crypt_device *cd, struct luks2_hdr *hdr,
				    int keyslot, uint32_t key_size, const char *hash)
{
	json_object *jobj_keyslot, *jobj, *jobj_kdf, *jobj_af;
	uint64_t l2_offset, l2_length;
	size_t ks_key_size;
	const char *ks_cipher, *data_cipher;

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot);
	if (!jobj_keyslot)
		return 1;

	/* only regular "luks2" keyslots convert; e.g. "reencrypt" does not */
	if (!json_object_object_get_ex(jobj_keyslot, "type", &jobj) ||
	    strcmp(json_object_get_string(jobj), "luks2"))
		return 0;

	/* Using PBKDF2, this implies memory and parallel is not used. */
	jobj = NULL;
	if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf) ||
	    !json_object_object_get_ex(jobj_kdf, "type", &jobj) ||
	    strcmp(json_object_get_string(jobj), CRYPT_KDF_PBKDF2) ||
	    !json_object_object_get_ex(jobj_kdf, "hash", &jobj) ||
	    strcmp(json_object_get_string(jobj), hash))
		return 0;

	/* LUKS1 hardcodes the anti-forensic stripes count */
	jobj = NULL;
	if (!json_object_object_get_ex(jobj_keyslot, "af", &jobj_af) ||
	    !json_object_object_get_ex(jobj_af, "stripes", &jobj) ||
	    json_object_get_int(jobj) != LUKS_STRIPES)
		return 0;

	/* AF hash must exist and match the digest hash */
	jobj = NULL;
	if (!json_object_object_get_ex(jobj_af, "hash", &jobj) ||
	    (crypt_hash_size(json_object_get_string(jobj)) < 0) ||
	    strcmp(json_object_get_string(jobj), hash))
		return 0;

	/* LUKS1 uses one cipher/key size for both data and keyslots */
	ks_cipher = LUKS2_get_keyslot_cipher(hdr, keyslot, &ks_key_size);
	data_cipher = LUKS2_get_cipher(hdr, CRYPT_DEFAULT_SEGMENT);
	if (!ks_cipher || !data_cipher || key_size != ks_key_size || strcmp(ks_cipher, data_cipher)) {
		log_dbg(cd, "Cipher in keyslot %d is different from volume key encryption.", keyslot);
		return 0;
	}

	if (LUKS2_keyslot_area(hdr, keyslot, &l2_offset, &l2_length))
		return 0;

	/* binary area must be exactly the LUKS1 AF-split size (4 KiB aligned) */
	if (l2_length != (size_round_up(AF_split_sectors(key_size, LUKS_STRIPES) * SECTOR_SIZE, 4096))) {
		log_dbg(cd, "Area length in LUKS2 keyslot (%d) is not compatible with LUKS1", keyslot);
		return 0;
	}

	return 1;
}
+
+/* Convert LUKS2 -> LUKS1 */
/*
 * Convert a LUKS2 header back to LUKS1. All active keyslots must be LUKS1
 * compatible (checked via keyslot_LUKS1_compatible()); tokens, multiple
 * segments and multiple digests are rejected. Fills @hdr1 from the LUKS2
 * JSON metadata, moves the keyslot material back from 32 KiB to 4 KiB
 * offset and writes the LUKS1 header. Returns 0 or negative errno.
 */
int LUKS2_luks2_to_luks1(struct crypt_device *cd, struct luks2_hdr *hdr2, struct luks_phdr *hdr1)
{
	size_t buf_size, buf_offset;
	char cipher[LUKS_CIPHERNAME_L], cipher_mode[LUKS_CIPHERMODE_L];
	char *digest, *digest_salt;
	const char *hash;
	size_t len;
	json_object *jobj_keyslot, *jobj_digest, *jobj_segment, *jobj_kdf, *jobj_area, *jobj1, *jobj2;
	uint32_t key_size;
	int i, r, last_active = 0;
	uint64_t offset, area_length;
	char *buf, luksMagic[] = LUKS_MAGIC;

	jobj_digest = LUKS2_get_digest_jobj(hdr2, 0);
	if (!jobj_digest)
		return -EINVAL;

	jobj_segment = LUKS2_get_segment_jobj(hdr2, CRYPT_DEFAULT_SEGMENT);
	if (!jobj_segment)
		return -EINVAL;

	/* LUKS1 only supports 512-byte encryption sectors */
	if (json_segment_get_sector_size(jobj_segment) != SECTOR_SIZE) {
		log_err(cd, _("Cannot convert to LUKS1 format - default segment encryption sector size is not 512 bytes."));
		return -EINVAL;
	}

	/* exactly one digest of type pbkdf2 (the LUKS1 MK digest scheme) */
	json_object_object_get_ex(hdr2->jobj, "digests", &jobj1);
	if (!json_object_object_get_ex(jobj_digest, "type", &jobj2) ||
	    strcmp(json_object_get_string(jobj2), "pbkdf2") ||
	    json_object_object_length(jobj1) != 1) {
		log_err(cd, _("Cannot convert to LUKS1 format - key slot digests are not LUKS1 compatible."));
		return -EINVAL;
	}
	if (!json_object_object_get_ex(jobj_digest, "hash", &jobj2))
		return -EINVAL;
	hash = json_object_get_string(jobj2);

	r = crypt_parse_name_and_mode(LUKS2_get_cipher(hdr2, CRYPT_DEFAULT_SEGMENT), cipher, NULL, cipher_mode);
	if (r < 0)
		return r;

	if (crypt_cipher_wrapped_key(cipher, cipher_mode)) {
		log_err(cd, _("Cannot convert to LUKS1 format - device uses wrapped key cipher %s."), cipher);
		return -EINVAL;
	}

	if (json_segments_count(LUKS2_get_segments_jobj(hdr2)) != 1) {
		log_err(cd, _("Cannot convert to LUKS1 format - device uses more segments."));
		return -EINVAL;
	}

	/* LUKS1 has no token concept, refuse to lose them silently */
	r = LUKS2_tokens_count(hdr2);
	if (r < 0)
		return r;
	if (r > 0) {
		log_err(cd, _("Cannot convert to LUKS1 format - LUKS2 header contains %u token(s)."), r);
		return -EINVAL;
	}

	r = LUKS2_get_volume_key_size(hdr2, 0);
	if (r < 0)
		return -EINVAL;
	key_size = r;

	/* verify every active keyslot can be expressed as a LUKS1 slot */
	for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++) {
		if (LUKS2_keyslot_info(hdr2, i) == CRYPT_SLOT_INACTIVE)
			continue;

		if (LUKS2_keyslot_info(hdr2, i) == CRYPT_SLOT_INVALID) {
			log_err(cd, _("Cannot convert to LUKS1 format - keyslot %u is in invalid state."), i);
			return -EINVAL;
		}

		if (i >= LUKS_NUMKEYS) {
			log_err(cd, _("Cannot convert to LUKS1 format - slot %u (over maximum slots) is still active."), i);
			return -EINVAL;
		}

		if (!keyslot_LUKS1_compatible(cd, hdr2, i, key_size, hash)) {
			log_err(cd, _("Cannot convert to LUKS1 format - keyslot %u is not LUKS1 compatible."), i);
			return -EINVAL;
		}
	}

	memset(hdr1, 0, sizeof(*hdr1));

	/* fill per-keyslot LUKS1 fields; allocate placeholder areas for inactive slots */
	for (i = 0; i < LUKS_NUMKEYS; i++) {
		hdr1->keyblock[i].active = LUKS_KEY_DISABLED;
		hdr1->keyblock[i].stripes = LUKS_STRIPES;

		jobj_keyslot = LUKS2_get_keyslot_jobj(hdr2, i);

		if (jobj_keyslot) {
			if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
				return -EINVAL;
			if (!json_object_object_get_ex(jobj_area, "offset", &jobj1))
				return -EINVAL;
			offset = crypt_jobj_get_uint64(jobj1);
		} else {
			if (LUKS2_find_area_gap(cd, hdr2, key_size, &offset, &area_length))
				return -EINVAL;
			/*
			 * We have to create placeholder luks2 keyslots in place of all
			 * inactive keyslots. Otherwise we would allocate all
			 * inactive luks1 keyslots over same binary keyslot area.
			 */
			if (placeholder_keyslot_alloc(cd, i, offset, area_length))
				return -EINVAL;
		}

		offset /= SECTOR_SIZE;
		if (offset > UINT32_MAX)
			return -EINVAL;

		/* subtract the LUKS2 header shift to get the LUKS1 material offset */
		hdr1->keyblock[i].keyMaterialOffset = offset;
		hdr1->keyblock[i].keyMaterialOffset -=
			((2 * LUKS2_HDR_16K_LEN - LUKS_ALIGN_KEYSLOTS) / SECTOR_SIZE);

		if (!jobj_keyslot)
			continue;

		hdr1->keyblock[i].active = LUKS_KEY_ENABLED;
		last_active = i;

		/* iterations/salt are best-effort below; missing fields are skipped */
		if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
			continue;

		if (!json_object_object_get_ex(jobj_kdf, "iterations", &jobj1))
			continue;
		hdr1->keyblock[i].passwordIterations = crypt_jobj_get_uint32(jobj1);

		if (!json_object_object_get_ex(jobj_kdf, "salt", &jobj1))
			continue;

		if (crypt_base64_decode(&buf, &len, json_object_get_string(jobj1),
					json_object_get_string_len(jobj1)))
			continue;
		/* NOTE(review): a zero-length decoded salt (len == 0) passes this
		 * check and reaches the fixed-size memcpy below - confirm salts
		 * are always validated to LUKS_SALTSIZE upstream. */
		if (len > 0 && len != LUKS_SALTSIZE) {
			free(buf);
			continue;
		}
		memcpy(hdr1->keyblock[i].passwordSalt, buf, LUKS_SALTSIZE);
		free(buf);
	}

	/* jobj_keyslot still holds the last loop value; fall back to the
	 * last active keyslot for cipher/kdf metadata if the last slot is empty */
	if (!jobj_keyslot) {
		jobj_keyslot = LUKS2_get_keyslot_jobj(hdr2, last_active);
		if (!jobj_keyslot)
			return -EINVAL;
	}

	if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
		return -EINVAL;
	if (!json_object_object_get_ex(jobj_area, "encryption", &jobj1))
		return -EINVAL;
	r = crypt_parse_name_and_mode(json_object_get_string(jobj1), cipher, NULL, cipher_mode);
	if (r < 0)
		return r;

	strncpy(hdr1->cipherName, cipher, LUKS_CIPHERNAME_L - 1);
	hdr1->cipherName[LUKS_CIPHERNAME_L-1] = '\0';
	strncpy(hdr1->cipherMode, cipher_mode, LUKS_CIPHERMODE_L - 1);
	hdr1->cipherMode[LUKS_CIPHERMODE_L-1] = '\0';

	if (!json_object_object_get_ex(jobj_keyslot, "kdf", &jobj_kdf))
		return -EINVAL;
	if (!json_object_object_get_ex(jobj_kdf, "hash", &jobj1))
		return -EINVAL;
	strncpy(hdr1->hashSpec, json_object_get_string(jobj1), sizeof(hdr1->hashSpec) - 1);

	hdr1->keyBytes = key_size;

	if (!json_object_object_get_ex(jobj_digest, "iterations", &jobj1))
		return -EINVAL;
	hdr1->mkDigestIterations = crypt_jobj_get_uint32(jobj1);

	if (!json_object_object_get_ex(jobj_digest, "digest", &jobj1))
		return -EINVAL;
	r = crypt_base64_decode(&digest, &len, json_object_get_string(jobj1),
				json_object_get_string_len(jobj1));
	if (r < 0)
		return r;
	/* We can store full digest here, not only sha1 length */
	if (len < LUKS_DIGESTSIZE) {
		free(digest);
		return -EINVAL;
	}
	memcpy(hdr1->mkDigest, digest, LUKS_DIGESTSIZE);
	free(digest);

	if (!json_object_object_get_ex(jobj_digest, "salt", &jobj1))
		return -EINVAL;
	r = crypt_base64_decode(&digest_salt, &len, json_object_get_string(jobj1),
				json_object_get_string_len(jobj1));
	if (r < 0)
		return r;
	if (len != LUKS_SALTSIZE) {
		free(digest_salt);
		return -EINVAL;
	}
	memcpy(hdr1->mkDigestSalt, digest_salt, LUKS_SALTSIZE);
	free(digest_salt);

	/* LUKS1 payload offset is stored in sectors and limited to 32 bits */
	if (!json_object_object_get_ex(jobj_segment, "offset", &jobj1))
		return -EINVAL;
	offset = crypt_jobj_get_uint64(jobj1) / SECTOR_SIZE;
	if (offset > UINT32_MAX)
		return -EINVAL;
	hdr1->payloadOffset = offset;

	strncpy(hdr1->uuid, hdr2->uuid, UUID_STRING_L); /* max 36 chars */
	hdr1->uuid[UUID_STRING_L-1] = '\0';

	memcpy(hdr1->magic, luksMagic, LUKS_MAGIC_L);

	hdr1->version = 1;

	/* refuse conversion while the device is active */
	r = luks_header_in_use(cd);
	if (r)
		return r > 0 ? -EBUSY : r;

	/* move keyslots 32k -> 4k offset */
	buf_offset = 2 * LUKS2_HDR_16K_LEN;
	buf_size = LUKS2_keyslots_size(hdr2);
	r = move_keyslot_areas(cd, buf_offset, 8 * SECTOR_SIZE, buf_size);
	if (r < 0) {
		log_err(cd, _("Unable to move keyslot area."));
		return r;
	}

	/* wipe the secondary LUKS2 header location (sectors 8-15).
	 * NOTE(review): the crypt_wipe_device() result is ignored - confirm
	 * a failed wipe is acceptable here. */
	crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0,
			  8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);

	/* Write new LUKS1 hdr */
	return LUKS_write_phdr(hdr1, cd);
}
diff --git a/lib/luks2/luks2_reencrypt.c b/lib/luks2/luks2_reencrypt.c
new file mode 100644
index 0000000..b0dcd6d
--- /dev/null
+++ b/lib/luks2/luks2_reencrypt.c
@@ -0,0 +1,4375 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, reencryption helpers
+ *
+ * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+#include "utils_device_locking.h"
+
/* Runtime state of one LUKS2 reencryption operation. */
struct luks2_reencrypt {
	/* reencryption window attributes */
	uint64_t offset;	/* current hot zone start (bytes, relative to data area) */
	uint64_t progress;	/* presumably bytes already processed - confirm against users */
	uint64_t length;	/* current hot zone length in bytes */
	uint64_t device_size;	/* data device size in bytes */
	bool online;		/* NOTE(review): likely "device active during reencryption" - confirm */
	bool fixed_length;	/* device_size is fixed; do not emit 'dynamic' segment sizes */
	crypt_reencrypt_direction_info direction;	/* forward or backward processing */
	crypt_reencrypt_mode_info mode;	/* encrypt, decrypt or reencrypt */

	char *device_name;	/* active device name (online case) */
	char *hotzone_name;	/* dm device mapped over the hot zone */
	char *overlay_name;	/* dm overlay device used during online reencryption */
	uint32_t flags;

	/* reencryption window persistence attributes */
	struct reenc_protection rp;		/* resilience for the regular hot zone */
	struct reenc_protection rp_moved_segment;	/* resilience for the moved segment */

	int reenc_keyslot;	/* id of the "reencrypt" keyslot */

	/* already running reencryption */
	json_object *jobj_segs_hot;		/* segment layout while a hot zone is in flight */
	struct json_object *jobj_segs_post;	/* segment layout after the hot zone completes */

	/* backup segments */
	json_object *jobj_segment_new;	/* "backup-final": target encryption parameters */
	int digest_new;
	json_object *jobj_segment_old;	/* "backup-previous": source encryption parameters */
	int digest_old;
	json_object *jobj_segment_moved;	/* "backup-moved-segment" (datashift) */

	struct volume_key *vks;	/* volume keys (old and new) */

	void *reenc_buffer;	/* bounce buffer for one hot zone */
	ssize_t read;		/* bytes currently held in reenc_buffer */

	struct crypt_storage_wrapper *cw1;	/* storage wrapper 1 - see init code for old/new role */
	struct crypt_storage_wrapper *cw2;	/* storage wrapper 2 */

	uint32_t wflags1;	/* open flags for cw1 */
	uint32_t wflags2;	/* open flags for cw2 */

	struct crypt_lock_handle *reenc_lock;	/* serializes reencryption on the device */
};
+#if USE_LUKS2_REENCRYPTION
+static uint64_t data_shift_value(struct reenc_protection *rp)
+{
+ return rp->type == REENC_PROTECTION_DATASHIFT ? rp->p.ds.data_shift : 0;
+}
+
+static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
+{
+ return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
+}
+
+static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
+{
+ return reencrypt_segment(hdr, 1);
+}
+
+static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
+{
+ return reencrypt_segment(hdr, 0);
+}
+
+static json_object *reencrypt_segments_old(struct luks2_hdr *hdr)
+{
+ json_object *jobj_segments, *jobj = NULL;
+
+ if (json_object_copy(reencrypt_segment_old(hdr), &jobj))
+ return NULL;
+
+ json_segment_remove_flag(jobj, "backup-previous");
+
+ jobj_segments = json_object_new_object();
+ if (!jobj_segments) {
+ json_object_put(jobj);
+ return NULL;
+ }
+
+ if (json_object_object_add_by_uint(jobj_segments, 0, jobj)) {
+ json_object_put(jobj);
+ json_object_put(jobj_segments);
+ return NULL;
+ }
+
+ return jobj_segments;
+}
+
/* Cipher specification of the resulting (new) backup segment. */
static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment_new(hdr));
}

/* Cipher specification of the original (old) backup segment. */
static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment_old(hdr));
}

/* Encryption sector size of the resulting (new) backup segment. */
static uint32_t reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment_new(hdr));
}

/* Encryption sector size of the original (old) backup segment. */
static uint32_t reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment_old(hdr));
}
+
+static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
+{
+ json_object *jobj = reencrypt_segment(hdr, new);
+ if (jobj)
+ return json_segment_get_offset(jobj, 0);
+
+ return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
+}
+
+static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
+{
+ json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
+
+ if (!jobj_segment)
+ return 0;
+
+ return json_segment_get_offset(jobj_segment, 0);
+}
+
+static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
+{
+ return reencrypt_data_offset(hdr, 1);
+}
+
+static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
+{
+ return reencrypt_data_offset(hdr, 0);
+}
+#endif
/* Digest id bound to the new/old backup segment, or negative error. */
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag = new ? "backup-final" : "backup-previous";
	int segment = LUKS2_get_segment_id_by_flag(hdr, flag);

	/* propagate the lookup error, otherwise map segment -> digest */
	return segment < 0 ? segment : LUKS2_digest_by_segment(hdr, segment);
}

int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1);
}

int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0);
}
+
+/* none, checksums, journal or shift */
+static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_type;
+ int ks = LUKS2_find_keyslot(hdr, "reencrypt");
+
+ if (ks < 0)
+ return NULL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
+
+ json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
+ if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return NULL;
+
+ return json_object_get_string(jobj_type);
+}
+
+static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
+ int ks = LUKS2_find_keyslot(hdr, "reencrypt");
+
+ if (ks < 0)
+ return NULL;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
+
+ json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
+ if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
+ return NULL;
+ if (strcmp(json_object_get_string(jobj_type), "checksum"))
+ return NULL;
+ if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
+ return NULL;
+
+ return json_object_get_string(jobj_hash);
+}
+#if USE_LUKS2_REENCRYPTION
/*
 * Build the post-hotzone segment layout for encryption with data shift:
 * segments preceding the in-reencryption segment are kept, and the
 * in-reencryption segment is either merged with the segment following it
 * or (when none follows) reused with its flag dropped.
 * Returns the new segments object or NULL on error.
 */
static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	/* no hot segments: nothing to merge, return the empty map */
	if (json_segments_count(rh->jobj_segs_hot) == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* copy all segments before the hot zone unchanged */
	while (i < reenc_seg) {
		jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
	}

	if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
		/* no segment after the hot zone: reuse the hot segment itself */
		if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
			goto err;
		json_segment_remove_flag(jobj_seg_new, "in-reencryption");
		tmp = rh->length;
	} else {
		/* merge: extend the following segment backwards over the hot zone */
		json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
		json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
		tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
	}

	/* alter size of new segment, reenc_seg == 0 we're finished */
	json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
	json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);

	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
+
/*
 * Build the hot segment layout for encryption with data shift: existing
 * segments up to the last linear one, a shrunk linear remainder (if any),
 * the new in-reencryption crypt segment covering the hot zone, and the
 * already-encrypted crypt segment after it (if one exists).
 * Returns the new segments object or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int sg, crypt_seg, i = 0;
	uint64_t segment_size;
	json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();

	if (!jobj_segs_hot)
		return NULL;

	crypt_seg = LUKS2_segment_by_type(hdr, "crypt");

	/* FIXME: This is hack. Find proper way to fix it. */
	sg = LUKS2_last_segment_by_type(hdr, "linear");
	if (rh->offset && sg < 0)
		goto err;
	if (sg < 0)
		return jobj_segs_hot;

	/* hot zone: new crypt segment flagged "in-reencryption".
	 * NOTE(review): a NULL result here is only detected when it is
	 * added below - confirm json_object_object_add_by_uint tolerates it. */
	jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
						 rh->offset >> SECTOR_SHIFT,
						 &rh->length,
						 reencrypt_segment_cipher_new(hdr),
						 reencrypt_get_sector_size_new(hdr),
						 1);

	/* copy all segments before the last linear one unchanged */
	while (i < sg) {
		jobj_copy = LUKS2_get_segment_jobj(hdr, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
	}

	/* keep a shrunk copy of the linear segment for the not-yet-processed part */
	segment_size = LUKS2_segment_size(hdr, sg, 0);
	if (segment_size > rh->length) {
		jobj_seg_shrunk = NULL;
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
			goto err;
		json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
	}

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
	jobj_enc_seg = NULL; /* see err: label */

	/* first crypt segment after encryption ? */
	if (crypt_seg >= 0) {
		jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
		if (!jobj_seg_new)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_enc_seg);
	json_object_put(jobj_segs_hot);

	return NULL;
}
+
+static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct luks2_reencrypt *rh,
+ uint64_t data_offset,
+ uint64_t segment_offset,
+ uint64_t iv_offset,
+ const uint64_t *segment_length)
+{
+ switch (rh->mode) {
+ case CRYPT_REENCRYPT_REENCRYPT:
+ case CRYPT_REENCRYPT_ENCRYPT:
+ return json_segment_create_crypt(data_offset + segment_offset,
+ crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
+ segment_length,
+ reencrypt_segment_cipher_new(hdr),
+ reencrypt_get_sector_size_new(hdr), 0);
+ case CRYPT_REENCRYPT_DECRYPT:
+ return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
+ }
+
+ return NULL;
+}
+
+static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ int reenc_seg;
+ json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
+ *jobj_segs_post = json_object_new_object();
+ uint64_t fixed_length = rh->offset + rh->length;
+
+ if (!rh->jobj_segs_hot || !jobj_segs_post)
+ goto err;
+
+ reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (reenc_seg < 0)
+ return NULL;
+
+ jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
+
+ /*
+ * if there's no old segment after reencryption, we're done.
+ * Set size to 'dynamic' again.
+ */
+ jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
+ if (!jobj_new_seg_after)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
+
+ if (jobj_old_seg) {
+ if (rh->fixed_length) {
+ if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
+ goto err;
+ jobj_old_seg = jobj_old_seg_copy;
+ fixed_length = rh->device_size - fixed_length;
+ json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
+ } else
+ json_object_get(jobj_old_seg);
+ json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
+ }
+
+ return jobj_segs_post;
+err:
+ json_object_put(jobj_segs_post);
+ return NULL;
+}
+
+static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ int reenc_seg;
+ uint64_t fixed_length;
+
+ json_object *jobj_new_seg_after, *jobj_old_seg,
+ *jobj_segs_post = json_object_new_object();
+
+ if (!rh->jobj_segs_hot || !jobj_segs_post)
+ goto err;
+
+ reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (reenc_seg < 0)
+ return NULL;
+
+ jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
+ if (jobj_old_seg)
+ json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
+ if (rh->fixed_length && rh->offset) {
+ fixed_length = rh->device_size - rh->offset;
+ jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
+ } else
+ jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
+ if (!jobj_new_seg_after)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
+
+ return jobj_segs_post;
+err:
+ json_object_put(jobj_segs_post);
+ return NULL;
+}
+
+static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct luks2_reencrypt *rh,
+ uint64_t data_offset,
+ uint64_t segment_offset,
+ uint64_t iv_offset,
+ const uint64_t *segment_length)
+{
+ switch (rh->mode) {
+ case CRYPT_REENCRYPT_REENCRYPT:
+ case CRYPT_REENCRYPT_ENCRYPT:
+ return json_segment_create_crypt(data_offset + segment_offset,
+ crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
+ segment_length,
+ reencrypt_segment_cipher_new(hdr),
+ reencrypt_get_sector_size_new(hdr), 1);
+ case CRYPT_REENCRYPT_DECRYPT:
+ return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
+ }
+
+ return NULL;
+}
+
+static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct luks2_reencrypt *rh,
+ uint64_t data_offset,
+ uint64_t segment_offset,
+ const uint64_t *segment_length)
+{
+ json_object *jobj_old_seg = NULL;
+
+ switch (rh->mode) {
+ case CRYPT_REENCRYPT_REENCRYPT:
+ case CRYPT_REENCRYPT_DECRYPT:
+ jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
+ crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
+ segment_length,
+ reencrypt_segment_cipher_old(hdr),
+ reencrypt_get_sector_size_old(hdr),
+ 0);
+ break;
+ case CRYPT_REENCRYPT_ENCRYPT:
+ jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
+ }
+
+ return jobj_old_seg;
+}
+
/*
 * Build the "hot" (in-progress) segment layout for forward direction:
 *   sg 0: [0, rh->offset)                 already converted (new format)
 *   sg 1: [rh->offset, +rh->length)       hotzone, flagged in-reencryption
 *   sg 2: remainder                       untouched (old format), offset
 *                                         by data_shift_value() resilience
 * Leading/trailing segments are omitted when empty.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t device_size,
	uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	/* anything before the hotzone is already in the new format */
	if (rh->offset) {
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
	}

	/* the hotzone itself, marked as in-reencryption */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* tail beyond the hotzone is still in the old format */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp),
							  rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
+
/*
 * Build the "hot" segment layout for decryption with data shift
 * (forward direction with a moved first segment):
 *   - optional moved (linear) head segment taken verbatim from the header,
 *   - optional already-decrypted area of length rh->progress,
 *   - the hotzone flagged in-reencryption,
 *   - either the header's second segment (when hotzone starts at 0) or
 *     the remaining old-format tail.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_device *cd,
	struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
	uint64_t device_size, uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	if (rh->offset) {
		/* reuse the header's first segment (the moved head) as-is */
		jobj_new_seg = LUKS2_get_segment_jobj(hdr, 0);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));

		/* area already decrypted in previous steps */
		if (linear_length) {
			jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh,
								  data_offset,
								  json_segment_get_size(jobj_new_seg, 0),
								  0,
								  &linear_length);
			if (!jobj_new_seg)
				goto err;
			json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
		}
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset,
							  rh->offset,
							  rh->offset,
							  &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* last step: reuse header's segment 1 unless it is a backup segment */
	if (!rh->offset && (jobj_new_seg = LUKS2_get_segment_jobj(hdr, 1)) &&
	    !json_segment_is_backup(jobj_new_seg))
		json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
	else if (tmp < device_size) {
		/* otherwise describe the remaining old-format tail explicitly */
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh,
							  data_offset + data_shift_value(&rh->rp),
							  rh->offset + rh->length,
							  rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
+
+static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ int reenc_seg, i = 0;
+ json_object *jobj_copy, *jobj_seg_old, *jobj_seg_new,
+ *jobj_segs_post = json_object_new_object();
+ unsigned segs;
+ uint64_t tmp;
+
+ if (!rh->jobj_segs_hot || !jobj_segs_post)
+ goto err;
+
+ segs = json_segments_count(rh->jobj_segs_hot);
+ if (segs == 0)
+ return jobj_segs_post;
+
+ reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (reenc_seg < 0)
+ goto err;
+
+ if (reenc_seg == 0) {
+ jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL);
+ if (!jobj_seg_new)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new);
+
+ return jobj_segs_post;
+ }
+
+ jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0);
+ if (!jobj_copy)
+ goto err;
+ json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
+
+ jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
+
+ tmp = rh->length + rh->progress;
+ jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset,
+ json_segment_get_size(rh->jobj_segment_moved, 0),
+ data_shift_value(&rh->rp),
+ jobj_seg_old ? &tmp : NULL);
+ json_object_object_add_by_uint(jobj_segs_post, i++, jobj_seg_new);
+
+ if (jobj_seg_old)
+ json_object_object_add_by_uint(jobj_segs_post, i, json_object_get(jobj_seg_old));
+
+ return jobj_segs_post;
+err:
+ json_object_put(jobj_segs_post);
+ return NULL;
+}
+
/*
 * Build the "hot" segment layout for backward direction:
 *   sg 0: [0, rh->offset)                old-format head (copy of header
 *                                        segment 0 with size clamped)
 *   sg 1: [rh->offset, +rh->length)      hotzone, flagged in-reencryption
 *   sg 2: remainder                      already converted (new format)
 * Leading/trailing segments are omitted when empty.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t device_size,
	uint64_t data_offset)
{
	json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();
	int sg = 0;
	uint64_t fixed_length, tmp = rh->offset + rh->length;

	if (!jobj_segs_hot)
		return NULL;

	if (rh->offset) {
		/* copy header segment 0 so the stored header is not modified */
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
			goto err;
		json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));

		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* tail beyond the hotzone was already converted in earlier steps */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length,
							  rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
+
+static int reencrypt_make_hot_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size,
+ uint64_t data_offset)
+{
+ rh->jobj_segs_hot = NULL;
+
+ if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating hot segments for encryption with data move.");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset);
+ } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating hot segments for decryption with data move.");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_decrypt_shift(cd, hdr, rh, device_size, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
+ log_dbg(cd, "Calculating hot segments (forward direction).");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
+ log_dbg(cd, "Calculating hot segments (backward direction).");
+ rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
+ }
+
+ return rh->jobj_segs_hot ? 0 : -EINVAL;
+}
+
+static int reencrypt_make_post_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t data_offset)
+{
+ rh->jobj_segs_post = NULL;
+
+ if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating post segments for encryption with data move.");
+ rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset);
+ } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
+ rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+ log_dbg(cd, "Calculating post segments for decryption with data move.");
+ rh->jobj_segs_post = _dec_create_segments_shift_after(cd, hdr, rh, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
+ log_dbg(cd, "Calculating post segments (forward direction).");
+ rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
+ } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
+ log_dbg(cd, "Calculating segments (backward direction).");
+ rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
+ }
+
+ return rh->jobj_segs_post ? 0 : -EINVAL;
+}
+#endif
+static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
+{
+ json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
+ int ks = LUKS2_find_keyslot(hdr, "reencrypt");
+
+ if (ks < 0)
+ return 0;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
+
+ json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
+ if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
+ return 0;
+
+ return crypt_jobj_get_uint64(jobj_data_shift);
+}
+
+static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
+{
+ const char *mode;
+ crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
+ json_object *jobj_keyslot, *jobj_mode;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
+ if (!jobj_keyslot)
+ return mi;
+
+ json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
+ mode = json_object_get_string(jobj_mode);
+
+ /* validation enforces allowed values */
+ if (!strcmp(mode, "encrypt"))
+ mi = CRYPT_REENCRYPT_ENCRYPT;
+ else if (!strcmp(mode, "decrypt"))
+ mi = CRYPT_REENCRYPT_DECRYPT;
+
+ return mi;
+}
+
+static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
+{
+ const char *value;
+ json_object *jobj_keyslot, *jobj_mode;
+ crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
+
+ jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
+ if (!jobj_keyslot)
+ return di;
+
+ json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
+ value = json_object_get_string(jobj_mode);
+
+ /* validation enforces allowed values */
+ if (strcmp(value, "forward"))
+ di = CRYPT_REENCRYPT_BACKWARD;
+
+ return di;
+}
+
/* Status of a single reencryption step: OK, recoverable error, roll back the step, or abort. */
typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
+
+void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp)
+{
+ if (!rp || rp->type != REENC_PROTECTION_CHECKSUM)
+ return;
+
+ if (rp->p.csum.ch) {
+ crypt_hash_destroy(rp->p.csum.ch);
+ rp->p.csum.ch = NULL;
+ }
+
+ if (rp->p.csum.checksums) {
+ crypt_safe_memzero(rp->p.csum.checksums, rp->p.csum.checksums_len);
+ free(rp->p.csum.checksums);
+ rp->p.csum.checksums = NULL;
+ }
+}
+
/*
 * Destroy a reencryption context and release everything it owns:
 * resilience protection buffers, cached JSON segments (refcounts),
 * the transfer buffer, both storage wrappers, dm names, volume keys
 * (dropped from the kernel keyring first), the exclusive data-device
 * reference and finally the reencryption lock. Safe on NULL.
 */
void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
	if (!rh)
		return;

	LUKS2_reencrypt_protection_erase(&rh->rp);
	LUKS2_reencrypt_protection_erase(&rh->rp_moved_segment);

	/* drop json references; pointers are cleared defensively */
	json_object_put(rh->jobj_segs_hot);
	rh->jobj_segs_hot = NULL;
	json_object_put(rh->jobj_segs_post);
	rh->jobj_segs_post = NULL;
	json_object_put(rh->jobj_segment_old);
	rh->jobj_segment_old = NULL;
	json_object_put(rh->jobj_segment_new);
	rh->jobj_segment_new = NULL;
	json_object_put(rh->jobj_segment_moved);
	rh->jobj_segment_moved = NULL;

	free(rh->reenc_buffer);
	rh->reenc_buffer = NULL;
	crypt_storage_wrapper_destroy(rh->cw1);
	rh->cw1 = NULL;
	crypt_storage_wrapper_destroy(rh->cw2);
	rh->cw2 = NULL;

	free(rh->device_name);
	free(rh->overlay_name);
	free(rh->hotzone_name);
	/* remove keys from kernel keyring before freeing them in memory */
	crypt_drop_keyring_key(cd, rh->vks);
	crypt_free_volume_key(rh->vks);
	/* release exclusive device reference and the reencrypt lock last */
	device_release_excl(cd, crypt_data_device(cd));
	crypt_unlock_internal(cd, rh->reenc_lock);
	free(rh);
}
+
/*
 * Compute the maximum hotzone length (bytes) allowed by the configured
 * resilience protection and store it in *r_length:
 *   - none/unset:  global LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH limit,
 *   - datashift:   exactly the configured shift,
 *   - journal:     limited by the reencrypt keyslot area size,
 *   - checksum:    as many blocks as checksums fit in the keyslot area.
 * Returns 0, -EINVAL, or -ENOTSUP when built without reencryption.
 */
int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	const struct reenc_protection *rp,
	int reencrypt_keyslot,
	uint64_t *r_length)
{
#if USE_LUKS2_REENCRYPTION
	int r;
	uint64_t dummy, area_length;

	assert(hdr);
	assert(rp);
	assert(r_length);

	if (rp->type <= REENC_PROTECTION_NONE) {
		*r_length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
		return 0;
	}

	if (rp->type == REENC_PROTECTION_DATASHIFT) {
		*r_length = rp->p.ds.data_shift;
		return 0;
	}

	/* journal and checksum protection are bounded by the keyslot area */
	r = LUKS2_keyslot_area(hdr, reencrypt_keyslot, &dummy, &area_length);
	if (r < 0)
		return -EINVAL;

	if (rp->type == REENC_PROTECTION_JOURNAL) {
		*r_length = area_length;
		return 0;
	}

	if (rp->type == REENC_PROTECTION_CHECKSUM) {
		*r_length = (area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
		return 0;
	}

	return -EINVAL;
#else
	return -ENOTSUP;
#endif
}
+#if USE_LUKS2_REENCRYPTION
/*
 * Compute the required hotzone alignment: the maximum of the data
 * device block size and both (old and new) encryption sector sizes.
 */
static size_t reencrypt_get_alignment(struct crypt_device *cd,
		struct luks2_hdr *hdr)
{
	size_t alignment, sector_size;

	alignment = device_block_size(cd, crypt_data_device(cd));

	sector_size = reencrypt_get_sector_size_old(hdr);
	if (sector_size > alignment)
		alignment = sector_size;

	sector_size = reencrypt_get_sector_size_new(hdr);
	if (sector_size > alignment)
		alignment = sector_size;

	return alignment;
}
+
+/* returns void because it must not fail on valid LUKS2 header */
+static void _load_backup_segments(struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh)
+{
+ int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
+
+ if (segment >= 0) {
+ rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
+ rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
+ } else {
+ rh->jobj_segment_new = NULL;
+ rh->digest_new = -ENOENT;
+ }
+
+ segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
+ if (segment >= 0) {
+ rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
+ rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
+ } else {
+ rh->jobj_segment_old = NULL;
+ rh->digest_old = -ENOENT;
+ }
+
+ segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
+ if (segment >= 0)
+ rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
+ else
+ rh->jobj_segment_moved = NULL;
+}
+
/*
 * Compute the next hotzone offset for backward encryption with a moved
 * (data-shifted) segment. Sums the plaintext (linear) segments still
 * present; the next hotzone ends where they end, minus the data shift.
 * May shrink *reencrypt_length for the final (short) step.
 * Returns 0 on success, -EINVAL on inconsistent layout.
 */
static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments,
	uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
{
	uint64_t tmp, linear_length = 0;
	int sg, segs = json_segments_count(jobj_segments);

	/* find reencrypt offset with data shift */
	for (sg = 0; sg < segs; sg++)
		if (LUKS2_segment_is_type(hdr, sg, "linear"))
			linear_length += LUKS2_segment_size(hdr, sg, 0);

	/* all active linear segments length */
	if (linear_length && segs > 1) {
		if (linear_length < data_shift)
			return -EINVAL;
		tmp = linear_length - data_shift;
		/* last remaining chunk is smaller than one full data shift */
		if (tmp && tmp < data_shift) {
			*offset = data_shift;
			*reencrypt_length = tmp;
		} else
			*offset = tmp;
		return 0;
	}

	/* single segment left: encryption finished, start at the beginning */
	if (segs == 1) {
		*offset = 0;
		return 0;
	}

	/* should be unreachable */

	return -EINVAL;
}
+
+static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr,
+ json_object *jobj_segments,
+ uint64_t data_shift,
+ uint64_t *offset)
+{
+ int last_crypt = LUKS2_last_segment_by_type(hdr, "crypt");
+
+ /* if last crypt segment exists and it's first one, just return offset = 0 */
+ if (last_crypt <= 0) {
+ *offset = 0;
+ return 0;
+ }
+
+ *offset = LUKS2_segment_offset(hdr, last_crypt, 0) - data_shift;
+ return 0;
+}
+
+static int _offset_forward(json_object *jobj_segments, uint64_t *offset)
+{
+ int segs = json_segments_count(jobj_segments);
+
+ if (segs == 1)
+ *offset = 0;
+ else if (segs == 2) {
+ *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
+ if (!*offset)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
+{
+ int segs = json_segments_count(jobj_segments);
+ uint64_t tmp;
+
+ if (segs == 1) {
+ if (device_size < *length)
+ *length = device_size;
+ *offset = device_size - *length;
+ } else if (segs == 2) {
+ tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
+ if (tmp < *length)
+ *length = tmp;
+ *offset = tmp - *length;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
/* must be always relative to data offset */
/* the LUKS2 header MUST be valid */
/*
 * Determine the offset (and possibly clamp the length) of the next
 * hotzone. If a segment is already flagged in-reencryption (crash
 * recovery), its offset is returned directly; otherwise dispatch on
 * direction and on whether a moved (data-shifted) segment is involved.
 * Returns 0 on success, -EINVAL on inconsistent layout.
 */
static int reencrypt_offset(struct luks2_hdr *hdr,
	crypt_reencrypt_direction_info di,
	uint64_t device_size,
	uint64_t *reencrypt_length,
	uint64_t *offset)
{
	int r, sg;
	json_object *jobj_segments;
	uint64_t data_shift = reencrypt_data_shift(hdr);

	if (!offset)
		return -EINVAL;

	/* if there's segment in reencryption return directly offset of it */
	json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
	sg = json_segments_segment_in_reencrypt(jobj_segments);
	if (sg >= 0) {
		*offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
		return 0;
	}

	if (di == CRYPT_REENCRYPT_FORWARD) {
		/* forward decryption with moved segment has its own rule */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) {
			r = reencrypt_offset_forward_moved(hdr, jobj_segments, data_shift, offset);
			if (!r && *offset > device_size)
				*offset = device_size;
			return r;
		}
		return _offset_forward(jobj_segments, offset);
	} else if (di == CRYPT_REENCRYPT_BACKWARD) {
		/* backward encryption with moved segment has its own rule */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
			return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
		return _offset_backward(jobj_segments, device_size, reencrypt_length, offset);
	}

	return -EINVAL;
}
+
/*
 * Compute the hotzone length (bytes) for one reencryption step, bounded
 * by the resilience protection (checksum/journal use the keyslot area),
 * a global hard limit, a soft limit of 1/4 of physical memory, the
 * caller's length_max and the required alignment. Datashift protection
 * always uses exactly the configured shift. May return 0 (caller emits
 * the error).
 */
static uint64_t reencrypt_length(struct crypt_device *cd,
	struct reenc_protection *rp,
	uint64_t keyslot_area_length,
	uint64_t length_max,
	size_t alignment)
{
	unsigned long dummy, optimal_alignment;
	uint64_t length, soft_mem_limit;

	if (rp->type == REENC_PROTECTION_NONE)
		length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
	else if (rp->type == REENC_PROTECTION_CHECKSUM)
		length = (keyslot_area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
	else if (rp->type == REENC_PROTECTION_DATASHIFT)
		return rp->p.ds.data_shift;
	else
		length = keyslot_area_length;

	/* hard limit */
	if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
		length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;

	/* soft limit is 1/4 of system memory */
	soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */

	if (soft_mem_limit && length > soft_mem_limit)
		length = soft_mem_limit;

	if (length_max && length > length_max)
		length = length_max;

	length -= (length % alignment);

	/* Emits error later */
	if (!length)
		return length;

	device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);

	/* we have to stick with encryption sector size alignment */
	if (optimal_alignment % alignment)
		return length;

	/* align to opt-io size only if remaining size allows it */
	if (length > optimal_alignment)
		length -= (length % optimal_alignment);

	return length;
}
+
/*
 * Initialize a reencryption context from the LUKS2 header: locate the
 * reencrypt keyslot, load mode/direction and resilience protections,
 * validate alignment constraints, compute hotzone length and offset,
 * cache the backup segments and derive the progress counter.
 *
 * max_hotzone_size and fixed_device_size are in 512-byte sectors;
 * internal lengths/offsets are bytes. Returns 0 or negative errno
 * (-EINVAL also when the resulting hotzone is shorter than 512 bytes).
 */
static int reencrypt_context_init(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t device_size,
	uint64_t max_hotzone_size,
	uint64_t fixed_device_size)
{
	int r;
	size_t alignment;
	uint64_t dummy, area_length;

	rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (rh->reenc_keyslot < 0)
		return -EINVAL;
	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
		return -EINVAL;

	rh->mode = reencrypt_mode(hdr);

	rh->direction = reencrypt_direction(hdr);

	/* primary hotzone resilience protection */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp, true);
	if (r < 0)
		return r;

	/* checksum protection dictates its own block alignment */
	if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
		alignment = rh->rp.p.csum.block_size;
	else
		alignment = reencrypt_get_alignment(cd, hdr);

	if (!alignment)
		return -EINVAL;

	if ((max_hotzone_size << SECTOR_SHIFT) % alignment) {
		log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
		return -EINVAL;
	}

	if ((fixed_device_size << SECTOR_SHIFT) % alignment) {
		log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
		return -EINVAL;
	}

	if (fixed_device_size) {
		log_dbg(cd, "Switching reencryption to fixed size mode.");
		device_size = fixed_device_size << SECTOR_SHIFT;
		rh->fixed_length = true;
	} else
		rh->fixed_length = false;

	rh->length = reencrypt_length(cd, &rh->rp, area_length, max_hotzone_size << SECTOR_SHIFT, alignment);
	if (!rh->length) {
		log_dbg(cd, "Invalid reencryption length.");
		return -EINVAL;
	}

	if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
		log_dbg(cd, "Failed to get reencryption offset.");
		return -EINVAL;
	}

	/* clamp the hotzone to the device end */
	if (rh->offset > device_size)
		return -EINVAL;
	if (rh->length > device_size - rh->offset)
		rh->length = device_size - rh->offset;

	_load_backup_segments(hdr, rh);

	/* secondary protection for the moved segment (may be unset) */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp_moved_segment, false);
	if (r < 0)
		return r;

	if (rh->rp_moved_segment.type == REENC_PROTECTION_NOT_SET)
		log_dbg(cd, "No moved segment resilience configured.");

	/* derive how many bytes were already processed */
	if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
		rh->progress = device_size - rh->offset - rh->length;
	else if (rh->jobj_segment_moved && rh->direction == CRYPT_REENCRYPT_FORWARD) {
		if (rh->offset == json_segment_get_offset(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false))
			rh->progress = device_size - json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false);
		else
			rh->progress = rh->offset - json_segment_get_size(rh->jobj_segment_moved, 0);
	} else
		rh->progress = rh->offset;

	log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");
	log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
	log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
	log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
	log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
	log_dbg(cd, "reencrypt shift: %s%" PRIu64,
		(rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""),
		data_shift_value(&rh->rp));
	log_dbg(cd, "reencrypt alignment: %zu", alignment);
	log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

	rh->device_size = device_size;

	return rh->length < 512 ? -EINVAL : 0;
}
+
+static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
+{
+ if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
+ return data_shift_value(&rh->rp);
+ return rh->length;
+}
+
/*
 * Allocate and initialize a reencryption context for a cleanly stopped
 * (non-crashed) reencryption, including the aligned transfer buffer.
 * On success *rh receives the context (caller frees with
 * LUKS2_reencrypt_free()); on failure everything is released.
 */
static int reencrypt_load_clean(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	uint64_t device_size,
	uint64_t max_hotzone_size,
	uint64_t fixed_device_size,
	struct luks2_reencrypt **rh)
{
	int r;
	struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));

	if (!tmp)
		return -ENOMEM;

	log_dbg(cd, "Loading stored reencryption context.");

	r = reencrypt_context_init(cd, hdr, tmp, device_size, max_hotzone_size, fixed_device_size);
	if (r)
		goto err;

	/* buffer must be aligned for direct I/O on the data device */
	if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
			   reencrypt_buffer_length(tmp))) {
		r = -ENOMEM;
		goto err;
	}

	*rh = tmp;

	return 0;
err:
	LUKS2_reencrypt_free(cd, tmp);

	return r;
}
+
+static int reencrypt_make_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ uint64_t device_size)
+{
+ int r;
+ uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
+
+ log_dbg(cd, "Calculating segments.");
+
+ r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
+ if (!r) {
+ r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
+ if (r)
+ json_object_put(rh->jobj_segs_hot);
+ }
+
+ if (r)
+ log_dbg(cd, "Failed to make reencryption segments.");
+
+ return r;
+}
+
/*
 * Rebuild segment layouts after a crash: the on-disk header already
 * contains the hot layout (a segment flagged in-reencryption), so copy
 * its non-backup segments as jobj_segs_hot and only recompute the post
 * layout. Returns 0, -EINVAL or -ENOMEM.
 */
static int reencrypt_make_segments_crashed(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh)
{
	int r;
	uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;

	if (!rh)
		return -EINVAL;

	rh->jobj_segs_hot = json_object_new_object();
	if (!rh->jobj_segs_hot)
		return -ENOMEM;

	/* take references on all regular (non-backup) header segments */
	json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
		if (json_segment_is_backup(val))
			continue;
		json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
	}

	r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
	if (r) {
		json_object_put(rh->jobj_segs_hot);
		rh->jobj_segs_hot = NULL;
	}

	return r;
}
+
/*
 * Load a reencryption context after a crash. Derives the fixed device
 * size from the header (unless the size is dynamic), reuses the clean
 * loader, then overrides the hotzone length with the size of the
 * segment found in-reencryption and rebuilds the layouts. On failure
 * *rh is freed and set to NULL.
 */
static int reencrypt_load_crashed(struct crypt_device *cd,
	struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
{
	bool dynamic;
	uint64_t required_device_size;
	int r, reenc_seg;

	if (LUKS2_get_data_size(hdr, &required_device_size, &dynamic))
		return -EINVAL;

	if (dynamic)
		required_device_size = 0;
	else
		required_device_size >>= SECTOR_SHIFT; /* loader expects sectors */

	r = reencrypt_load_clean(cd, hdr, device_size, 0, required_device_size, rh);

	if (!r) {
		/* the crashed hotzone length is fixed by the on-disk segment */
		reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
		if (reenc_seg < 0)
			r = -EINVAL;
		else
			(*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
	}

	if (!r)
		r = reencrypt_make_segments_crashed(cd, hdr, *rh);

	if (r) {
		LUKS2_reencrypt_free(cd, *rh);
		*rh = NULL;
	}
	return r;
}
+
/*
 * Initialize the two storage wrappers used to move data:
 *   cw1 - read-only, old format (old cipher/offset/sector size),
 *   cw2 - writable, new format.
 * Keys are selected from vks by digest id (may be NULL for the
 * plaintext side). Kernel crypto API backend is disabled for
 * unprivileged users. Returns 0 or negative errno.
 */
static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		struct volume_key *vks)
{
	int r;
	struct volume_key *vk;
	uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;

	vk = crypt_volume_key_by_id(vks, rh->digest_old);
	r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
			reencrypt_get_data_offset_old(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_old(hdr),
			reencrypt_segment_cipher_old(hdr),
			vk, wrapper_flags | OPEN_READONLY);
	if (r) {
		log_err(cd, _("Failed to initialize old segment storage wrapper."));
		return r;
	}
	rh->wflags1 = wrapper_flags | OPEN_READONLY;
	log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));

	vk = crypt_volume_key_by_id(vks, rh->digest_new);
	r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
			reencrypt_get_data_offset_new(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_new(hdr),
			reencrypt_segment_cipher_new(hdr),
			vk, wrapper_flags);
	if (r) {
		log_err(cd, _("Failed to initialize new segment storage wrapper."));
		return r;
	}
	rh->wflags2 = wrapper_flags;
	log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));

	return 0;
}
+
+static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
+{
+ if (!rh | !name)
+ return -EINVAL;
+
+ if (*name == '/') {
+ if (!(rh->device_name = dm_device_name(name)))
+ return -EINVAL;
+ } else if (!(rh->device_name = strdup(name)))
+ return -ENOMEM;
+
+ if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
+ rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
+ rh->hotzone_name = NULL;
+ return -ENOMEM;
+ }
+ if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
+ rh->overlay_name = NULL;
+ return -ENOMEM;
+ }
+
+ rh->online = true;
+ return 0;
+}
+
+static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
+{
+ int r = -EINVAL;
+
+ if (!offset)
+ return r;
+
+ if (di == CRYPT_REENCRYPT_FORWARD) {
+ if (*offset >= data_shift) {
+ *offset -= data_shift;
+ r = 0;
+ }
+ } else if (di == CRYPT_REENCRYPT_BACKWARD) {
+ *offset += data_shift;
+ r = 0;
+ }
+
+ return r;
+}
+
+static int reencrypt_update_flag(struct crypt_device *cd, uint8_t version,
+ bool enable, bool commit)
+{
+ uint32_t reqs;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+ if (enable) {
+ log_dbg(cd, "Going to store reencryption requirement flag (version: %u).", version);
+ return LUKS2_config_set_requirement_version(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, version, commit);
+ }
+
+ if (LUKS2_config_get_requirements(cd, hdr, &reqs))
+ return -EINVAL;
+
+ reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
+
+ log_dbg(cd, "Going to wipe reencryption requirement flag.");
+
+ return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
+}
+
/*
 * Ensure the hotzone protection context is usable. Only the checksum
 * resilience mode needs preparation: an aligned buffer (device-alignment of
 * the metadata device) for storing per-block checksums is lazily allocated.
 * Returns 0 when ready, -EINVAL when protection type is unset, -ENOMEM on
 * allocation failure.
 */
static int reencrypt_hotzone_protect_ready(struct crypt_device *cd,
	struct reenc_protection *rp)
{
	assert(rp);

	if (rp->type == REENC_PROTECTION_NOT_SET)
		return -EINVAL;

	/* journal/datashift/none modes need no extra buffers */
	if (rp->type != REENC_PROTECTION_CHECKSUM)
		return 0;

	if (!rp->p.csum.checksums) {
		log_dbg(cd, "Allocating buffer for storing resilience checksums.");
		if (posix_memalign(&rp->p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
				   rp->p.csum.checksums_len))
			return -ENOMEM;
	}

	return 0;
}
+
+static int reencrypt_recover_segment(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct luks2_reencrypt *rh,
+ struct volume_key *vks)
+{
+ struct volume_key *vk_old, *vk_new;
+ size_t count, s;
+ ssize_t read, w;
+ struct reenc_protection *rp;
+ int devfd, r, new_sector_size, old_sector_size, rseg;
+ uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
+ data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
+ char *checksum_tmp = NULL, *data_buffer = NULL;
+ struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
+
+ assert(hdr);
+ assert(rh);
+ assert(vks);
+
+ rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
+ if (rh->offset == 0 && rh->rp_moved_segment.type > REENC_PROTECTION_NOT_SET) {
+ log_dbg(cd, "Recovery using moved segment protection.");
+ rp = &rh->rp_moved_segment;
+ } else
+ rp = &rh->rp;
+
+ if (rseg < 0 || rh->length < 512)
+ return -EINVAL;
+
+ r = reencrypt_hotzone_protect_ready(cd, rp);
+ if (r) {
+ log_err(cd, _("Failed to initialize hotzone protection."));
+ return -EINVAL;
+ }
+
+ vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
+ if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
+ return -EINVAL;
+ vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
+ if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
+ return -EINVAL;
+ old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
+ new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
+ if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
+ crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
+ else
+ crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
+
+ log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ", crash_iv_offset: %" PRIu64,
+ data_offset + rh->offset, rh->length, crash_iv_offset);
+
+ r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
+ data_offset + rh->offset, crash_iv_offset, new_sector_size,
+ reencrypt_segment_cipher_new(hdr), vk_new, 0);
+ if (r) {
+ log_err(cd, _("Failed to initialize new segment storage wrapper."));
+ return r;
+ }
+
+ if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ switch (rp->type) {
+ case REENC_PROTECTION_CHECKSUM:
+ log_dbg(cd, "Checksums based recovery.");
+
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
+ data_offset + rh->offset, crash_iv_offset, old_sector_size,
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ goto out;
+ }
+
+ count = rh->length / rp->p.csum.block_size;
+ area_length_read = count * rp->p.csum.hash_size;
+ if (area_length_read > area_length) {
+ log_dbg(cd, "Internal error in calculated area_length.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ checksum_tmp = malloc(rp->p.csum.hash_size);
+ if (!checksum_tmp) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /* TODO: lock for read */
+ devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
+ if (devfd < 0)
+ goto out;
+
+ /* read old data checksums */
+ read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
+ device_alignment(crypt_metadata_device(cd)), rp->p.csum.checksums, area_length_read, area_offset);
+ if (read < 0 || (size_t)read != area_length_read) {
+ log_err(cd, _("Failed to read checksums for current hotzone."));
+ r = -EINVAL;
+ goto out;
+ }
+
+ read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
+ r = -EINVAL;
+ goto out;
+ }
+
+ for (s = 0; s < count; s++) {
+ if (crypt_hash_write(rp->p.csum.ch, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
+ log_dbg(cd, "Failed to write hash.");
+ r = EINVAL;
+ goto out;
+ }
+ if (crypt_hash_final(rp->p.csum.ch, checksum_tmp, rp->p.csum.hash_size)) {
+ log_dbg(cd, "Failed to finalize hash.");
+ r = EINVAL;
+ goto out;
+ }
+ if (!memcmp(checksum_tmp, (char *)rp->p.csum.checksums + (s * rp->p.csum.hash_size), rp->p.csum.hash_size)) {
+ log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rp->p.csum.block_size, s * rp->p.csum.block_size);
+ if (crypt_storage_wrapper_decrypt(cw1, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
+ log_err(cd, _("Failed to decrypt sector %zu."), s);
+ r = -EINVAL;
+ goto out;
+ }
+ w = crypt_storage_wrapper_encrypt_write(cw2, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size);
+ if (w < 0 || (size_t)w != rp->p.csum.block_size) {
+ log_err(cd, _("Failed to recover sector %zu."), s);
+ r = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ r = 0;
+ break;
+ case REENC_PROTECTION_JOURNAL:
+ log_dbg(cd, "Journal based recovery.");
+
+ /* FIXME: validation candidate */
+ if (rh->length > area_length) {
+ r = -EINVAL;
+ log_dbg(cd, "Invalid journal size.");
+ goto out;
+ }
+
+ /* TODO locking */
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
+ area_offset, crash_iv_offset, old_sector_size,
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ goto out;
+ }
+ read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "Failed to read journaled data.");
+ r = -EIO;
+ /* may content plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ goto out;
+ }
+ read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
+ /* may content plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "recovery write failed.");
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = 0;
+ break;
+ case REENC_PROTECTION_DATASHIFT:
+ log_dbg(cd, "Data shift based recovery.");
+
+ if (rseg == 0) {
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
+ json_segment_get_offset(rh->jobj_segment_moved, 0), 0,
+ reencrypt_get_sector_size_old(hdr),
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ } else {
+ if (rh->direction == CRYPT_REENCRYPT_FORWARD)
+ data_offset = data_offset + rh->offset + data_shift_value(rp);
+ else
+ data_offset = data_offset + rh->offset - data_shift_value(rp);
+ r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
+ data_offset,
+ crash_iv_offset,
+ reencrypt_get_sector_size_old(hdr),
+ reencrypt_segment_cipher_old(hdr), vk_old, 0);
+ }
+ if (r) {
+ log_err(cd, _("Failed to initialize old segment storage wrapper."));
+ goto out;
+ }
+
+ read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "Failed to read data.");
+ r = -EIO;
+ /* may content plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ goto out;
+ }
+
+ read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
+ /* may content plaintext */
+ crypt_safe_memzero(data_buffer, rh->length);
+ if (read < 0 || (size_t)read != rh->length) {
+ log_dbg(cd, "recovery write failed.");
+ r = -EINVAL;
+ goto out;
+ }
+ r = 0;
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ if (!r)
+ rh->read = rh->length;
+out:
+ free(data_buffer);
+ free(checksum_tmp);
+ crypt_storage_wrapper_destroy(cw1);
+ crypt_storage_wrapper_destroy(cw2);
+
+ return r;
+}
+
/*
 * Append the backup of the moved first segment (if any) to the header's
 * segment list under the first unused segment id. Crypt-type moved segments
 * additionally get the old digest assigned so the key reference is kept.
 * Returns 0 on success (or when there is no moved segment), negative errno
 * otherwise.
 */
static int reencrypt_add_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
{
	int digest = rh->digest_old, s = LUKS2_segment_first_unused_id(hdr);

	if (!rh->jobj_segment_moved)
		return 0;

	if (s < 0)
		return s;

	/* json_object_get bumps the refcount; put it back on add failure */
	if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
		json_object_put(rh->jobj_segment_moved);
		return -EINVAL;
	}

	if (!strcmp(json_segment_type(rh->jobj_segment_moved), "crypt"))
		return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);

	return 0;
}
+
/*
 * Append a backup segment to the header's segment list: the "final" backup
 * (new segment/digest) when 'final' is nonzero, otherwise the "previous"
 * backup (old segment/digest). Crypt-type segments get the matching digest
 * assigned. Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_add_backup_segment(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	unsigned final)
{
	int digest, s = LUKS2_segment_first_unused_id(hdr);
	json_object *jobj;

	if (s < 0)
		return s;

	digest = final ? rh->digest_new : rh->digest_old;
	jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;

	/* json_object_get bumps the refcount; put it back on add failure */
	if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
		json_object_put(jobj);
		return -EINVAL;
	}

	/* non-crypt (linear) backup segments carry no digest */
	if (strcmp(json_segment_type(jobj), "crypt"))
		return 0;

	return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
}
+
/*
 * Segment assignment for the simple modes (plain encrypt or decrypt):
 * install either the 'hot' or the 'post' segment set into the header,
 * re-add the backup and moved segments, and assign the single relevant
 * digest (new for encryption, old for decryption) to every crypt segment.
 * On successful install the rh pointer is cleared because ownership of the
 * json object moved to the header. Optionally commits the header to disk.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_assign_segments_simple(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	unsigned hot,
	unsigned commit)
{
	int r, sg;

	if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
		log_dbg(cd, "Setting 'hot' segments.");

		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL; /* header owns the object now */
	} else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL; /* header owns the object now */
	} else {
		log_dbg(cd, "No segments to set.");
		return -EINVAL;
	}

	if (r) {
		log_dbg(cd, "Failed to assign new enc segments.");
		return r;
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption previous backup segment.");
		return r;
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption final backup segment.");
		return r;
	}

	r = reencrypt_add_moved_segment(cd, hdr, rh);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption moved backup segment.");
		return r;
	}

	/* every crypt segment references the single digest in use */
	for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
		if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
		    LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
			log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
			return -EINVAL;
		}
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
+
/*
 * Install the hot/post segment layout for a reencryption step and wire up
 * digest references: the in-reencryption segment references both digests,
 * the leading/trailing segments reference new or old depending on direction,
 * and the backup segments are re-added. Simple encrypt/decrypt modes are
 * delegated to reencrypt_assign_segments_simple(). Optionally commits the
 * header. Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_assign_segments(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	unsigned hot,
	unsigned commit)
{
	bool forward;
	int rseg, scount, r = -EINVAL;

	/* FIXME: validate in reencrypt context load */
	if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* drop all existing digest<->segment references before reassigning */
	if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
		return -EINVAL;

	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);

	if (hot && rh->jobj_segs_hot) {
		log_dbg(cd, "Setting 'hot' segments.");

		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL; /* header owns the object now */
	} else if (!hot && rh->jobj_segs_post) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL; /* header owns the object now */
	}

	if (r)
		return r;

	scount = LUKS2_segments_count(hdr);

	/* segment in reencryption has to hold reference on both digests */
	rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
	if (rseg < 0 && hot)
		return -EINVAL;

	if (rseg >= 0) {
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
	}

	forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
	if (hot) {
		/* forward: data before rseg is already new; after it still old */
		if (rseg > 0)
			LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > rseg + 1)
			LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	} else {
		LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > 1)
			LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign hot reencryption backup segment.");
		return r;
	}
	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign post reencryption backup segment.");
		return r;
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
+
+static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t dev_size, uint64_t data_shift, bool move_first_segment,
+ crypt_reencrypt_direction_info di)
+{
+ int r;
+ uint64_t first_segment_offset, first_segment_length,
+ second_segment_offset, second_segment_length,
+ data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT,
+ data_size = dev_size - data_shift;
+ json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
+
+ if (dev_size < data_shift)
+ return -EINVAL;
+
+ if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
+ return -ENOTSUP;
+
+ if (move_first_segment) {
+ /*
+ * future data_device layout:
+ * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
+ */
+ first_segment_offset = dev_size;
+ if (data_size < data_shift) {
+ first_segment_length = data_size;
+ second_segment_length = second_segment_offset = 0;
+ } else {
+ first_segment_length = data_shift;
+ second_segment_offset = data_shift;
+ second_segment_length = data_size - data_shift;
+ }
+ } else if (data_shift) {
+ first_segment_offset = data_offset;
+ first_segment_length = dev_size;
+ } else {
+ /* future data_device layout with detached header: [first data segment] */
+ first_segment_offset = data_offset;
+ first_segment_length = 0; /* dynamic */
+ }
+
+ jobj_segments = json_object_new_object();
+ if (!jobj_segments)
+ return -ENOMEM;
+
+ r = -EINVAL;
+ if (move_first_segment) {
+ jobj_segment_first = json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
+ if (second_segment_length &&
+ !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
+ log_dbg(cd, "Failed generate 2nd segment.");
+ return r;
+ }
+ } else
+ jobj_segment_first = json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
+
+ if (!jobj_segment_first) {
+ log_dbg(cd, "Failed generate 1st segment.");
+ return r;
+ }
+
+ json_object_object_add(jobj_segments, "0", jobj_segment_first);
+ if (jobj_segment_second)
+ json_object_object_add(jobj_segments, "1", jobj_segment_second);
+
+ r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
+
+ return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0);
+}
+
+static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ uint64_t dev_size,
+ uint64_t moved_segment_length,
+ crypt_reencrypt_direction_info di)
+{
+ int r;
+ uint64_t first_segment_offset, first_segment_length,
+ second_segment_offset, second_segment_length,
+ data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
+ json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
+
+ if (di == CRYPT_REENCRYPT_BACKWARD)
+ return -ENOTSUP;
+
+ /*
+ * future data_device layout:
+ * [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment]
+ */
+ first_segment_offset = 0;
+ first_segment_length = moved_segment_length;
+ if (dev_size > moved_segment_length) {
+ second_segment_offset = data_offset + first_segment_length;
+ second_segment_length = 0;
+ }
+
+ jobj_segments = json_object_new_object();
+ if (!jobj_segments)
+ return -ENOMEM;
+
+ r = -EINVAL;
+ jobj_segment_first = json_segment_create_crypt(first_segment_offset,
+ crypt_get_iv_offset(cd), &first_segment_length,
+ crypt_get_cipher_spec(cd), crypt_get_sector_size(cd), 0);
+
+ if (!jobj_segment_first) {
+ log_dbg(cd, "Failed generate 1st segment.");
+ return r;
+ }
+
+ if (dev_size > moved_segment_length) {
+ jobj_segment_second = json_segment_create_crypt(second_segment_offset,
+ crypt_get_iv_offset(cd) + (first_segment_length >> SECTOR_SHIFT),
+ second_segment_length ? &second_segment_length : NULL,
+ crypt_get_cipher_spec(cd),
+ crypt_get_sector_size(cd), 0);
+ if (!jobj_segment_second) {
+ json_object_put(jobj_segment_first);
+ log_dbg(cd, "Failed generate 2nd segment.");
+ return r;
+ }
+ }
+
+ json_object_object_add(jobj_segments, "0", jobj_segment_first);
+ if (jobj_segment_second)
+ json_object_object_add(jobj_segments, "1", jobj_segment_second);
+
+ r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
+
+ return r ?: LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, 0, 1, 0);
+}
+
+static int reencrypt_make_targets(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct device *hz_device,
+ struct volume_key *vks,
+ struct dm_target *result,
+ uint64_t size)
+{
+ bool reenc_seg;
+ struct volume_key *vk;
+ uint64_t segment_size, segment_offset, segment_start = 0;
+ int r;
+ int s = 0;
+ json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
+
+ while (result) {
+ jobj = json_segments_get_segment(jobj_segments, s);
+ if (!jobj) {
+ log_dbg(cd, "Internal error. Segment %u is null.", s);
+ return -EINVAL;
+ }
+
+ reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
+
+ segment_offset = json_segment_get_offset(jobj, 1);
+ segment_size = json_segment_get_size(jobj, 1);
+ /* 'dynamic' length allowed in last segment only */
+ if (!segment_size && !result->next)
+ segment_size = (size >> SECTOR_SHIFT) - segment_start;
+ if (!segment_size) {
+ log_dbg(cd, "Internal error. Wrong segment size %u", s);
+ return -EINVAL;
+ }
+
+ if (reenc_seg)
+ segment_offset -= crypt_get_data_offset(cd);
+
+ if (!strcmp(json_segment_type(jobj), "crypt")) {
+ vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
+ if (!vk) {
+ log_err(cd, _("Missing key for dm-crypt segment %u"), s);
+ return -EINVAL;
+ }
+
+ r = dm_crypt_target_set(result, segment_start, segment_size,
+ reenc_seg ? hz_device : crypt_data_device(cd),
+ vk,
+ json_segment_get_cipher(jobj),
+ json_segment_get_iv_offset(jobj),
+ segment_offset,
+ "none",
+ 0,
+ json_segment_get_sector_size(jobj));
+ if (r) {
+ log_err(cd, _("Failed to set dm-crypt segment."));
+ return r;
+ }
+ } else if (!strcmp(json_segment_type(jobj), "linear")) {
+ r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
+ if (r) {
+ log_err(cd, _("Failed to set dm-linear segment."));
+ return r;
+ }
+ } else
+ return EINVAL;
+
+ segment_start += segment_size;
+ s++;
+ result = result->next;
+ }
+
+ return s;
+}
+
/* GLOBAL FIXME: audit function names and parameter names */

/* FIXME:
 * 1) audit log routines
 * 2) can't we derive the hotzone device name from the crypt context? (unlocked name, device uuid, etc.?)
 */
/*
 * Load (reload, not resume) a new table into the overlay device, built from
 * all header segments; the in-reencryption segment is routed through the
 * hotzone device, the rest to the data device.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
	const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
	uint32_t flags)
{
	char hz_path[PATH_MAX];
	int r;

	struct device *hz_dev = NULL;
	struct crypt_dm_active_device dmd = {
		.flags = flags,
	};

	log_dbg(cd, "Loading new table for overlay device %s.", overlay);

	/* build /dev/mapper/<hotzone> path; reject truncation */
	r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
	if (r < 0 || r >= PATH_MAX) {
		r = -EINVAL;
		goto out;
	}

	r = device_alloc(cd, &hz_dev, hz_path);
	if (r)
		goto out;

	/* one dm target per header segment */
	r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
	if (r)
		goto out;

	r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
	if (r < 0)
		goto out;

	r = dm_reload_device(cd, overlay, &dmd, 0, 0);

	/* what else on error here ? */
out:
	dm_targets_free(cd, &dmd);
	device_free(cd, hz_dev);

	return r;
}
+
/*
 * Copy the live table of 'source' into 'target': reload+resume when the
 * target device already exists (sizes must match), otherwise create it as a
 * private subdevice. 'flags' are merged into the source activation flags.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
{
	int r, exists = 1;
	struct crypt_dm_active_device dmd_source, dmd_target = {};
	uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;

	log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);

	/* check only whether target device exists */
	r = dm_status_device(cd, target);
	if (r < 0) {
		if (r == -ENODEV)
			exists = 0;
		else
			return r;
	}

	/* dmd_source is fully populated by the query (no initializer needed) */
	r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
			    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);

	if (r < 0)
		return r;

	if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
		goto out;

	dmd_source.flags |= flags;
	dmd_source.uuid = crypt_get_uuid(cd);

	if (exists) {
		if (dmd_target.size != dmd_source.size) {
			log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
				dmd_source.size, dmd_target.size);
			r = -EINVAL;
			goto out;
		}
		r = dm_reload_device(cd, target, &dmd_source, 0, 0);
		if (!r) {
			log_dbg(cd, "Resuming device %s", target);
			r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
		}
	} else
		r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
out:
	dm_targets_free(cd, &dmd_source);
	dm_targets_free(cd, &dmd_target);

	return r;
}
+
/*
 * Redirect the mapping of 'name' to a 1:1 dm-linear over another dm device
 * (the overlay): reload the new linear table and resume without flushing.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
			  const char *new_backend_name)
{
	int r;
	struct device *overlay_dev = NULL;
	char overlay_path[PATH_MAX] = { 0 };
	struct crypt_dm_active_device dmd = {};

	log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);

	/* build /dev/mapper/<new_backend_name> path; reject truncation */
	r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
	if (r < 0 || r >= PATH_MAX) {
		r = -EINVAL;
		goto out;
	}

	r = device_alloc(cd, &overlay_dev, overlay_path);
	if (r)
		goto out;

	/* fills in dmd.size from the backing device */
	r = device_block_adjust(cd, overlay_dev, DEV_OK,
				0, &dmd.size, &dmd.flags);
	if (r)
		goto out;

	r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
	if (r)
		goto out;

	r = dm_reload_device(cd, name, &dmd, 0, 0);
	if (!r) {
		log_dbg(cd, "Resuming device %s", name);
		r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	}

out:
	dm_targets_free(cd, &dmd);
	device_free(cd, overlay_dev);

	return r;
}
+
/*
 * Activate the hotzone helper device as a plain 1:1 dm-linear mapping onto
 * the data device, starting at the new data offset. device_size is in bytes;
 * the dm device size is in sectors.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
{
	int r;
	uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;

	struct crypt_dm_active_device dmd = {
		.flags = flags,
		.uuid = crypt_get_uuid(cd),
		.size = device_size >> SECTOR_SHIFT
	};

	log_dbg(cd, "Activating hotzone device %s.", name);

	r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
				new_offset, &dmd.size, &dmd.flags);
	if (r)
		goto out;

	r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
	if (r)
		goto out;

	r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
out:
	dm_targets_free(cd, &dmd);

	return r;
}
+
/*
 * Build the online-reencryption device stack:
 *   name (linear) -> overlay (original table) -> hotzone (linear to data dev).
 * On any failure after the first step, the helper devices are torn down.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_init_device_stack(struct crypt_device *cd,
				     const struct luks2_reencrypt *rh)
{
	int r;

	/* Activate hotzone device 1:1 linear mapping to data_device */
	r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
		return r;
	}

	/*
	 * Activate overlay device with exactly the same table as the original
	 * 'name' mapping. Note that within this step the 'name' device may
	 * already include a table constructed from more than a single dm-crypt
	 * segment. Therefore transfer the mapping as is.
	 *
	 * If we're about to resume reencryption the orig mapping has to be
	 * already validated for abrupt shutdown and rchunk_offset has to point
	 * at the next chunk to reencrypt!
	 *
	 * TODO: in crypt_activate_by*
	 */
	r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
		goto err;
	}

	/* swap origin mapping to overlay device */
	r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
	if (r) {
		log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
		goto err;
	}

	/*
	 * Now the 'name' (unlocked luks) device is mapped via dm-linear to the
	 * overlay device, which holds the original live table of the 'name'
	 * device from before the swap.
	 */

	return 0;
err:
	/* TODO: force error helper devices on error path */
	dm_remove_device(cd, rh->overlay_name, 0);
	dm_remove_device(cd, rh->hotzone_name, 0);

	return r;
}
+
+/* TODO:
+ * 1) audit error path. any error in this routine is fatal and should be unlikely.
+ * usually it would hint some collision with another userspace process touching
+ * dm devices directly.
+ */
/*
 * Switch the overlay device to its preloaded (inactive) table:
 * suspend overlay, suspend hotzone, then resume overlay. Suspend order is
 * critical (see comment below). The hotzone stays suspended for the caller.
 * Returns 0 on success, negative errno otherwise.
 */
static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
{
	int r;

	/*
	 * we have to explicitly suspend the overlay device before suspending
	 * the hotzone one. Resuming overlay device (aka switching tables) only
	 * after suspending the hotzone may lead to deadlock.
	 *
	 * In other words: always suspend the stack from top to bottom!
	 */
	r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	if (r) {
		log_err(cd, _("Failed to suspend device %s."), overlay);
		return r;
	}

	/* suspend HZ device */
	r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	if (r) {
		log_err(cd, _("Failed to suspend device %s."), hotzone);
		return r;
	}

	/* resume overlay device: inactive table (with hotzone) -> live */
	r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
	if (r)
		log_err(cd, _("Failed to resume device %s."), overlay);

	return r;
}
+
/*
 * Load the next-hotzone table into the overlay device and perform the
 * suspend/resume dance to make it live.
 * Returns REENC_OK on success, REENC_ERR when the table load failed (stack
 * untouched), REENC_ROLLBACK when the refresh failed mid-way (caller must
 * roll the stack back).
 */
static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		const char *overlay,
		const char *hotzone,
		struct volume_key *vks,
		uint64_t device_size,
		uint32_t flags)
{
	int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
	if (r) {
		log_err(cd, _("Failed to reload device %s."), overlay);
		return REENC_ERR;
	}

	r = reenc_refresh_helper_devices(cd, overlay, hotzone);
	if (r) {
		log_err(cd, _("Failed to refresh reencryption devices stack."));
		return REENC_ROLLBACK;
	}

	return REENC_OK;
}
+
+static int reencrypt_move_data(struct crypt_device *cd,
+ int devfd,
+ uint64_t data_shift,
+ crypt_reencrypt_mode_info mode)
+{
+ void *buffer;
+ int r;
+ ssize_t ret;
+ uint64_t buffer_len, offset,
+ read_offset = (mode == CRYPT_REENCRYPT_ENCRYPT ? 0 : data_shift);
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+ offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
+ buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0);
+ if (!buffer_len || buffer_len > data_shift)
+ return -EINVAL;
+
+ if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
+ return -ENOMEM;
+
+ ret = read_lseek_blockwise(devfd,
+ device_block_size(cd, crypt_data_device(cd)),
+ device_alignment(crypt_data_device(cd)),
+ buffer, buffer_len, read_offset);
+ if (ret < 0 || (uint64_t)ret != buffer_len) {
+ log_dbg(cd, "Failed to read data at offset %" PRIu64 " (size: %zu)",
+ read_offset, buffer_len);
+ r = -EIO;
+ goto out;
+ }
+
+ log_dbg(cd, "Going to write %" PRIu64 " bytes read at offset %" PRIu64 " to new offset %" PRIu64,
+ buffer_len, read_offset, offset);
+ ret = write_lseek_blockwise(devfd,
+ device_block_size(cd, crypt_data_device(cd)),
+ device_alignment(crypt_data_device(cd)),
+ buffer, buffer_len, offset);
+ if (ret < 0 || (uint64_t)ret != buffer_len) {
+ log_dbg(cd, "Failed to write data at offset %" PRIu64 " (size: %zu)",
+ offset, buffer_len);
+ r = -EIO;
+ goto out;
+ }
+
+ r = 0;
+out:
+ crypt_safe_memzero(buffer, buffer_len);
+ free(buffer);
+ return r;
+}
+
/*
 * Create the backup segments describing the reencryption operation in the
 * header: an optional "backup-moved-segment", a "backup-previous" segment
 * (old layout) and a "backup-final" segment (new layout), each assigned to
 * its digest. For forward reencryption with data shift on a single device,
 * the keyslots area is also shrunk to end at the new data offset.
 *
 * NOTE(review): 'params' is dereferenced before any NULL check — callers
 * presumably guarantee it is non-NULL; confirm against call sites.
 * NOTE(review): the json_object_object_add_by_uint() return values here are
 * unchecked, unlike the checked calls in reencrypt_add_*_segment() above —
 * looks intentional (best-effort on a fresh header) but worth confirming.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_make_backup_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		int keyslot_new,
		const char *cipher,
		uint64_t data_offset,
		const struct crypt_params_reencrypt *params)
{
	int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
	json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
	uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT,
		 device_size = params->device_size << SECTOR_SHIFT;

	/* new digest required unless decrypting */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
		digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
		if (digest_new < 0)
			return -EINVAL;
	}

	/* old digest required unless encrypting a plain device */
	if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
		if (digest_old < 0)
			return -EINVAL;
	}

	segment = LUKS2_segment_first_unused_id(hdr);
	if (segment < 0)
		return -EINVAL;

	/* optional backup of the relocated first segment */
	if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp)) {
			r = -EINVAL;
			goto err;
		}
		r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
		if (r)
			goto err;
		moved_segment = segment++;
		json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
		if (!strcmp(json_segment_type(jobj_segment_bcp), "crypt"))
			LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0);
	}

	/* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
	if (digest_old >= 0) {
		if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
			jobj_tmp = LUKS2_get_segment_jobj(hdr, 0);
			if (!jobj_tmp) {
				r = -EINVAL;
				goto err;
			}

			/* old layout rebuilt at data_offset with segment-0 cipher parameters */
			jobj_segment_old = json_segment_create_crypt(data_offset,
						json_segment_get_iv_offset(jobj_tmp),
						device_size ? &device_size : NULL,
						json_segment_get_cipher(jobj_tmp),
						json_segment_get_sector_size(jobj_tmp),
						0);
		} else {
			if (json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old)) {
				r = -EINVAL;
				goto err;
			}
		}
	} else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		r = LUKS2_get_data_size(hdr, &tmp, NULL);
		if (r)
			goto err;

		/* plain device: the "previous" layout is linear */
		if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)
			jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
		else
			jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0);
	}

	if (!jobj_segment_old) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
	if (r)
		goto err;
	json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
	jobj_segment_old = NULL; /* ownership moved to the header */
	if (digest_old >= 0)
		LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
	segment++;

	if (digest_new >= 0) {
		segment_offset = data_offset;
		if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
		    modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_crypt(segment_offset,
							crypt_get_iv_offset(cd),
							NULL, cipher, sector_size, 0);
	} else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
		segment_offset = data_offset;
		if (modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		/* decryption target: final layout is linear (plaintext) */
		jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
	}

	if (!jobj_segment_new) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
	if (r)
		goto err;
	json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
	jobj_segment_new = NULL; /* ownership moved to the header */
	if (digest_new >= 0)
		LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);

	/* FIXME: also check occupied space by keyslot in shrunk area */
	if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
	    crypt_metadata_device(cd) == crypt_data_device(cd) &&
	    LUKS2_set_keyslots_size(hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
		log_err(cd, _("Failed to set new keyslots area size."));
		r = -EINVAL;
		goto err;
	}

	return 0;
err:
	json_object_put(jobj_segment_new);
	json_object_put(jobj_segment_old);
	return r;
}
+
+static int reencrypt_verify_single_key(struct crypt_device *cd, int digest, struct volume_key *vks)
+{
+ struct volume_key *vk;
+
+ vk = crypt_volume_key_by_id(vks, digest);
+ if (!vk)
+ return -ENOENT;
+
+ if (LUKS2_digest_verify_by_digest(cd, digest, vk) != digest)
+ return -EINVAL;
+
+ return 0;
+}
+
/* Verify both reencryption volume keys (new first, then old); a negative
 * digest id means the respective key is not required. */
static int reencrypt_verify_keys(struct crypt_device *cd,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r;

	if (digest_new >= 0) {
		r = reencrypt_verify_single_key(cd, digest_new, vks);
		if (r)
			return r;
	}

	if (digest_old < 0)
		return 0;

	return reencrypt_verify_single_key(cd, digest_old, vks);
}
+
+static int reencrypt_upload_single_key(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int digest,
+ struct volume_key *vks)
+{
+ struct volume_key *vk;
+
+ vk = crypt_volume_key_by_id(vks, digest);
+ if (!vk)
+ return -EINVAL;
+
+ return LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, digest);
+}
+
/* Upload old and new volume keys to the kernel keyring when keyring use is
 * enabled. Keys for cipher_null segments are skipped (nothing to upload).
 * On failure of the old key upload the already uploaded keys are dropped. */
static int reencrypt_upload_keys(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r;

	if (!crypt_use_keyring_for_vk(cd))
		return 0;

	if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr))) {
		r = reencrypt_upload_single_key(cd, hdr, digest_new, vks);
		if (r)
			return r;
	}

	if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
		r = reencrypt_upload_single_key(cd, hdr, digest_old, vks);
		if (r) {
			/* do not leave the first key dangling in the keyring */
			crypt_drop_keyring_key(cd, vks);
			return r;
		}
	}

	return 0;
}
+
/* Convenience wrapper: verify the reencryption volume keys and, on success,
 * upload them to the kernel keyring. */
static int reencrypt_verify_and_upload_keys(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r = reencrypt_verify_keys(cd, digest_old, digest_new, vks);

	if (!r)
		r = reencrypt_upload_keys(cd, hdr, digest_old, digest_new, vks);

	return r;
}
+
+static int reencrypt_verify_checksum_params(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params)
+{
+ size_t len;
+ struct crypt_hash *ch;
+
+ assert(params);
+
+ if (!params->hash)
+ return -EINVAL;
+
+ len = strlen(params->hash);
+ if (!len || len > (LUKS2_CHECKSUM_ALG_L - 1))
+ return -EINVAL;
+
+ if (crypt_hash_size(params->hash) <= 0)
+ return -EINVAL;
+
+ if (crypt_hash_init(&ch, params->hash)) {
+ log_err(cd, _("Hash algorithm %s is not available."), params->hash);
+ return -EINVAL;
+ }
+ /* We just check for alg availability */
+ crypt_hash_destroy(ch);
+
+ return 0;
+}
+
+static int reencrypt_verify_datashift_params(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params,
+ uint32_t sector_size)
+{
+ assert(params);
+
+ if (!params->data_shift)
+ return -EINVAL;
+ if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
+ log_err(cd, _("Data shift value is not aligned to encryption sector size (%" PRIu32 " bytes)."),
+ sector_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int reencrypt_verify_resilience_params(struct crypt_device *cd,
+ const struct crypt_params_reencrypt *params,
+ uint32_t sector_size, bool move_first_segment)
+{
+ /* no change requested */
+ if (!params || !params->resilience)
+ return 0;
+
+ if (!strcmp(params->resilience, "journal"))
+ return (params->data_shift || move_first_segment) ? -EINVAL : 0;
+ else if (!strcmp(params->resilience, "none"))
+ return (params->data_shift || move_first_segment) ? -EINVAL : 0;
+ else if (!strcmp(params->resilience, "datashift"))
+ return reencrypt_verify_datashift_params(cd, params, sector_size);
+ else if (!strcmp(params->resilience, "checksum")) {
+ if (params->data_shift || move_first_segment)
+ return -EINVAL;
+ return reencrypt_verify_checksum_params(cd, params);
+ } else if (!strcmp(params->resilience, "datashift-checksum")) {
+ if (!move_first_segment ||
+ reencrypt_verify_datashift_params(cd, params, sector_size))
+ return -EINVAL;
+ return reencrypt_verify_checksum_params(cd, params);
+ } else if (!strcmp(params->resilience, "datashift-journal")) {
+ if (!move_first_segment)
+ return -EINVAL;
+ return reencrypt_verify_datashift_params(cd, params, sector_size);
+ }
+
+ log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
+ return -EINVAL;
+}
+
/*
 * Initialize LUKS2 decryption with data shift (moved first segment).
 *
 * Sequence: build in-memory moved/backup segments, allocate the reencrypt
 * keyslot, verify the resilience area can hold the moved segment, unlock the
 * volume keys, for online mode ('name' set) verify and reload the active dm
 * device, physically move the data by 'data_shift' bytes, and only then
 * perform the single metadata write that sets the reencryption requirement.
 *
 * Returns the reencrypt keyslot number on success, negative errno on error;
 * on failure the in-memory LUKS2 header is rolled back.
 */
static int reencrypt_decrypt_with_datashift_init(struct crypt_device *cd,
	const char *name,
	struct luks2_hdr *hdr,
	int reencrypt_keyslot,
	uint32_t sector_size,
	uint64_t data_size,
	uint64_t data_offset,
	const char *passphrase,
	size_t passphrase_size,
	int keyslot_old,
	const struct crypt_params_reencrypt *params,
	struct volume_key **vks)
{
	bool clear_table = false;
	int r, devfd = -1;
	uint64_t data_shift, max_moved_segment_length, moved_segment_length;
	struct reenc_protection check_rp = {};
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};
	json_object *jobj_segments_old;

	assert(hdr);
	assert(params);
	assert(params->resilience);
	assert(params->data_shift);
	assert(vks);

	if (!data_offset)
		return -EINVAL;

	if (params->max_hotzone_size > params->data_shift) {
		log_err(cd, _("Moved segment size can not be greater than data shift value."));
		return -EINVAL;
	}

	log_dbg(cd, "Initializing decryption with datashift.");

	/* convert from 512B sectors to bytes */
	data_shift = params->data_shift << SECTOR_SHIFT;

	/*
	 * In offline mode we must perform data move with exclusively opened data
	 * device in order to exclude LUKS2 decryption process and filesystem mount.
	 */
	if (name)
		devfd = device_open(cd, crypt_data_device(cd), O_RDWR);
	else
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
	if (devfd < 0)
		return -EINVAL;

	/* in-memory only */
	/* moved segment length defaults to min(data_shift, default hotzone size),
	 * clamped to the device data size */
	moved_segment_length = params->max_hotzone_size << SECTOR_SHIFT;
	if (!moved_segment_length)
		moved_segment_length = data_shift < LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH ?
			data_shift : LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;

	if (moved_segment_length > data_size)
		moved_segment_length = data_size;

	r = reencrypt_set_decrypt_shift_segments(cd, hdr, data_size,
		moved_segment_length,
		params->direction);
	if (r)
		goto out;

	r = reencrypt_make_backup_segments(cd, hdr, CRYPT_ANY_SLOT, NULL, data_offset, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto out;
	}

	r = reencrypt_verify_resilience_params(cd, params, sector_size, true);
	if (r < 0) {
		log_err(cd, _("Invalid reencryption resilience parameters."));
		goto out;
	}

	r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
		params, reencrypt_get_alignment(cd, hdr));
	if (r < 0)
		goto out;

	/* load protection temporarily to learn the maximal hotzone the keyslot
	 * area can protect, then drop it again */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, reencrypt_keyslot, &check_rp, false);
	if (r < 0)
		goto out;

	r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp,
		reencrypt_keyslot,
		&max_moved_segment_length);
	if (r < 0)
		goto out;

	LUKS2_reencrypt_protection_erase(&check_rp);

	if (moved_segment_length > max_moved_segment_length) {
		log_err(cd, _("Moved segment too large. Requested size %" PRIu64 ", available space for: %" PRIu64 "."),
			moved_segment_length, max_moved_segment_length);
		r = -EINVAL;
		goto out;
	}

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, CRYPT_ANY_SLOT,
		passphrase, passphrase_size, vks);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, *vks);
	if (r < 0)
		goto out;

	/* online mode: verify keys against the active dm device and reload its
	 * table to match the new (in-memory) segment layout */
	if (name) {
		r = reencrypt_verify_and_upload_keys(cd, hdr,
			LUKS2_reencrypt_digest_old(hdr),
			LUKS2_reencrypt_digest_new(hdr),
			*vks);
		if (r)
			goto out;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
			DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
			DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto out;

		jobj_segments_old = reencrypt_segments_old(hdr);
		if (!jobj_segments_old) {
			r = -EINVAL;
			goto out;
		}
		/* the active device must match the pre-reencryption layout */
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, jobj_segments_old, &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}
		json_object_put(jobj_segments_old);

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);

		if (r)
			goto out;

		dmd_source.size = dmd_target.size;
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = dm_reload_device(cd, name, &dmd_source, dmd_target.flags, 0);
			if (r)
				log_err(cd, _("Failed to reload device %s."), name);
			else
				clear_table = true;
		}

		dm_targets_free(cd, &dmd_source);

		if (r)
			goto out;
	}

	if (name) {
		/* suspend so that the data move below cannot race with writes */
		r = dm_suspend_device(cd, name, DM_SUSPEND_SKIP_LOCKFS);
		if (r) {
			log_err(cd, _("Failed to suspend device %s."), name);
			goto out;
		}
	}

	if (reencrypt_move_data(cd, devfd, data_shift, params->mode)) {
		r = -EIO;
		goto out;
	}

	/* This must be first and only write in LUKS2 metadata during _reencrypt_init */
	r = reencrypt_update_flag(cd, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, true, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
out:
	/* clear_table implies the reloaded table was never resumed; on failure
	 * wipe it, otherwise resume with the new mapping */
	if (r < 0 && clear_table && dm_clear_device(cd, name))
		log_err(cd, _("Failed to clear table."));
	else if (clear_table && dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS))
		log_err(cd, _("Failed to resume device %s."), name);

	device_release_excl(cd, crypt_data_device(cd));
	if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
		log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");

	return r;
}
+
/* This function must be called with metadata lock held */
/*
 * Initialize LUKS2 reencryption (encrypt/reencrypt/decrypt modes) in the
 * LUKS2 metadata. Validates parameters and device geometry, picks a free
 * keyslot for the reencrypt metadata, builds backup segments, unlocks volume
 * keys and (for online operation) verifies them against the active dm
 * device, optionally moves the first data segment, and finally performs the
 * single metadata commit that sets the reencryption requirement flag.
 *
 * Returns the reencrypt keyslot number on success, negative errno on error;
 * the header is rolled back on failure.
 */
static int reencrypt_init(struct crypt_device *cd,
	const char *name,
	struct luks2_hdr *hdr,
	const char *passphrase,
	size_t passphrase_size,
	int keyslot_old,
	int keyslot_new,
	const char *cipher,
	const char *cipher_mode,
	const struct crypt_params_reencrypt *params,
	struct volume_key **vks)
{
	bool move_first_segment;
	char _cipher[128];
	uint32_t check_sector_size, new_sector_size, old_sector_size;
	int r, reencrypt_keyslot, devfd = -1;
	uint64_t data_offset, data_size = 0;
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};

	assert(cd);
	assert(hdr);

	if (!params || !params->resilience || params->mode > CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* everything except decryption needs a new cipher and keyslot */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
	    (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
		return -EINVAL;

	log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
		crypt_reencrypt_mode_to_str(params->mode));

	move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);

	old_sector_size = LUKS2_get_sector_size(hdr);

	/* implicit sector size 512 for decryption */
	new_sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	if (new_sector_size < SECTOR_SIZE || new_sector_size > MAX_SECTOR_SIZE ||
	    NOTPOW2(new_sector_size)) {
		log_err(cd, _("Unsupported encryption sector size."));
		return -EINVAL;
	}
	/* check the larger encryption sector size only */
	check_sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;

	if (!cipher_mode || *cipher_mode == '\0')
		r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
	else
		r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
	if (r < 0 || (size_t)r >= sizeof(_cipher))
		return -EINVAL;

	data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;

	r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
	if (r)
		return r;

	r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
	if (r)
		return r;

	r = device_size(crypt_data_device(cd), &data_size);
	if (r)
		return r;

	/* usable data area = whole device minus the metadata/keyslots area */
	data_size -= data_offset;

	if (params->device_size) {
		if ((params->device_size << SECTOR_SHIFT) > data_size) {
			log_err(cd, _("Reduced data size is larger than real device size."));
			return -EINVAL;
		} else
			data_size = params->device_size << SECTOR_SHIFT;
	}

	if (MISALIGNED(data_size, check_sector_size)) {
		log_err(cd, _("Data device is not aligned to encryption sector size (%" PRIu32 " bytes)."), check_sector_size);
		return -EINVAL;
	}

	reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
	if (reencrypt_keyslot < 0) {
		log_err(cd, _("All key slots full."));
		return -EINVAL;
	}

	/* decryption with datashift has its own, more involved init path */
	if (params->mode == CRYPT_REENCRYPT_DECRYPT && (params->data_shift > 0) && move_first_segment)
		return reencrypt_decrypt_with_datashift_init(cd, name, hdr,
			reencrypt_keyslot,
			check_sector_size,
			data_size,
			data_offset,
			passphrase,
			passphrase_size,
			keyslot_old,
			params,
			vks);


	/*
	 * We must perform data move with exclusive open data device
	 * to exclude another cryptsetup process to colide with
	 * encryption initialization (or mount)
	 */
	if (move_first_segment) {
		if (data_size < (params->data_shift << SECTOR_SHIFT)) {
			log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
		if (params->data_shift < LUKS2_get_data_offset(hdr)) {
			log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."),
				params->data_shift, LUKS2_get_data_offset(hdr));
			return -EINVAL;
		}
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
		if (devfd < 0) {
			if (devfd == -EBUSY)
				log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."),
					device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
	}

	if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* in-memory only */
		r = reencrypt_set_encrypt_segments(cd, hdr, data_size,
			params->data_shift << SECTOR_SHIFT,
			move_first_segment,
			params->direction);
		if (r)
			goto out;
	}

	r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto out;
	}

	r = reencrypt_verify_resilience_params(cd, params, check_sector_size, move_first_segment);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, params,
		reencrypt_get_alignment(cd, hdr));
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_REENCRYPT_REQ_VERSION, *vks);
	if (r < 0)
		goto out;

	/* online (re)encryption: the active dm device must match the current
	 * segment layout before we commit anything */
	if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
		if (r)
			goto out;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
			DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
			DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto out;

		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);

		if (r)
			goto out;
	}

	if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT, params->mode)) {
		r = -EIO;
		goto out;
	}

	/* This must be first and only write in LUKS2 metadata during _reencrypt_init */
	r = reencrypt_update_flag(cd, LUKS2_REENCRYPT_REQ_VERSION, true, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
out:
	device_release_excl(cd, crypt_data_device(cd));
	if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
		log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");

	return r;
}
+
/*
 * Store hotzone recovery data according to the active resilience mode before
 * the hotzone is overwritten:
 *  - none:      nothing to store;
 *  - checksum:  per-block hashes of the hotzone are written to the keyslot;
 *  - journal:   the full hotzone content is written to the keyslot;
 *  - datashift: only the LUKS2 header is (re)written.
 * Returns 0 on success, negative errno on failure.
 */
static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
	struct luks2_hdr *hdr, int reencrypt_keyslot,
	const struct reenc_protection *rp,
	const void *buffer, size_t buffer_len)
{
	const void *pbuffer;
	size_t data_offset, len;
	int r;

	assert(hdr);
	assert(rp);

	if (rp->type == REENC_PROTECTION_NONE)
		return 0;

	if (rp->type == REENC_PROTECTION_CHECKSUM) {
		log_dbg(cd, "Checksums hotzone resilience.");

		/* hash one block_size chunk at a time; checksums are packed
		 * back-to-back (hash_size each) into rp->p.csum.checksums.
		 * NOTE(review): assumes buffer_len is a multiple of block_size
		 * and the checksums buffer is large enough — enforced by the
		 * protection setup elsewhere, confirm against caller. */
		for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rp->p.csum.block_size, len += rp->p.csum.hash_size) {
			if (crypt_hash_write(rp->p.csum.ch, (const char *)buffer + data_offset, rp->p.csum.block_size)) {
				log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
				return -EINVAL;
			}
			if (crypt_hash_final(rp->p.csum.ch, (char *)rp->p.csum.checksums + len, rp->p.csum.hash_size)) {
				log_dbg(cd, "Failed to finalize hash.");
				return -EINVAL;
			}
		}
		pbuffer = rp->p.csum.checksums;
	} else if (rp->type == REENC_PROTECTION_JOURNAL) {
		log_dbg(cd, "Journal hotzone resilience.");
		len = buffer_len;
		pbuffer = buffer;
	} else if (rp->type == REENC_PROTECTION_DATASHIFT) {
		log_dbg(cd, "Data shift hotzone resilience.");
		return LUKS2_hdr_write(cd, hdr);
	} else
		return -EINVAL;

	log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);

	r = LUKS2_keyslot_reencrypt_store(cd, hdr, reencrypt_keyslot, pbuffer, len);

	/* store returns byte count on success; normalize to 0 */
	return r > 0 ? 0 : r;
}
+
/*
 * Advance the reencryption context after a completed hotzone step: update
 * rh->offset/rh->length for the next hotzone and accumulate progress.
 * Backward direction walks offsets down (with special datashift handling for
 * encryption), forward direction walks offsets up (with a special final jump
 * back to the moved segment for datashift decryption).
 */
static int reencrypt_context_update(struct crypt_device *cd,
	struct luks2_reencrypt *rh)
{
	if (rh->read < 0)
		return -EINVAL;

	if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
			/* step back by the data shift; near the start, clamp the
			 * hotzone so the final step lands exactly at offset 0 */
			if (rh->offset)
				rh->offset -= data_shift_value(&rh->rp);
			if (rh->offset && (rh->offset < data_shift_value(&rh->rp))) {
				rh->length = rh->offset;
				rh->offset = data_shift_value(&rh->rp);
			}
			if (!rh->offset)
				rh->length = data_shift_value(&rh->rp);
		} else {
			/* plain backward: shrink the last hotzone to the remaining
			 * prefix and move the offset down */
			if (rh->offset < rh->length)
				rh->length = rh->offset;
			rh->offset -= rh->length;
		}
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		rh->offset += (uint64_t)rh->read;
		/* datashift decryption: after the main area is done, process the
		 * moved first segment located back at offset 0 */
		if (rh->device_size == rh->offset &&
		    rh->jobj_segment_moved &&
		    rh->mode == CRYPT_REENCRYPT_DECRYPT &&
		    rh->rp.type == REENC_PROTECTION_DATASHIFT) {
			rh->offset = 0;
			rh->length = json_segment_get_size(rh->jobj_segment_moved, 0);
		}
		/* it fails in-case of device_size < rh->offset later */
		else if (rh->device_size - rh->offset < rh->length)
			rh->length = rh->device_size - rh->offset;
	} else
		return -EINVAL;

	if (rh->device_size < rh->offset) {
		log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
		return -EINVAL;
	}

	rh->progress += (uint64_t)rh->read;

	return 0;
}
+
+static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
+ uint64_t device_size,
+ uint64_t max_hotzone_size,
+ uint64_t required_device_size,
+ struct volume_key *vks,
+ struct luks2_reencrypt **rh)
+{
+ int r;
+ struct luks2_reencrypt *tmp = NULL;
+ crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
+
+ if (ri == CRYPT_REENCRYPT_NONE) {
+ log_err(cd, _("Device not marked for LUKS2 reencryption."));
+ return -EINVAL;
+ } else if (ri == CRYPT_REENCRYPT_INVALID)
+ return -EINVAL;
+
+ r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
+ if (r < 0)
+ return r;
+
+ if (ri == CRYPT_REENCRYPT_CLEAN)
+ r = reencrypt_load_clean(cd, hdr, device_size, max_hotzone_size, required_device_size, &tmp);
+ else if (ri == CRYPT_REENCRYPT_CRASH)
+ r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
+ else
+ r = -EINVAL;
+
+ if (r < 0 || !tmp) {
+ log_err(cd, _("Failed to load LUKS2 reencryption context."));
+ return r;
+ }
+
+ *rh = tmp;
+
+ return 0;
+}
+#endif
+static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
+{
+ int r;
+ char *lock_resource;
+
+ if (!crypt_metadata_locking_enabled()) {
+ *reencrypt_lock = NULL;
+ return 0;
+ }
+
+ r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
+ if (r < 0)
+ return -ENOMEM;
+ if (r < 20) {
+ free(lock_resource);
+ return -EINVAL;
+ }
+
+ r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
+
+ free(lock_resource);
+
+ return r;
+}
+
/* internal only */
/*
 * Take the reencryption lock identified by an active dm device UUID.
 * If the context has no LUKS2 UUID yet, the dashed 8-4-4-4-12 UUID is
 * reconstructed from the compact hex embedded in the DM UUID (segments read
 * at fixed offsets 6/14/18/22/26 — NOTE(review): offsets assume the DM-UUID
 * prefix layout used by this library, confirm against dm_create_uuid()).
 * Otherwise the DM UUID must match the context UUID.
 */
int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
	struct crypt_lock_handle **reencrypt_lock)
{
	int r;
	char hdr_uuid[37];
	const char *uuid = crypt_get_uuid(cd);

	if (!dm_uuid)
		return -EINVAL;

	if (!uuid) {
		r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
			dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
		/* must produce exactly 36 characters */
		if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
			return -EINVAL;
	} else if (crypt_uuid_cmp(dm_uuid, uuid))
		return -EINVAL;

	return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
}
+
+/* internal only */
+int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
+{
+ if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
+ return -EINVAL;
+
+ return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
+}
+
/* internal only */
/* Release a lock taken by LUKS2_reencrypt_lock*() via the generic lock API. */
void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
	crypt_unlock_internal(cd, reencrypt_lock);
}
+#if USE_LUKS2_REENCRYPTION
/*
 * Acquire the reencryption lock and verify the device is in a clean
 * reencryption state. The status is checked twice: once before taking the
 * lock (fast-fail) and again after reloading metadata under the lock, since
 * another process may have changed it in between. On success the held lock
 * is returned via *reencrypt_lock; on any failure the lock is released.
 */
static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
	struct crypt_lock_handle **reencrypt_lock)
{
	int r;
	crypt_reencrypt_info ri;
	struct crypt_lock_handle *h;

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		log_err(cd, _("Failed to get reencryption state."));
		return -EINVAL;
	}
	if (ri < CRYPT_REENCRYPT_CLEAN) {
		log_err(cd, _("Device is not in reencryption."));
		return -EINVAL;
	}

	r = LUKS2_reencrypt_lock(cd, &h);
	if (r < 0) {
		if (r == -EBUSY)
			log_err(cd, _("Reencryption process is already running."));
		else
			log_err(cd, _("Failed to acquire reencryption lock."));
		return r;
	}

	/* With reencryption lock held, reload device context and verify metadata state */
	r = crypt_load(cd, CRYPT_LUKS2, NULL);
	if (r) {
		LUKS2_reencrypt_unlock(cd, h);
		return r;
	}

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_CLEAN) {
		*reencrypt_lock = h;
		return 0;
	}

	/* crashed (or otherwise non-clean) state requires recovery first */
	LUKS2_reencrypt_unlock(cd, h);
	log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
	return -EINVAL;
}
+
/*
 * Load (resume) an existing LUKS2 reencryption under the reencryption lock:
 * validate resilience parameters, unlock/verify volume keys, for online mode
 * check the active dm device matches current metadata, validate requested
 * device size constraints, refresh reencrypt keyslot parameters if needed,
 * build the runtime context and storage wrappers, and attach the context
 * (with the held lock and keys) to the crypt_device handle.
 *
 * Returns 0 on success; on failure releases the lock and frees the context.
 */
static int reencrypt_load_by_passphrase(struct crypt_device *cd,
	const char *name,
	const char *passphrase,
	size_t passphrase_size,
	int keyslot_old,
	int keyslot_new,
	struct volume_key **vks,
	const struct crypt_params_reencrypt *params)
{
	int r, reencrypt_slot;
	struct luks2_hdr *hdr;
	struct crypt_lock_handle *reencrypt_lock;
	struct luks2_reencrypt *rh;
	const struct volume_key *vk;
	size_t alignment;
	uint32_t old_sector_size, new_sector_size, sector_size;
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};
	uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0,
		max_hotzone_size = 0;
	bool dynamic;
	uint32_t flags = 0;

	assert(cd);

	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	if (!hdr)
		return -EINVAL;

	log_dbg(cd, "Loading LUKS2 reencryption context.");

	/* resilience checks are done against the larger of the two sector sizes */
	old_sector_size = reencrypt_get_sector_size_old(hdr);
	new_sector_size = reencrypt_get_sector_size_new(hdr);
	sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;

	r = reencrypt_verify_resilience_params(cd, params, sector_size,
		LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0);
	if (r < 0)
		return r;

	if (params) {
		required_size = params->device_size;
		max_hotzone_size = params->max_hotzone_size;
	}

	/* drop any stale context from a previous run */
	rh = crypt_get_luks2_reencrypt(cd);
	if (rh) {
		LUKS2_reencrypt_free(cd, rh);
		crypt_set_luks2_reencrypt(cd, NULL);
		rh = NULL;
	}

	r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
	if (r)
		return r;

	reencrypt_slot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (reencrypt_slot < 0) {
		r = -EINVAL;
		goto err;
	}

	/* From now on we hold reencryption lock */

	if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic)) {
		r = -EINVAL;
		goto err;
	}

	/* some configurations provides fixed device size */
	r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
	if (r) {
		r = -EINVAL;
		goto err;
	}

	minimal_size >>= SECTOR_SHIFT;

	/* reuse already unlocked keys if present; otherwise unlock them now */
	r = reencrypt_verify_keys(cd, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
	if (r == -ENOENT) {
		log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
		r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
	}

	if (r < 0)
		goto err;

	if (name) {
		r = reencrypt_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
		if (r < 0)
			goto err;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
			DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
			DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto err;
		flags = dmd_target.flags;

		/*
		 * By default reencryption code aims to retain flags from existing dm device.
		 * The keyring activation flag can not be inherited if original cipher is null.
		 *
		 * In this case override the flag based on decision made in reencrypt_upload_keys
		 * above. The code checks if new VK is eligible for keyring.
		 */
		vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
		if (vk && vk->key_description && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
			flags |= CRYPT_ACTIVATE_KEYRING_KEY;
			dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
		}

		/* active table must match the current metadata segment layout */
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);
		if (r)
			goto err;
		mapping_size = dmd_target.size;
	}

	r = -EINVAL;
	if (required_size && mapping_size && (required_size != mapping_size)) {
		log_err(cd, _("Active device size and requested reencryption size don't match."));
		goto err;
	}

	if (mapping_size)
		required_size = mapping_size;

	if (required_size) {
		/* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
		if ((minimal_size && (required_size < minimal_size)) ||
		    (required_size > (device_size >> SECTOR_SHIFT)) ||
		    (!dynamic && (required_size != minimal_size)) ||
		    (old_sector_size > 0 && MISALIGNED(required_size, old_sector_size >> SECTOR_SHIFT)) ||
		    (new_sector_size > 0 && MISALIGNED(required_size, new_sector_size >> SECTOR_SHIFT))) {
			log_err(cd, _("Illegal device size requested in reencryption parameters."));
			goto err;
		}
	}

	alignment = reencrypt_get_alignment(cd, hdr);

	r = LUKS2_keyslot_reencrypt_update_needed(cd, hdr, reencrypt_slot, params, alignment);
	if (r > 0) /* metadata update needed */
		r = LUKS2_keyslot_reencrypt_update(cd, hdr, reencrypt_slot, params, alignment, *vks);
	if (r < 0)
		goto err;

	r = reencrypt_load(cd, hdr, device_size, max_hotzone_size, required_size, *vks, &rh);
	if (r < 0 || !rh)
		goto err;

	if (name && (r = reencrypt_context_set_names(rh, name)))
		goto err;

	/* Reassure device is not mounted and there's no dm mapping active */
	if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
		log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
		r = -EBUSY;
		goto err;
	}
	device_release_excl(cd, crypt_data_device(cd));

	/* There's a race for dm device activation not managed by cryptsetup.
	 *
	 * 1) excl close
	 * 2) rogue dm device activation
	 * 3) one or more dm-crypt based wrapper activation
	 * 4) next excl open gets skipped due to 3) device from 2) remains undetected.
	 */
	r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
	if (r)
		goto err;

	/* If one of wrappers is based on dmcrypt fallback it already blocked mount */
	if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
	    crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
		if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
			log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
			r = -EBUSY;
			goto err;
		}
	}

	rh->flags = flags;

	/* the context takes ownership of the keys and the held lock */
	MOVE_REF(rh->vks, *vks);
	MOVE_REF(rh->reenc_lock, reencrypt_lock);

	crypt_set_luks2_reencrypt(cd, rh);

	return 0;
err:
	LUKS2_reencrypt_unlock(cd, reencrypt_lock);
	LUKS2_reencrypt_free(cd, rh);
	return r;
}
+
/*
 * Run reencryption crash recovery if needed. Takes the reencryption lock,
 * reloads metadata under the lock and, only when the state is CRASH, performs
 * passphrase-based recovery. A clean or absent reencryption state is a no-op.
 * Returns 0 on success (including "nothing to do"), negative errno on error.
 */
static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int keyslot_old,
	int keyslot_new,
	const char *passphrase,
	size_t passphrase_size)
{
	int r;
	crypt_reencrypt_info ri;
	struct crypt_lock_handle *reencrypt_lock;

	r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
	if (r) {
		if (r == -EBUSY)
			log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
		else
			log_err(cd, _("Failed to get reencryption lock."));
		return r;
	}

	/* reload metadata under the lock before inspecting state */
	if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
		LUKS2_reencrypt_unlock(cd, reencrypt_lock);
		return r;
	}

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		LUKS2_reencrypt_unlock(cd, reencrypt_lock);
		return -EINVAL;
	}

	if (ri == CRYPT_REENCRYPT_CRASH) {
		r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
			passphrase, passphrase_size, NULL);
		if (r < 0)
			log_err(cd, _("LUKS2 reencryption recovery failed."));
	} else {
		log_dbg(cd, "No LUKS2 reencryption recovery needed.");
		r = 0;
	}

	LUKS2_reencrypt_unlock(cd, reencrypt_lock);
	return r;
}
+
/*
 * Repair reencryption metadata: recompute the reencryption digest from the
 * unlocked volume keys and rewrite the online-reencrypt requirement flag with
 * the proper version. Used when the digest/requirement got out of sync with
 * the keyslots. Runs fully under the reencryption lock with metadata reloaded.
 */
static int reencrypt_repair_by_passphrase(
	struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int keyslot_old,
	int keyslot_new,
	const char *passphrase,
	size_t passphrase_size)
{
	int r;
	struct crypt_lock_handle *reencrypt_lock;
	struct luks2_reencrypt *rh;
	crypt_reencrypt_info ri;
	uint8_t requirement_version;
	const char *resilience;
	struct volume_key *vks = NULL;

	log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");

	/* drop any stale runtime context */
	rh = crypt_get_luks2_reencrypt(cd);
	if (rh) {
		LUKS2_reencrypt_free(cd, rh);
		crypt_set_luks2_reencrypt(cd, NULL);
		rh = NULL;
	}

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID)
		return -EINVAL;

	if (ri < CRYPT_REENCRYPT_CLEAN) {
		log_err(cd, _("Device is not in reencryption."));
		return -EINVAL;
	}

	r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
	if (r < 0) {
		if (r == -EBUSY)
			log_err(cd, _("Reencryption process is already running."));
		else
			log_err(cd, _("Failed to acquire reencryption lock."));
		return r;
	}

	/* With reencryption lock held, reload device context and verify metadata state */
	r = crypt_load(cd, CRYPT_LUKS2, NULL);
	if (r)
		goto out;

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		r = -EINVAL;
		goto out;
	}
	if (ri == CRYPT_REENCRYPT_NONE) {
		/* finished meanwhile by another process: nothing to repair */
		r = 0;
		goto out;
	}

	resilience = reencrypt_resilience_type(hdr);
	if (!resilience) {
		r = -EINVAL;
		goto out;
	}

	/* datashift decryption uses its own, newer requirement version */
	if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
	    !strncmp(resilience, "datashift-", 10) &&
	    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
		requirement_version = LUKS2_DECRYPT_DATASHIFT_REQ_VERSION;
	else
		requirement_version = LUKS2_REENCRYPT_REQ_VERSION;

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, &vks);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, requirement_version, vks);
	/* keys no longer needed; NULL prevents double free at out: */
	crypt_free_volume_key(vks);
	vks = NULL;
	if (r < 0)
		goto out;

	/* replaces old online-reencrypt flag with updated version and commits metadata */
	r = reencrypt_update_flag(cd, requirement_version, true, true);
out:
	LUKS2_reencrypt_unlock(cd, reencrypt_lock);
	crypt_free_volume_key(vks);
	return r;

}
+#endif
+/*
+ * Entry point for passphrase-driven reencryption: handles repair and
+ * crash-recovery short-circuits, validates the new cipher, initializes or
+ * resumes reencryption metadata and loads the runtime context.
+ * Returns the reencrypt keyslot number on success, negative errno otherwise.
+ */
+static int reencrypt_init_by_passphrase(struct crypt_device *cd,
+	const char *name,
+	const char *passphrase,
+	size_t passphrase_size,
+	int keyslot_old,
+	int keyslot_new,
+	const char *cipher,
+	const char *cipher_mode,
+	const struct crypt_params_reencrypt *params)
+{
+#if USE_LUKS2_REENCRYPTION
+	int r;
+	crypt_reencrypt_info ri;
+	struct volume_key *vks = NULL;
+	uint32_t flags = params ? params->flags : 0;
+	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+	/* short-circuit in reencryption metadata update and finish immediately. */
+	if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
+		return reencrypt_repair_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);
+
+	/* short-circuit in recovery and finish immediately. */
+	if (flags & CRYPT_REENCRYPT_RECOVERY)
+		return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);
+
+	/* Validate requested cipher against the new volume key size (skip wrapped keys). */
+	if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
+		r = crypt_keyslot_get_key_size(cd, keyslot_new);
+		if (r < 0)
+			return r;
+		r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
+		if (r < 0) {
+			log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), cipher, cipher_mode);
+			return r;
+		}
+	}
+
+	r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
+	if (r)
+		return r;
+
+	ri = LUKS2_reencrypt_status(hdr);
+	if (ri == CRYPT_REENCRYPT_INVALID) {
+		device_write_unlock(cd, crypt_metadata_device(cd));
+		return -EINVAL;
+	}
+
+	/* INITIALIZE_ONLY must not find reencryption already set up. */
+	if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
+		device_write_unlock(cd, crypt_metadata_device(cd));
+		log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
+		return -EBUSY;
+	}
+
+	if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
+		r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
+		if (r < 0)
+			log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
+	} else if (ri > CRYPT_REENCRYPT_NONE) {
+		log_dbg(cd, "LUKS2 reencryption already initialized.");
+		r = 0;
+	} else {
+		/* Fix: RESUME_ONLY requested with no reencryption in progress
+		 * previously left r uninitialized (undefined behavior below);
+		 * report it as an explicit error instead. */
+		log_dbg(cd, "No LUKS2 reencryption in progress to resume.");
+		r = -EINVAL;
+	}
+
+	device_write_unlock(cd, crypt_metadata_device(cd));
+
+	if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
+		goto out;
+
+	r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
+out:
+	if (r < 0)
+		crypt_drop_keyring_key(cd, vks);
+	crypt_free_volume_key(vks);
+	return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
+#else
+	log_err(cd, _("This operation is not supported for this device type."));
+	return -ENOTSUP;
+#endif
+}
+
+/*
+ * Initialize (or resume) LUKS2 reencryption using a passphrase stored in
+ * the kernel keyring under passphrase_description.
+ */
+int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
+	const char *name,
+	const char *passphrase_description,
+	int keyslot_old,
+	int keyslot_new,
+	const char *cipher,
+	const char *cipher_mode,
+	const struct crypt_params_reencrypt *params)
+{
+	char *pass = NULL;
+	size_t pass_size = 0;
+	int r;
+
+	/* Device must be LUKS2 with the online-reencrypt requirement allowed. */
+	if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
+		return -EINVAL;
+
+	/* INITIALIZE_ONLY and RESUME_ONLY are mutually exclusive. */
+	if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
+		return -EINVAL;
+
+	r = keyring_get_passphrase(passphrase_description, &pass, &pass_size);
+	if (r < 0) {
+		log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
+		return -EINVAL;
+	}
+
+	r = reencrypt_init_by_passphrase(cd, name, pass, pass_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
+
+	/* Wipe the secret before releasing it. */
+	crypt_safe_memzero(pass, pass_size);
+	free(pass);
+
+	return r;
+}
+
+/*
+ * Initialize (or resume) LUKS2 reencryption using a plain passphrase.
+ */
+int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
+	const char *name,
+	const char *passphrase,
+	size_t passphrase_size,
+	int keyslot_old,
+	int keyslot_new,
+	const char *cipher,
+	const char *cipher_mode,
+	const struct crypt_params_reencrypt *params)
+{
+	uint32_t f = params ? params->flags : 0;
+
+	/* Device must be LUKS2 and a passphrase must be provided. */
+	if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
+		return -EINVAL;
+
+	/* INITIALIZE_ONLY and RESUME_ONLY cannot be combined. */
+	if ((f & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (f & CRYPT_REENCRYPT_RESUME_ONLY))
+		return -EINVAL;
+
+	return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size,
+			keyslot_old, keyslot_new, cipher, cipher_mode, params);
+}
+
+#if USE_LUKS2_REENCRYPTION
+/*
+ * Process a single reencryption hotzone: build in-memory segments, write
+ * resilience (protection) metadata, read the hotzone via the old key,
+ * re-encrypt it with the new key and commit updated segment metadata.
+ * Returns REENC_OK, or REENC_ERR/REENC_ROLLBACK/REENC_FATAL on failure
+ * (fatal means on-disk data and metadata may be out of sync).
+ */
+static reenc_status_t reencrypt_step(struct crypt_device *cd,
+		struct luks2_hdr *hdr,
+		struct luks2_reencrypt *rh,
+		uint64_t device_size,
+		bool online)
+{
+	int r;
+	struct reenc_protection *rp;
+
+	assert(hdr);
+	assert(rh);
+
+	rp = &rh->rp;
+
+	/* in memory only */
+	r = reencrypt_make_segments(cd, hdr, rh, device_size);
+	if (r)
+		return REENC_ERR;
+
+	r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
+	if (r) {
+		log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
+		return REENC_ERR;
+	}
+
+	log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
+	log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);
+
+	/* First hotzone of a datashift run with a moved first segment: old data
+	 * must be read from its moved location, so rebuild the old-segment
+	 * storage wrapper at the moved offset. */
+	if (!rh->offset && rp->type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
+		crypt_storage_wrapper_destroy(rh->cw1);
+		log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
+		r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
+				LUKS2_reencrypt_get_data_offset_moved(hdr),
+				crypt_get_iv_offset(cd),
+				reencrypt_get_sector_size_old(hdr),
+				reencrypt_segment_cipher_old(hdr),
+				crypt_volume_key_by_id(rh->vks, rh->digest_old),
+				rh->wflags1);
+		if (r) {
+			log_err(cd, _("Failed to initialize old segment storage wrapper."));
+			return REENC_ROLLBACK;
+		}
+
+		if (rh->rp_moved_segment.type != REENC_PROTECTION_NOT_SET) {
+			log_dbg(cd, "Switching to moved segment resilience type.");
+			rp = &rh->rp_moved_segment;
+		}
+	}
+
+	r = reencrypt_hotzone_protect_ready(cd, rp);
+	if (r) {
+		log_err(cd, _("Failed to initialize hotzone protection."));
+		return REENC_ROLLBACK;
+	}
+
+	if (online) {
+		r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
+		/* Teardown overlay devices with dm-error. None bio shall pass! */
+		if (r != REENC_OK)
+			return r;
+	}
+
+	/* Read the hotzone through the old-segment wrapper (still ciphertext
+	 * of the old key until decrypted below). */
+	rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
+	if (rh->read < 0) {
+		/* severity normal */
+		log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
+		return REENC_ROLLBACK;
+	}
+
+	/* metadata commit point */
+	r = reencrypt_hotzone_protect_final(cd, hdr, rh->reenc_keyslot, rp, rh->reenc_buffer, rh->read);
+	if (r < 0) {
+		/* severity normal */
+		log_err(cd, _("Failed to write reencryption resilience metadata."));
+		return REENC_ROLLBACK;
+	}
+
+	/* Decrypt in place with the old key; cw2 re-encrypts on write below. */
+	r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
+	if (r) {
+		/* severity normal */
+		log_err(cd, _("Decryption failed."));
+		return REENC_ROLLBACK;
+	}
+	if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
+		/* severity fatal */
+		log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
+		return REENC_FATAL;
+	}
+
+	/* Data must reach stable storage before metadata claims the zone done. */
+	if (rp->type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
+		log_err(cd, _("Failed to sync data."));
+		return REENC_FATAL;
+	}
+
+	/* metadata commit safe point */
+	r = reencrypt_assign_segments(cd, hdr, rh, 0, rp->type != REENC_PROTECTION_NONE);
+	if (r) {
+		/* severity fatal */
+		log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
+		return REENC_FATAL;
+	}
+
+	if (online) {
+		/* severity normal */
+		log_dbg(cd, "Resuming device %s", rh->hotzone_name);
+		r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
+		if (r) {
+			log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
+			return REENC_ERR;
+		}
+	}
+
+	return REENC_OK;
+}
+
+/*
+ * Drop all backup segments ("backup-previous", "backup-final",
+ * "backup-moved-segment") from in-memory LUKS2 metadata and unassign their
+ * digests. Missing segments are skipped; changes are not committed to disk
+ * here (the caller writes the header).
+ * Returns 0 on success, -EINVAL if a digest unassign fails.
+ */
+static int reencrypt_erase_backup_segments(struct crypt_device *cd,
+		struct luks2_hdr *hdr)
+{
+	/* Replaces three identical copy-paste stanzas with one loop. */
+	static const char *backup_flags[] = {
+		"backup-previous", "backup-final", "backup-moved-segment"
+	};
+	size_t i;
+	int segment;
+
+	for (i = 0; i < sizeof(backup_flags) / sizeof(backup_flags[0]); i++) {
+		segment = LUKS2_get_segment_id_by_flag(hdr, backup_flags[i]);
+		if (segment < 0)
+			continue;
+		if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
+			return -EINVAL;
+		json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
+	}
+
+	return 0;
+}
+
+/*
+ * Wipe device areas no longer covered by any segment after reencryption
+ * finished: the temporary moved-segment backup area (encryption with moved
+ * first segment) and, for forward datashift, the trailing area freed by the
+ * shift. Returns 0 or negative errno.
+ */
+static int reencrypt_wipe_unused_device_area(struct crypt_device *cd, struct luks2_reencrypt *rh)
+{
+	uint64_t offset, length, dev_size;
+	int r = 0;
+
+	assert(cd);
+	assert(rh);
+
+	/* Plaintext copy kept in the moved backup segment area. */
+	if (rh->jobj_segment_moved && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
+		offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
+		length = json_segment_get_size(rh->jobj_segment_moved, 0);
+		log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
+			length, offset);
+		r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
+				offset, length, 1024 * 1024, NULL, NULL);
+	}
+
+	if (r < 0)
+		return r;
+
+	/* Forward datashift leaves data_shift bytes unused at the device end. */
+	if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_FORWARD) {
+		r = device_size(crypt_data_device(cd), &dev_size);
+		if (r < 0)
+			return r;
+
+		/* Sanity check: shift value must not exceed the device size. */
+		if (dev_size < data_shift_value(&rh->rp))
+			return -EINVAL;
+
+		offset = dev_size - data_shift_value(&rh->rp);
+		length = data_shift_value(&rh->rp);
+		log_dbg(cd, "Wiping %" PRIu64 " bytes of data at offset %" PRIu64,
+			length, offset);
+		r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
+				offset, length, 1024 * 1024, NULL, NULL);
+	}
+
+	return r;
+}
+
+/*
+ * Finalize a successful (or cleanly interrupted) reencryption run: commit
+ * metadata, reload/resume an online device and, when the whole device was
+ * processed, wipe unused areas and remove old-key keyslots, backup segments,
+ * the requirement flag and the reencryption keyslot.
+ * Returns 0 or -EINVAL.
+ */
+static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
+{
+	int i, r;
+	uint32_t dmt_flags;
+	bool finished = !(rh->device_size > rh->progress);
+
+	/* Without resilience protection metadata was updated in memory only. */
+	if (rh->rp.type == REENC_PROTECTION_NONE &&
+	    LUKS2_hdr_write(cd, hdr)) {
+		log_err(cd, _("Failed to write LUKS2 metadata."));
+		return -EINVAL;
+	}
+
+	if (rh->online) {
+		r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
+		if (r)
+			log_err(cd, _("Failed to reload device %s."), rh->device_name);
+		if (!r) {
+			r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
+			if (r)
+				log_err(cd, _("Failed to resume device %s."), rh->device_name);
+		}
+		dm_remove_device(cd, rh->overlay_name, 0);
+		dm_remove_device(cd, rh->hotzone_name, 0);
+
+		/* Decrypted device ends as plain dm-linear; schedule deferred removal. */
+		if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
+		    !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
+			dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
+	}
+
+	if (finished) {
+		if (reencrypt_wipe_unused_device_area(cd, rh))
+			log_err(cd, _("Failed to wipe unused data device area."));
+		if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(hdr, reencrypt_get_data_offset_new(hdr)))
+			log_dbg(cd, "Failed to set new keyslots area size.");
+		/* Keyslots still bound to the old volume key are now unusable. */
+		if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
+			for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
+				if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
+					log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);
+
+		if (reencrypt_erase_backup_segments(cd, hdr))
+			log_dbg(cd, "Failed to erase backup segments");
+
+		if (reencrypt_update_flag(cd, 0, false, false))
+			log_dbg(cd, "Failed to disable reencryption requirement flag.");
+
+		/* metadata commit point also removing reencryption flag on-disk */
+		if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
+			log_err(cd, _("Failed to remove reencryption keyslot."));
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Report a fatal mid-hotzone failure and fence a suspended hotzone device. */
+static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh)
+{
+	log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
+		(rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
+
+	if (!rh->online)
+		return;
+
+	log_err(cd, _("Online reencryption failed."));
+
+	/* If the hotzone device is still suspended, replace its table with
+	 * dm-error so no I/O can pass through inconsistent data. */
+	if (dm_status_suspended(cd, rh->hotzone_name) <= 0)
+		return;
+
+	log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
+	if (dm_error_device(cd, rh->hotzone_name)) {
+		log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
+		log_err(cd, _("Do not resume the device unless replaced with error target manually."));
+	}
+}
+
+/*
+ * Final cleanup after the main reencryption loop: report final progress,
+ * finalize metadata on success and always release the runtime context
+ * (which also drops the reencryption lock).
+ */
+static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
+		struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
+		int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+		void *usrptr)
+{
+	int r;
+
+	if (rs == REENC_OK) {
+		if (progress && !interrupted)
+			progress(rh->device_size, rh->progress, usrptr);
+		r = reencrypt_teardown_ok(cd, hdr, rh);
+	} else {
+		/* A fatal step additionally fences the hotzone with dm-error. */
+		if (rs == REENC_FATAL)
+			reencrypt_teardown_fatal(cd, rh);
+		r = -EIO;
+	}
+
+	/* this frees reencryption lock */
+	LUKS2_reencrypt_free(cd, rh);
+	crypt_set_luks2_reencrypt(cd, NULL);
+
+	return r;
+}
+#endif
+
+/*
+ * Main reencryption loop. Requires a context previously prepared by
+ * crypt_reencrypt_init_by_passphrase()/_by_keyring(). The optional progress
+ * callback may interrupt processing by returning non-zero; state is stored
+ * so the operation can be resumed later.
+ */
+int crypt_reencrypt_run(
+	struct crypt_device *cd,
+	int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+	void *usrptr)
+{
+#if USE_LUKS2_REENCRYPTION
+	int r;
+	crypt_reencrypt_info ri;
+	struct luks2_hdr *hdr;
+	struct luks2_reencrypt *rh;
+	reenc_status_t rs;
+	bool quit = false;
+
+	if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
+		return -EINVAL;
+
+	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+
+	/* Crash state must go through recovery first. */
+	ri = LUKS2_reencrypt_status(hdr);
+	if (ri > CRYPT_REENCRYPT_CLEAN) {
+		log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
+		return -EINVAL;
+	}
+
+	rh = crypt_get_luks2_reencrypt(cd);
+	if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
+		log_err(cd, _("Missing or invalid reencrypt context."));
+		return -EINVAL;
+	}
+
+	log_dbg(cd, "Resuming LUKS2 reencryption.");
+
+	if (rh->online && reencrypt_init_device_stack(cd, rh)) {
+		log_err(cd, _("Failed to initialize reencryption device stack."));
+		return -EINVAL;
+	}
+
+	log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
+
+	rs = REENC_OK;
+
+	/* Give the caller a chance to cancel before the first hotzone. */
+	if (progress && progress(rh->device_size, rh->progress, usrptr))
+		quit = true;
+
+	while (!quit && (rh->device_size > rh->progress)) {
+		rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
+		if (rs != REENC_OK)
+			break;
+
+		log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
+		if (progress && progress(rh->device_size, rh->progress, usrptr))
+			quit = true;
+
+		/* Advance offset/length bookkeeping for the next hotzone. */
+		r = reencrypt_context_update(cd, rh);
+		if (r) {
+			log_err(cd, _("Failed to update reencryption context."));
+			rs = REENC_ERR;
+			break;
+		}
+
+		log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
+		log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
+	}
+
+	r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
+	return r;
+#else
+	log_err(cd, _("This operation is not supported for this device type."));
+	return -ENOTSUP;
+#endif
+}
+
+/*
+ * Backward-compatible wrapper around crypt_reencrypt_run() for callers
+ * that do not need a progress callback context (usrptr is NULL).
+ */
+int crypt_reencrypt(
+	struct crypt_device *cd,
+	int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
+{
+	return crypt_reencrypt_run(cd, progress, NULL);
+}
+#if USE_LUKS2_REENCRYPTION
+/*
+ * Crash recovery core: rebuild the reencryption context from metadata,
+ * recover the interrupted hotzone from resilience data, commit consistent
+ * segments and finalize via the regular teardown path.
+ * Returns 0 or negative errno.
+ */
+static int reencrypt_recovery(struct crypt_device *cd,
+	struct luks2_hdr *hdr,
+	uint64_t device_size,
+	struct volume_key *vks)
+{
+	int r;
+	struct luks2_reencrypt *rh = NULL;
+
+	r = reencrypt_load(cd, hdr, device_size, 0, 0, vks, &rh);
+	if (r < 0) {
+		log_err(cd, _("Failed to load LUKS2 reencryption context."));
+		return r;
+	}
+
+	/* Rewrite the interrupted hotzone from protection (resilience) data. */
+	r = reencrypt_recover_segment(cd, hdr, rh, vks);
+	if (r < 0)
+		goto out;
+
+	if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
+		goto out;
+
+	r = reencrypt_context_update(cd, rh);
+	if (r) {
+		log_err(cd, _("Failed to update reencryption context."));
+		goto out;
+	}
+
+	r = reencrypt_teardown_ok(cd, hdr, rh);
+	if (!r)
+		r = LUKS2_hdr_write(cd, hdr);
+out:
+	LUKS2_reencrypt_free(cd, rh);
+
+	return r;
+}
+#endif
+/*
+ * Use only for calculation of minimal data device size.
+ * The real data offset is taken directly from segments!
+ */
+int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
+{
+	uint64_t data_offset = LUKS2_get_data_offset(hdr);
+	crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
+
+	/* In clean forward reencryption the data shift still sits in front
+	 * of the data area, so account for it here. */
+	if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
+		data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
+
+	if (blockwise)
+		return data_offset;
+
+	return data_offset << SECTOR_SHIFT;
+}
+
+/* internal only */
+/*
+ * Validate that the data device is accessible and large enough for the
+ * requested (re)encryption operation and return the usable data size
+ * (device size minus data offset) in *dev_size.
+ * Returns 0 or negative errno.
+ */
+int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
+	uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
+{
+	int r;
+	uint64_t data_offset, real_size = 0;
+
+	/* Backward reencryption with a moved segment (or dynamic size) needs
+	 * extra room for the data shift. */
+	if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
+	    (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
+		check_size += reencrypt_data_shift(hdr);
+
+	r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
+	if (r)
+		return r;
+
+	data_offset = LUKS2_reencrypt_data_offset(hdr, false);
+
+	r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
+	if (r)
+		return r;
+
+	r = device_size(crypt_data_device(cd), &real_size);
+	if (r)
+		return r;
+
+	/* Fix: check device size before the debug log below; previously the
+	 * log computed real_size - data_offset first, printing an unsigned
+	 * wrapped (nonsensical) value when the device was too small. */
+	if (real_size < data_offset || (check_size && real_size < check_size)) {
+		log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
+		return -EINVAL;
+	}
+
+	log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
+		", real device size: %" PRIu64 " (%" PRIu64 " sectors) "
+		"calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
+		check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
+		real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
+
+	*dev_size = real_size - data_offset;
+
+	return 0;
+}
+#if USE_LUKS2_REENCRYPTION
+/* Returns keyslot number on success (>= 0) or negative errno otherwise. */
+int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
+	int keyslot_old,
+	int keyslot_new,
+	const char *passphrase,
+	size_t passphrase_size,
+	struct volume_key **vks)
+{
+	uint64_t minimal_size, device_size;
+	int keyslot, r = -EINVAL;
+	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+	struct volume_key *vk = NULL, *_vks = NULL;
+
+	log_dbg(cd, "Entering reencryption crash recovery.");
+
+	if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
+		return r;
+
+	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
+			passphrase, passphrase_size, &_vks);
+	if (r < 0)
+		goto out;
+	keyslot = r;
+
+	if (crypt_use_keyring_for_vk(cd))
+		vk = _vks;
+
+	/* Upload all unlocked volume keys into the kernel keyring. */
+	while (vk) {
+		r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk));
+		if (r < 0)
+			goto out;
+		vk = crypt_volume_key_next(vk);
+	}
+
+	/* Fix: propagate the failure. Previously a failed size check jumped to
+	 * out with r still holding the non-negative keyslot number, so the
+	 * function reported success without performing any recovery. */
+	r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false);
+	if (r < 0)
+		goto out;
+
+	r = reencrypt_recovery(cd, hdr, device_size, _vks);
+
+	if (!r && vks)
+		MOVE_REF(*vks, _vks);
+out:
+	if (r < 0)
+		crypt_drop_keyring_key(cd, _vks);
+	crypt_free_volume_key(_vks);
+
+	return r < 0 ? r : keyslot;
+}
+#endif
+/*
+ * Report current reencryption status and, when params is provided, fill it
+ * with parameters read from metadata. Sets CRYPT_REENCRYPT_REPAIR_NEEDED
+ * when the stored requirement version is old or the keyslot digest missing.
+ */
+crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
+	struct crypt_params_reencrypt *params)
+{
+	crypt_reencrypt_info ri;
+	uint8_t version;
+	int digest;
+
+	if (params)
+		memset(params, 0, sizeof(*params));
+
+	ri = LUKS2_reencrypt_status(hdr);
+
+	/* Nothing else to report without params or active reencryption. */
+	if (!params || ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID)
+		return ri;
+
+	digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
+	if (digest < 0 && digest != -ENOENT)
+		return CRYPT_REENCRYPT_INVALID;
+
+	/*
+	 * In case there's an old "online-reencrypt" requirement or reencryption
+	 * keyslot digest is missing inform caller reencryption metadata requires repair.
+	 */
+	if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
+	    (version < 2 || digest == -ENOENT)) {
+		params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
+		return ri;
+	}
+
+	params->mode = reencrypt_mode(hdr);
+	params->direction = reencrypt_direction(hdr);
+	params->resilience = reencrypt_resilience_type(hdr);
+	params->hash = reencrypt_resilience_hash(hdr);
+	params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
+	params->max_hotzone_size = 0;
+	if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
+		params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;
+
+	return ri;
+}
diff --git a/lib/luks2/luks2_reencrypt_digest.c b/lib/luks2/luks2_reencrypt_digest.c
new file mode 100644
index 0000000..bc86f54
--- /dev/null
+++ b/lib/luks2/luks2_reencrypt_digest.c
@@ -0,0 +1,410 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, reencryption digest helpers
+ *
+ * Copyright (C) 2022-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2022-2023 Ondrej Kozina
+ * Copyright (C) 2022-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+/* Maximum accepted length of a serialized JSON string value. */
+#define MAX_STR 64
+
+/* Describes one JSON field to pull into the serialized verification blob. */
+struct jtype {
+	enum { JNONE = 0, JSTR, JU64, JX64, JU32 } type; /* value encoding */
+	json_object *jobj; /* JSON object containing the field */
+	const char *id; /* field (key) name */
+};
+
+/*
+ * Serialize a single JSON field described by *j into ptr.
+ * With ptr == NULL only the serialized length is computed.
+ * Returns the length in bytes, 0 on error (missing field, wrong JSON type,
+ * or string longer than MAX_STR).
+ * Integers are stored big-endian so the resulting digest is
+ * endianness-independent; this layout is part of the on-disk digest format
+ * and must not change.
+ */
+static size_t sr(struct jtype *j, uint8_t *ptr)
+{
+	json_object *jobj;
+	size_t len = 0;
+	uint64_t u64;
+	uint32_t u32;
+
+	if (!json_object_is_type(j->jobj, json_type_object))
+		return 0;
+
+	if (!json_object_object_get_ex(j->jobj, j->id, &jobj))
+		return 0;
+
+	switch(j->type) {
+	case JSTR: /* JSON string */
+		if (!json_object_is_type(jobj, json_type_string))
+			return 0;
+		len = strlen(json_object_get_string(jobj));
+		if (len > MAX_STR)
+			return 0;
+		if (ptr)
+			memcpy(ptr, json_object_get_string(jobj), len);
+		break;
+	case JU64: /* Unsigned 64bit integer stored as string */
+		if (!json_object_is_type(jobj, json_type_string))
+			break;
+		len = sizeof(u64);
+		if (ptr) {
+			u64 = cpu_to_be64(crypt_jobj_get_uint64(jobj));
+			memcpy(ptr, &u64, len);
+		}
+		break;
+	case JX64: /* Unsigned 64bit segment size (allows "dynamic") */
+		if (!json_object_is_type(jobj, json_type_string))
+			break;
+		if (!strcmp(json_object_get_string(jobj), "dynamic")) {
+			len = strlen("dynamic");
+			if (ptr)
+				memcpy(ptr, json_object_get_string(jobj), len);
+		} else {
+			len = sizeof(u64);
+			u64 = cpu_to_be64(crypt_jobj_get_uint64(jobj));
+			if (ptr)
+				memcpy(ptr, &u64, len);
+		}
+		break;
+	case JU32: /* Unsigned 32bit integer, stored as JSON int */
+		if (!json_object_is_type(jobj, json_type_int))
+			return 0;
+		len = sizeof(u32);
+		if (ptr) {
+			u32 = cpu_to_be32(crypt_jobj_get_uint32(jobj));
+			memcpy(ptr, &u32, len);
+		}
+		break;
+	case JNONE:
+		return 0;
+	};
+
+	return len;
+}
+
+/*
+ * Serialize a zero-terminated (jobj == NULL) list of fields.
+ * Returns the total length, or 0 if any single field fails to serialize.
+ * With ptr == NULL only the length is computed.
+ */
+static size_t srs(struct jtype j[], uint8_t *ptr)
+{
+	size_t chunk, total = 0;
+
+	for (; j->jobj; j++) {
+		chunk = sr(j, ptr);
+		if (!chunk)
+			return 0;
+		total += chunk;
+		if (ptr)
+			ptr += chunk;
+	}
+
+	return total;
+}
+
+/* Serialize fields of a "linear" segment (field set is digest-format stable). */
+static size_t segment_linear_serialize(json_object *jobj_segment, uint8_t *buffer)
+{
+	struct jtype fields[] = {
+		{ JSTR, jobj_segment, "type" },
+		{ JU64, jobj_segment, "offset" },
+		{ JX64, jobj_segment, "size" },
+		{}
+	};
+
+	return srs(fields, buffer);
+}
+
+/* Serialize fields of a "crypt" segment (field set is digest-format stable). */
+static size_t segment_crypt_serialize(json_object *jobj_segment, uint8_t *buffer)
+{
+	struct jtype fields[] = {
+		{ JSTR, jobj_segment, "type" },
+		{ JU64, jobj_segment, "offset" },
+		{ JX64, jobj_segment, "size" },
+		{ JU64, jobj_segment, "iv_tweak" },
+		{ JSTR, jobj_segment, "encryption" },
+		{ JU32, jobj_segment, "sector_size" },
+		{}
+	};
+
+	return srs(fields, buffer);
+}
+
+/* Dispatch segment serialization by the segment "type"; unknown types yield 0. */
+static size_t segment_serialize(json_object *jobj_segment, uint8_t *buffer)
+{
+	json_object *jobj_type;
+	const char *segment_type;
+
+	if (!json_object_object_get_ex(jobj_segment, "type", &jobj_type))
+		return 0;
+
+	segment_type = json_object_get_string(jobj_type);
+	if (!segment_type)
+		return 0;
+
+	if (!strcmp(segment_type, "crypt"))
+		return segment_crypt_serialize(jobj_segment, buffer);
+
+	if (!strcmp(segment_type, "linear"))
+		return segment_linear_serialize(jobj_segment, buffer);
+
+	return 0;
+}
+
+/*
+ * Serialize backup segments into the verification blob.
+ * "backup-previous" and "backup-final" are mandatory; "backup-moved-segment"
+ * is serialized only when present. Returns total length, 0 on error.
+ * With buffer == NULL only the length is computed.
+ */
+static size_t backup_segments_serialize(struct luks2_hdr *hdr, uint8_t *buffer)
+{
+	/* Replaces three near-identical copy-paste stanzas with one loop. */
+	static const char *flags[] = { "backup-previous", "backup-final", "backup-moved-segment" };
+	json_object *jobj_segment;
+	size_t i, l, len = 0;
+
+	for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
+		jobj_segment = LUKS2_get_segment_by_flag(hdr, flags[i]);
+		if (!jobj_segment) {
+			/* only the moved segment is optional */
+			if (i < 2)
+				return 0;
+			continue;
+		}
+		if (!(l = segment_serialize(jobj_segment, buffer)))
+			return 0;
+		len += l;
+		if (buffer)
+			buffer += l;
+	}
+
+	return len;
+}
+
+/*
+ * Serialize reencrypt keyslot metadata (mode, direction and resilience area
+ * parameters) into the verification blob. The selected field set depends on
+ * the area type; the layout is part of the digest format and must not change.
+ * Returns length in bytes, 0 on error. With buffer == NULL only the length
+ * is computed.
+ */
+static size_t reenc_keyslot_serialize(struct luks2_hdr *hdr, uint8_t *buffer)
+{
+	json_object *jobj_keyslot, *jobj_area, *jobj_type;
+	const char *area_type;
+	int keyslot_reencrypt;
+
+	keyslot_reencrypt = LUKS2_find_keyslot(hdr, "reencrypt");
+	if (keyslot_reencrypt < 0)
+		return 0;
+
+	if (!(jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, keyslot_reencrypt)))
+		return 0;
+
+	if (!json_object_object_get_ex(jobj_keyslot, "area", &jobj_area))
+		return 0;
+
+	if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
+		return 0;
+
+	if (!(area_type = json_object_get_string(jobj_type)))
+		return 0;
+
+	/* Base field set common to plain resilience types. */
+	struct jtype j[] = {
+		{ JSTR, jobj_keyslot, "mode" },
+		{ JSTR, jobj_keyslot, "direction" },
+		{ JSTR, jobj_area, "type" },
+		{ JU64, jobj_area, "offset" },
+		{ JU64, jobj_area, "size" },
+		{}
+	};
+	/* Adds shift_size for datashift-based resilience. */
+	struct jtype j_datashift[] = {
+		{ JSTR, jobj_keyslot, "mode" },
+		{ JSTR, jobj_keyslot, "direction" },
+		{ JSTR, jobj_area, "type" },
+		{ JU64, jobj_area, "offset" },
+		{ JU64, jobj_area, "size" },
+		{ JU64, jobj_area, "shift_size" },
+		{}
+	};
+	/* Adds hash and sector_size for checksum resilience. */
+	struct jtype j_checksum[] = {
+		{ JSTR, jobj_keyslot, "mode" },
+		{ JSTR, jobj_keyslot, "direction" },
+		{ JSTR, jobj_area, "type" },
+		{ JU64, jobj_area, "offset" },
+		{ JU64, jobj_area, "size" },
+		{ JSTR, jobj_area, "hash" },
+		{ JU32, jobj_area, "sector_size" },
+		{}
+	};
+	/* Combined datashift + checksum resilience fields. */
+	struct jtype j_datashift_checksum[] = {
+		{ JSTR, jobj_keyslot, "mode" },
+		{ JSTR, jobj_keyslot, "direction" },
+		{ JSTR, jobj_area, "type" },
+		{ JU64, jobj_area, "offset" },
+		{ JU64, jobj_area, "size" },
+		{ JSTR, jobj_area, "hash" },
+		{ JU32, jobj_area, "sector_size" },
+		{ JU64, jobj_area, "shift_size" },
+		{}
+	};
+
+	if (!strcmp(area_type, "datashift-checksum"))
+		return srs(j_datashift_checksum, buffer);
+	else if (!strcmp(area_type, "datashift") ||
+		 !strcmp(area_type, "datashift-journal"))
+		return srs(j_datashift, buffer);
+	else if (!strcmp(area_type, "checksum"))
+		return srs(j_checksum, buffer);
+
+	return srs(j, buffer);
+}
+
+/* Copy a raw blob into buffer (when provided); always returns its length. */
+static size_t blob_serialize(void *blob, size_t length, uint8_t *buffer)
+{
+	if (buffer != NULL)
+		memcpy(buffer, blob, length);
+
+	return length;
+}
+
+/*
+ * Assemble the byte blob protected by the reencryption digest:
+ * a 2-byte header ('v' + ASCII version), the old and new volume keys (when
+ * present and distinct), serialized reencrypt keyslot metadata and
+ * serialized backup segments. The result is returned in *verification_data
+ * as a volume_key object (caller frees). Returns 0, -EINVAL or -ENOMEM.
+ */
+static int reencrypt_assembly_verification_data(struct crypt_device *cd,
+	struct luks2_hdr *hdr,
+	struct volume_key *vks,
+	uint8_t version,
+	struct volume_key **verification_data)
+{
+	uint8_t *ptr;
+	int digest_new, digest_old;
+	struct volume_key *data = NULL, *vk_old = NULL, *vk_new = NULL;
+	size_t keyslot_data_len, segments_data_len, data_len = 2;
+
+	/*
+	 * This works up to (including) version v207.
+	 */
+	assert(version < (UINT8_MAX - 0x2F));
+
+	/* Keys - calculate length */
+	digest_new = LUKS2_reencrypt_digest_new(hdr);
+	digest_old = LUKS2_reencrypt_digest_old(hdr);
+
+	if (digest_old >= 0) {
+		vk_old = crypt_volume_key_by_id(vks, digest_old);
+		if (!vk_old) {
+			log_dbg(cd, "Key (digest id %d) required but not unlocked.", digest_old);
+			return -EINVAL;
+		}
+		data_len += blob_serialize(vk_old->key, vk_old->keylength, NULL);
+	}
+
+	if (digest_new >= 0 && digest_old != digest_new) {
+		vk_new = crypt_volume_key_by_id(vks, digest_new);
+		if (!vk_new) {
+			log_dbg(cd, "Key (digest id %d) required but not unlocked.", digest_new);
+			return -EINVAL;
+		}
+		data_len += blob_serialize(vk_new->key, vk_new->keylength, NULL);
+	}
+
+	/* At least one volume key must be part of the verification data. */
+	if (data_len == 2)
+		return -EINVAL;
+
+	/* Metadata - calculate length */
+	if (!(keyslot_data_len = reenc_keyslot_serialize(hdr, NULL)))
+		return -EINVAL;
+	data_len += keyslot_data_len;
+
+	if (!(segments_data_len = backup_segments_serialize(hdr, NULL)))
+		return -EINVAL;
+	data_len += segments_data_len;
+
+	/* Alloc and fill serialization data */
+	data = crypt_alloc_volume_key(data_len, NULL);
+	if (!data)
+		return -ENOMEM;
+
+	ptr = (uint8_t*)data->key;
+
+	/* Header: 'v' (0x76) followed by '0' + version, e.g. "v2". */
+	*ptr++ = 0x76;
+	*ptr++ = 0x30 + version;
+
+	if (vk_old)
+		ptr += blob_serialize(vk_old->key, vk_old->keylength, ptr);
+
+	if (vk_new)
+		ptr += blob_serialize(vk_new->key, vk_new->keylength, ptr);
+
+	if (!reenc_keyslot_serialize(hdr, ptr))
+		goto bad;
+	ptr += keyslot_data_len;
+
+	if (!backup_segments_serialize(hdr, ptr))
+		goto bad;
+	ptr += segments_data_len;
+
+	/* Filled length must match the pre-computed length exactly. */
+	assert((size_t)(ptr - (uint8_t*)data->key) == data_len);
+
+	*verification_data = data;
+
+	return 0;
+bad:
+	crypt_free_volume_key(data);
+	return -EINVAL;
+}
+
+/*
+ * Create a pbkdf2 digest over the assembled reencryption verification data
+ * and bind it (exclusively) to the reencrypt keyslot.
+ * Returns 0 on success or negative errno.
+ */
+int LUKS2_keyslot_reencrypt_digest_create(struct crypt_device *cd,
+	struct luks2_hdr *hdr,
+	uint8_t version,
+	struct volume_key *vks)
+{
+	struct volume_key *data;
+	int keyslot, digest, r;
+
+	keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
+	if (keyslot < 0)
+		return keyslot;
+
+	r = reencrypt_assembly_verification_data(cd, hdr, vks, version, &data);
+	if (r < 0)
+		return r;
+
+	digest = LUKS2_digest_create(cd, "pbkdf2", hdr, data);
+	crypt_free_volume_key(data);
+	if (digest < 0)
+		return digest;
+
+	/* Drop any previous digest assignment before binding the new one. */
+	r = LUKS2_digest_assign(cd, hdr, keyslot, CRYPT_ANY_DIGEST, 0, 0);
+	if (r < 0)
+		return r;
+
+	return LUKS2_digest_assign(cd, hdr, keyslot, digest, 1, 0);
+}
+
+/*
+ * Verify the reencryption metadata digest against data assembled from the
+ * current header and unlocked volume keys.
+ * Returns >= 0 on success or negative errno.
+ */
+int LUKS2_reencrypt_digest_verify(struct crypt_device *cd,
+	struct luks2_hdr *hdr,
+	struct volume_key *vks)
+{
+	struct volume_key *data;
+	uint8_t version;
+	int keyslot, r;
+
+	log_dbg(cd, "Verifying reencryption metadata.");
+
+	keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
+	if (keyslot < 0)
+		return keyslot;
+
+	if (LUKS2_config_get_reencrypt_version(hdr, &version))
+		return -EINVAL;
+
+	r = reencrypt_assembly_verification_data(cd, hdr, vks, version, &data);
+	if (r < 0)
+		return r;
+
+	r = LUKS2_digest_verify(cd, hdr, data, keyslot);
+	crypt_free_volume_key(data);
+
+	if (r >= 0) {
+		log_dbg(cd, "Reencryption metadata verified.");
+		return r;
+	}
+
+	if (r == -ENOENT)
+		log_dbg(cd, "Reencryption digest is missing.");
+	log_err(cd, _("Reencryption metadata is invalid."));
+
+	return r;
+}
diff --git a/lib/luks2/luks2_segment.c b/lib/luks2/luks2_segment.c
new file mode 100644
index 0000000..63e7c14
--- /dev/null
+++ b/lib/luks2/luks2_segment.c
@@ -0,0 +1,426 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, internal segment handling
+ *
+ * Copyright (C) 2018-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2018-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+/* use only on already validated 'segments' object */
+/*
+ * Return the smallest data offset among non-backup segments (in bytes,
+ * or in 512B sectors when 'blockwise' is set).
+ * Note: a zero offset short-circuits and returns 0 immediately.
+ */
+/* use only on already validated 'segments' object */
+uint64_t json_segments_get_minimal_offset(json_object *jobj_segments, unsigned blockwise)
+{
+ uint64_t tmp, min = blockwise ? UINT64_MAX >> SECTOR_SHIFT : UINT64_MAX;
+
+ if (!jobj_segments)
+ return 0;
+
+ json_object_object_foreach(jobj_segments, key, val) {
+ UNUSED(key);
+
+ /* backup segments do not describe active data placement */
+ if (json_segment_is_backup(val))
+ continue;
+
+ tmp = json_segment_get_offset(val, blockwise);
+
+ /* zero is the minimum possible offset, no point searching further */
+ if (!tmp)
+ return tmp;
+
+ if (tmp < min)
+ min = tmp;
+ }
+
+ return min;
+}
+
+/* Segment data offset in bytes, or in 512B sectors when 'blockwise' is set.
+ * Returns 0 when the segment or its "offset" field is missing. */
+uint64_t json_segment_get_offset(json_object *jobj_segment, unsigned blockwise)
+{
+ json_object *jobj_offset;
+ uint64_t offset;
+
+ if (!jobj_segment)
+ return 0;
+
+ if (!json_object_object_get_ex(jobj_segment, "offset", &jobj_offset))
+ return 0;
+
+ offset = crypt_jobj_get_uint64(jobj_offset);
+
+ return blockwise ? offset >> SECTOR_SHIFT : offset;
+}
+
+/* Segment "type" string ("crypt", "linear", ...), NULL when unavailable. */
+const char *json_segment_type(json_object *jobj_segment)
+{
+ json_object *jobj_type;
+
+ if (!jobj_segment)
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_segment, "type", &jobj_type))
+ return NULL;
+
+ return json_object_get_string(jobj_type);
+}
+
+/* IV tweak offset of a segment; 0 when the field is not present. */
+uint64_t json_segment_get_iv_offset(json_object *jobj_segment)
+{
+ json_object *jobj_iv;
+
+ if (!jobj_segment)
+ return 0;
+
+ if (!json_object_object_get_ex(jobj_segment, "iv_tweak", &jobj_iv))
+ return 0;
+
+ return crypt_jobj_get_uint64(jobj_iv);
+}
+
+/* Segment size in bytes, or in 512B sectors when 'blockwise' is set.
+ * Returns 0 when missing (note: "dynamic" size also yields 0 here). */
+uint64_t json_segment_get_size(json_object *jobj_segment, unsigned blockwise)
+{
+ json_object *jobj_size;
+ uint64_t size;
+
+ if (!jobj_segment)
+ return 0;
+
+ if (!json_object_object_get_ex(jobj_segment, "size", &jobj_size))
+ return 0;
+
+ size = crypt_jobj_get_uint64(jobj_size);
+
+ return blockwise ? size >> SECTOR_SHIFT : size;
+}
+
+/*
+ * Segment cipher specification string; falls back to "null" when the
+ * segment or its "encryption" field is missing (e.g. linear segments).
+ */
+const char *json_segment_get_cipher(json_object *jobj_segment)
+{
+ /* FIXME: Pseudo "null" cipher should be handled elsewhere */
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "encryption", &jobj))
+ return "null";
+
+ return json_object_get_string(jobj);
+}
+
+/*
+ * Segment encryption sector size; defaults to SECTOR_SIZE (512) when the
+ * field is missing or negative.
+ */
+uint32_t json_segment_get_sector_size(json_object *jobj_segment)
+{
+ json_object *jobj;
+ int i;
+
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "sector_size", &jobj))
+ return SECTOR_SIZE;
+
+ i = json_object_get_int(jobj);
+ /* defensive: validated metadata should never store a negative value */
+ return i < 0 ? SECTOR_SIZE : i;
+}
+
+/* Return the optional "flags" array of a segment, or NULL when absent. */
+static json_object *json_segment_get_flags(json_object *jobj_segment)
+{
+ json_object *jobj_flags;
+
+ if (!jobj_segment)
+ return NULL;
+
+ return json_object_object_get_ex(jobj_segment, "flags", &jobj_flags) ? jobj_flags : NULL;
+}
+
+/*
+ * Check whether a segment's "flags" array contains 'flag_str'.
+ * With non-zero 'len' only the first 'len' characters are compared
+ * (prefix match), otherwise the full string must match.
+ */
+bool json_segment_contains_flag(json_object *jobj_segment, const char *flag_str, size_t len)
+{
+ int r, i;
+ json_object *jobj, *jobj_flags = json_segment_get_flags(jobj_segment);
+
+ if (!jobj_flags)
+ return false;
+
+ for (i = 0; i < (int)json_object_array_length(jobj_flags); i++) {
+ jobj = json_object_array_get_idx(jobj_flags, i);
+ if (len)
+ r = strncmp(json_object_get_string(jobj), flag_str, len);
+ else
+ r = strcmp(json_object_get_string(jobj), flag_str);
+ if (!r)
+ return true;
+ }
+
+ return false;
+}
+
+/* True when any segment flag starts with the "backup-" prefix (7 chars). */
+bool json_segment_is_backup(json_object *jobj_segment)
+{
+ return json_segment_contains_flag(jobj_segment, "backup-", 7);
+}
+
+/*
+ * Look up segment object by its numeric id. Returns NULL when the id
+ * cannot be formatted or no such segment exists.
+ */
+json_object *json_segments_get_segment(json_object *jobj_segments, int segment)
+{
+ json_object *jobj;
+ char segment_name[16];
+
+ /* 'segment' is signed int; "%u" with an int argument is undefined
+ * behavior and would print negative ids as huge unsigned values. */
+ if (snprintf(segment_name, sizeof(segment_name), "%d", segment) < 1)
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj_segments, segment_name, &jobj))
+ return NULL;
+
+ return jobj;
+}
+
+/* Count segments excluding backup segments; 0 for a NULL object. */
+unsigned json_segments_count(json_object *jobj_segments)
+{
+ unsigned count = 0;
+
+ if (!jobj_segments)
+ return 0;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ UNUSED(slot);
+ if (!json_segment_is_backup(val))
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * Find the first segment carrying 'flag'. 'retval' is type-punned:
+ * with non-zero 'id' it must point to an int that receives the segment
+ * id, otherwise it must point to a json_object* that receives the
+ * segment object itself. On no match, *retval is left untouched.
+ */
+static void _get_segment_or_id_by_flag(json_object *jobj_segments, const char *flag, unsigned id, void *retval)
+{
+ json_object *jobj_flags, **jobj_ret = (json_object **)retval;
+ int *ret = (int *)retval;
+
+ if (!flag)
+ return;
+
+ json_object_object_foreach(jobj_segments, key, value) {
+ if (!json_object_object_get_ex(value, "flags", &jobj_flags))
+ continue;
+ if (LUKS2_array_jobj(jobj_flags, flag)) {
+ if (id)
+ *ret = atoi(key);
+ else
+ *jobj_ret = value;
+ return;
+ }
+ }
+}
+
+/*
+ * Remove 'flag' from a segment's "flags" array; the whole array is
+ * dropped when it would become empty. No-op when the segment has no
+ * flags or the flag is not present.
+ */
+void json_segment_remove_flag(json_object *jobj_segment, const char *flag)
+{
+ json_object *jobj_flags, *jobj_flags_new;
+
+ if (!jobj_segment)
+ return;
+
+ jobj_flags = json_segment_get_flags(jobj_segment);
+ if (!jobj_flags)
+ return;
+
+ /* returns a new array without 'flag', or NULL when flag was absent */
+ jobj_flags_new = LUKS2_array_remove(jobj_flags, flag);
+ if (!jobj_flags_new)
+ return;
+
+ if (json_object_array_length(jobj_flags_new) <= 0) {
+ json_object_put(jobj_flags_new);
+ json_object_object_del(jobj_segment, "flags");
+ } else
+ json_object_object_add(jobj_segment, "flags", jobj_flags_new);
+}
+
+/*
+ * Allocate a segment object with common fields. A NULL 'length' makes
+ * the segment size "dynamic" (extends to end of device).
+ * Caller owns the returned reference; NULL on allocation failure.
+ */
+static json_object *_segment_create_generic(const char *type, uint64_t offset, const uint64_t *length)
+{
+ json_object *jobj = json_object_new_object();
+ if (!jobj)
+ return NULL;
+
+ json_object_object_add(jobj, "type", json_object_new_string(type));
+ json_object_object_add(jobj, "offset", crypt_jobj_new_uint64(offset));
+ json_object_object_add(jobj, "size", length ? crypt_jobj_new_uint64(*length) : json_object_new_string("dynamic"));
+
+ return jobj;
+}
+
+/* Create a "linear" segment; tags it "in-reencryption" when requested. */
+json_object *json_segment_create_linear(uint64_t offset, const uint64_t *length, unsigned reencryption)
+{
+ json_object *jobj = _segment_create_generic("linear", offset, length);
+ if (reencryption)
+ LUKS2_segment_set_flag(jobj, "in-reencryption");
+ return jobj;
+}
+
+/*
+ * Create a "crypt" segment with cipher, IV tweak and sector size fields;
+ * tags it "in-reencryption" when requested. NULL on allocation failure.
+ */
+json_object *json_segment_create_crypt(uint64_t offset,
+ uint64_t iv_offset, const uint64_t *length,
+ const char *cipher, uint32_t sector_size,
+ unsigned reencryption)
+{
+ json_object *jobj = _segment_create_generic("crypt", offset, length);
+ if (!jobj)
+ return NULL;
+
+ json_object_object_add(jobj, "iv_tweak", crypt_jobj_new_uint64(iv_offset));
+ json_object_object_add(jobj, "encryption", json_object_new_string(cipher));
+ json_object_object_add(jobj, "sector_size", json_object_new_int(sector_size));
+ if (reencryption)
+ LUKS2_segment_set_flag(jobj, "in-reencryption");
+
+ return jobj;
+}
+
+/* Offset of segment 'segment' in hdr (bytes, or sectors when blockwise). */
+uint64_t LUKS2_segment_offset(struct luks2_hdr *hdr, int segment, unsigned blockwise)
+{
+ return json_segment_get_offset(LUKS2_get_segment_jobj(hdr, segment), blockwise);
+}
+
+/*
+ * Return id of the segment flagged "in-reencryption", or -1 when no
+ * segment is currently being reencrypted.
+ */
+int json_segments_segment_in_reencrypt(json_object *jobj_segments)
+{
+ json_object *jobj_flags;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ if (!json_object_object_get_ex(val, "flags", &jobj_flags) ||
+ !LUKS2_array_jobj(jobj_flags, "in-reencryption"))
+ continue;
+
+ return atoi(slot);
+ }
+
+ return -1;
+}
+
+/* Size of segment 'segment' in hdr (bytes, or sectors when blockwise). */
+uint64_t LUKS2_segment_size(struct luks2_hdr *hdr, int segment, unsigned blockwise)
+{
+ return json_segment_get_size(LUKS2_get_segment_jobj(hdr, segment), blockwise);
+}
+
+/* True (non-zero) when segment 'segment' has exactly type 'type'.
+ * A missing segment/type compares as "" and therefore never matches. */
+int LUKS2_segment_is_type(struct luks2_hdr *hdr, int segment, const char *type)
+{
+ return !strcmp(json_segment_type(LUKS2_get_segment_jobj(hdr, segment)) ?: "", type);
+}
+
+/*
+ * Highest id of a non-backup segment with matching 'type', or -1 when
+ * none exists (note: -1, not -EINVAL, to keep historic return contract).
+ */
+int LUKS2_last_segment_by_type(struct luks2_hdr *hdr, const char *type)
+{
+ json_object *jobj_segments;
+ int id, last_found = -1;
+
+ if (!type)
+ return -1;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
+ return -1;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ if (json_segment_is_backup(val))
+ continue;
+ if (strcmp(type, json_segment_type(val) ?: ""))
+ continue;
+
+ /* convert once instead of twice per matching key */
+ id = atoi(slot);
+ if (id > last_found)
+ last_found = id;
+ }
+
+ return last_found;
+}
+
+/*
+ * Lowest id of a non-backup segment with matching 'type', or negative
+ * errno (-EINVAL on bad arguments, -1 when nothing matched).
+ */
+int LUKS2_segment_by_type(struct luks2_hdr *hdr, const char *type)
+{
+ json_object *jobj_segments;
+ int id, first_found = -1;
+
+ if (!type)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
+ return -EINVAL;
+
+ json_object_object_foreach(jobj_segments, slot, val) {
+ if (json_segment_is_backup(val))
+ continue;
+ if (strcmp(type, json_segment_type(val) ?: ""))
+ continue;
+
+ /* convert once; collapse first-match/minimum into one condition */
+ id = atoi(slot);
+ if (first_found < 0 || id < first_found)
+ first_found = id;
+ }
+
+ return first_found;
+}
+
+/*
+ * First unused segment id. Relies on segment ids being contiguous from 0,
+ * so the object count equals the next free id.
+ */
+int LUKS2_segment_first_unused_id(struct luks2_hdr *hdr)
+{
+ json_object *jobj_segments;
+
+ if (!json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
+ return -EINVAL;
+
+ return json_object_object_length(jobj_segments);
+}
+
+/*
+ * Add 'flag' to a segment's "flags" array, creating the array when
+ * needed. Idempotent: returns 0 when the flag is already present.
+ * Returns -EINVAL on bad arguments, -ENOMEM on allocation failure.
+ */
+int LUKS2_segment_set_flag(json_object *jobj_segment, const char *flag)
+{
+ json_object *jobj_flags, *jobj_str;
+
+ if (!jobj_segment || !flag)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_segment, "flags", &jobj_flags)) {
+ jobj_flags = json_object_new_array();
+ if (!jobj_flags)
+ return -ENOMEM;
+ json_object_object_add(jobj_segment, "flags", jobj_flags);
+ }
+
+ if (LUKS2_array_jobj(jobj_flags, flag))
+ return 0;
+
+ /* original code could append NULL on OOM; check the allocation */
+ jobj_str = json_object_new_string(flag);
+ if (!jobj_str)
+ return -ENOMEM;
+ json_object_array_add(jobj_flags, jobj_str);
+
+ return 0;
+}
+
+/*
+ * Replace the whole "segments" object in hdr (takes ownership of the
+ * reference) and optionally write the header to disk.
+ */
+int LUKS2_segments_set(struct crypt_device *cd, struct luks2_hdr *hdr,
+ json_object *jobj_segments, int commit)
+{
+ json_object_object_add(hdr->jobj, "segments", jobj_segments);
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
+/* Id of the first segment carrying 'flag', or -ENOENT when none found. */
+int LUKS2_get_segment_id_by_flag(struct luks2_hdr *hdr, const char *flag)
+{
+ int ret = -ENOENT;
+ json_object *jobj_segments = LUKS2_get_segments_jobj(hdr);
+
+ if (jobj_segments)
+ _get_segment_or_id_by_flag(jobj_segments, flag, 1, &ret);
+
+ return ret;
+}
+
+/* First segment object carrying 'flag' (borrowed ref), NULL when none. */
+json_object *LUKS2_get_segment_by_flag(struct luks2_hdr *hdr, const char *flag)
+{
+ json_object *jobj_segment = NULL,
+ *jobj_segments = LUKS2_get_segments_jobj(hdr);
+
+ if (jobj_segments)
+ _get_segment_or_id_by_flag(jobj_segments, flag, 0, &jobj_segment);
+
+ return jobj_segment;
+}
+
+/* compares key characteristics of both segments */
+/*
+ * Two segments match when their types agree and, for "crypt" segments,
+ * sector size and cipher also agree. Offsets/sizes are intentionally
+ * not compared.
+ */
+bool json_segment_cmp(json_object *jobj_segment_1, json_object *jobj_segment_2)
+{
+ const char *type = json_segment_type(jobj_segment_1);
+ const char *type2 = json_segment_type(jobj_segment_2);
+
+ if (!type || !type2)
+ return false;
+
+ if (strcmp(type, type2))
+ return false;
+
+ if (!strcmp(type, "crypt"))
+ return (json_segment_get_sector_size(jobj_segment_1) == json_segment_get_sector_size(jobj_segment_2) &&
+ !strcmp(json_segment_get_cipher(jobj_segment_1),
+ json_segment_get_cipher(jobj_segment_2)));
+
+ return true;
+}
diff --git a/lib/luks2/luks2_token.c b/lib/luks2/luks2_token.c
new file mode 100644
index 0000000..5f65918
--- /dev/null
+++ b/lib/luks2/luks2_token.c
@@ -0,0 +1,1043 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, token handling
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Milan Broz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <ctype.h>
+#include <dlfcn.h>
+
+#include "luks2_internal.h"
+
+#if USE_EXTERNAL_TOKENS
+static bool external_tokens_enabled = true;
+#else
+static bool external_tokens_enabled = false;
+#endif
+
+/*
+ * Registry of token handlers. Slot 0 is the builtin keyring token;
+ * remaining slots are zero-initialized and filled at runtime by
+ * crypt_token_register() or the external-token loader.
+ */
+static struct crypt_token_handler_internal token_handlers[LUKS2_TOKENS_MAX] = {
+ /* keyring builtin token */
+ {
+ .version = 1,
+ .u = {
+ .v1 = { .name = LUKS2_TOKEN_KEYRING,
+ .open = keyring_open,
+ .buffer_free = keyring_buffer_free,
+ .validate = keyring_validate,
+ .dump = keyring_dump }
+ }
+ }
+};
+
+/* Globally disable loading of external token handler plugins. */
+void crypt_token_external_disable(void)
+{
+ external_tokens_enabled = false;
+}
+
+/* Directory searched for external token plugins, NULL when disabled. */
+const char *crypt_token_external_path(void)
+{
+ return external_tokens_enabled ? EXTERNAL_LUKS2_TOKENS_PATH : NULL;
+}
+
+#if USE_EXTERNAL_TOKENS
+/*
+ * Resolve 'symbol' at ABI 'version' from a dlopen()ed plugin; falls back
+ * to the default-version dlsym() where dlvsym() is unavailable.
+ * Returns NULL when the symbol is missing (error is only logged).
+ * NOTE(review): assumes the caller cleared the dlerror() state before
+ * the lookup, otherwise a stale error may be logged here.
+ */
+static void *token_dlvsym(struct crypt_device *cd,
+ void *handle,
+ const char *symbol,
+ const char *version)
+{
+ char *error;
+ void *sym;
+
+#ifdef HAVE_DLVSYM
+ log_dbg(cd, "Loading symbol %s@%s.", symbol, version);
+ sym = dlvsym(handle, symbol, version);
+#else
+ log_dbg(cd, "Loading default version of symbol %s.", symbol);
+ sym = dlsym(handle, symbol);
+#endif
+ error = dlerror();
+
+ if (error)
+ log_dbg(cd, "%s", error);
+
+ return sym;
+}
+#endif
+
+/* Validate a v1 token handler: 'name' and 'open' are mandatory. */
+static bool token_validate_v1(struct crypt_device *cd, const crypt_token_handler *h)
+{
+ if (!h)
+ return false;
+
+ if (!h->name) {
+ log_dbg(cd, "Error: token handler does not provide name attribute.");
+ return false;
+ }
+
+ if (!h->open) {
+ log_dbg(cd, "Error: token handler does not provide open function.");
+ return false;
+ }
+
+ return true;
+}
+
+#if USE_EXTERNAL_TOKENS
+/* Validate a v2 (external plugin) handler: v1 requirements plus the
+ * mandatory ABI version function. */
+static bool token_validate_v2(struct crypt_device *cd, const struct crypt_token_handler_internal *h)
+{
+ if (!h)
+ return false;
+
+ if (!token_validate_v1(cd, &h->u.v1))
+ return false;
+
+ if (!h->u.v2.version) {
+ log_dbg(cd, "Error: token handler does not provide " CRYPT_TOKEN_ABI_VERSION " function.");
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * External token names must be non-empty, at most LUKS2_TOKEN_NAME_MAX
+ * characters and contain only [A-Za-z0-9_-] (the name becomes part of
+ * the plugin filename, so this also blocks path tricks).
+ */
+static bool external_token_name_valid(const char *name)
+{
+ if (!*name || strlen(name) > LUKS2_TOKEN_NAME_MAX)
+ return false;
+
+ while (*name) {
+ /* cast: passing a negative plain char to isalnum() is UB */
+ if (!isalnum((unsigned char)*name) && *name != '-' && *name != '_')
+ return false;
+ name++;
+ }
+
+ return true;
+}
+#endif
+
+/*
+ * Load an external token plugin libcryptsetup-token-<name>.so from the
+ * configured plugin directory, resolve its ABI symbols and validate the
+ * result into 'ret'. Returns 0 on success, -ENOTSUP when external tokens
+ * are disabled/compiled out, -EINVAL otherwise.
+ */
+static int
+crypt_token_load_external(struct crypt_device *cd, const char *name, struct crypt_token_handler_internal *ret)
+{
+#if USE_EXTERNAL_TOKENS
+ struct crypt_token_handler_v2 *token;
+ void *h;
+ char buf[PATH_MAX];
+ int r;
+
+ if (!external_tokens_enabled)
+ return -ENOTSUP;
+
+ if (!ret || !name)
+ return -EINVAL;
+
+ if (!external_token_name_valid(name)) {
+ log_dbg(cd, "External token name (%.*s) invalid.", LUKS2_TOKEN_NAME_MAX, name);
+ return -EINVAL;
+ }
+
+ token = &ret->u.v2;
+
+ r = snprintf(buf, sizeof(buf), "%s/libcryptsetup-token-%s.so", crypt_token_external_path(), name);
+ if (r < 0 || (size_t)r >= sizeof(buf))
+ return -EINVAL;
+
+ /* plugin path must be absolute; relative dlopen would search the
+ * library search path */
+ assert(*buf == '/');
+
+ log_dbg(cd, "Trying to load %s.", buf);
+
+ h = dlopen(buf, RTLD_LAZY);
+ if (!h) {
+ log_dbg(cd, "%s", dlerror());
+ return -EINVAL;
+ }
+ /* clear any stale error state before the symbol lookups below */
+ dlerror();
+
+ /* strdup failure leaves name NULL; token_validate_v2() rejects it */
+ token->name = strdup(name);
+ token->open = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_OPEN, CRYPT_TOKEN_ABI_VERSION1);
+ token->buffer_free = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_BUFFER_FREE, CRYPT_TOKEN_ABI_VERSION1);
+ token->validate = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_VALIDATE, CRYPT_TOKEN_ABI_VERSION1);
+ token->dump = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_DUMP, CRYPT_TOKEN_ABI_VERSION1);
+ token->open_pin = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_OPEN_PIN, CRYPT_TOKEN_ABI_VERSION1);
+ token->version = token_dlvsym(cd, h, CRYPT_TOKEN_ABI_VERSION, CRYPT_TOKEN_ABI_VERSION1);
+
+ if (!token_validate_v2(cd, ret)) {
+ free(CONST_CAST(void *)token->name);
+ dlclose(h);
+ memset(token, 0, sizeof(*token));
+ return -EINVAL;
+ }
+
+ /* Token loaded, possible error here means only debug message fail and can be ignored */
+ r = snprintf(buf, sizeof(buf), "%s", token->version() ?: "");
+ if (r < 0 || (size_t)r >= sizeof(buf))
+ *buf = '\0';
+
+ log_dbg(cd, "Token handler %s-%s loaded successfully.", token->name, buf);
+
+ token->dlhandle = h;
+ ret->version = 2;
+
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+/* True when 'type' starts with the reserved builtin-token prefix. */
+static int is_builtin_candidate(const char *type)
+{
+ return !strncmp(type, LUKS2_BUILTIN_TOKEN_PREFIX, LUKS2_BUILTIN_TOKEN_PREFIX_LEN);
+}
+
+/*
+ * Find a free slot in the handler registry for handler 'name'.
+ * Rejects names with the reserved builtin prefix and duplicates.
+ * Stores the free index via 'index' (may be NULL) and returns 0,
+ * or -EINVAL on conflict/full registry.
+ */
+static int crypt_token_find_free(struct crypt_device *cd, const char *name, int *index)
+{
+ int i;
+
+ if (is_builtin_candidate(name)) {
+ log_dbg(cd, "'" LUKS2_BUILTIN_TOKEN_PREFIX "' is reserved prefix for builtin tokens.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < LUKS2_TOKENS_MAX && token_handlers[i].u.v1.name; i++) {
+ if (!strcmp(token_handlers[i].u.v1.name, name)) {
+ /* fixed copy-paste debug message: this is a token
+ * handler registry, not a keyslot handler one */
+ log_dbg(cd, "Token handler %s is already registered.", name);
+ return -EINVAL;
+ }
+ }
+
+ if (i == LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ if (index)
+ *index = i;
+
+ return 0;
+}
+
+/*
+ * Public API: register an application-provided v1 token handler.
+ * The handler struct is copied; returns 0 or -EINVAL.
+ */
+int crypt_token_register(const crypt_token_handler *handler)
+{
+ int i, r;
+
+ if (!token_validate_v1(NULL, handler))
+ return -EINVAL;
+
+ r = crypt_token_find_free(NULL, handler->name, &i);
+ if (r < 0)
+ return r;
+
+ token_handlers[i].version = 1;
+ token_handlers[i].u.v1 = *handler;
+ return 0;
+}
+
+/*
+ * Unload all external (v2) token plugins: free the duplicated name and
+ * dlclose the plugin, iterating backwards over the registry.
+ * NOTE(review): registry entries are not zeroed afterwards; callers must
+ * not use the table after this call — confirm at call sites.
+ */
+void crypt_token_unload_external_all(struct crypt_device *cd)
+{
+#if USE_EXTERNAL_TOKENS
+ int i;
+
+ for (i = LUKS2_TOKENS_MAX - 1; i >= 0; i--) {
+ if (token_handlers[i].version < 2)
+ continue;
+
+ log_dbg(cd, "Unloading %s token handler.", token_handlers[i].u.v2.name);
+
+ free(CONST_CAST(void *)token_handlers[i].u.v2.name);
+
+ if (dlclose(CONST_CAST(void *)token_handlers[i].u.v2.dlhandle))
+ log_dbg(cd, "%s", dlerror());
+ }
+#endif
+}
+
+/*
+ * Resolve a handler for token type 'type': search registered handlers
+ * first, then try to lazy-load an external plugin into the first free
+ * registry slot. Returns pointer to the handler union or NULL.
+ */
+static const void
+*LUKS2_token_handler_type(struct crypt_device *cd, const char *type)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_TOKENS_MAX && token_handlers[i].u.v1.name; i++)
+ if (!strcmp(token_handlers[i].u.v1.name, type))
+ return &token_handlers[i].u;
+
+ /* registry full, nowhere to load an external handler into */
+ if (i >= LUKS2_TOKENS_MAX)
+ return NULL;
+
+ /* builtin types are never backed by external plugins */
+ if (is_builtin_candidate(type))
+ return NULL;
+
+ if (crypt_token_load_external(cd, type, &token_handlers[i]))
+ return NULL;
+
+ return &token_handlers[i].u;
+}
+
+/*
+ * Resolve the handler for token slot 'token' by reading its "type"
+ * field from LUKS2 metadata. Returns NULL on any failure.
+ */
+static const void
+*LUKS2_token_handler(struct crypt_device *cd, int token)
+{
+ struct luks2_hdr *hdr;
+ json_object *jobj1, *jobj2;
+
+ if (token < 0)
+ return NULL;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return NULL;
+
+ if (!(jobj1 = LUKS2_get_token_jobj(hdr, token)))
+ return NULL;
+
+ if (!json_object_object_get_ex(jobj1, "type", &jobj2))
+ return NULL;
+
+ return LUKS2_token_handler_type(cd, json_object_get_string(jobj2));
+}
+
+/* First unoccupied token slot in metadata, or -EINVAL when all taken. */
+static int LUKS2_token_find_free(struct luks2_hdr *hdr)
+{
+ int i;
+
+ for (i = 0; i < LUKS2_TOKENS_MAX; i++)
+ if (!LUKS2_get_token_jobj(hdr, i))
+ return i;
+
+ return -EINVAL;
+}
+
+/*
+ * Create, replace or remove (json == NULL) a token object in metadata.
+ * token may be CRYPT_ANY_TOKEN to pick the first free slot (creation
+ * only). Validates generic token structure, then the type handler's own
+ * validate callback when available. Returns the token id (writing the
+ * header when 'commit' is set) or negative errno.
+ */
+int LUKS2_token_create(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *json,
+ int commit)
+{
+ const crypt_token_handler *h;
+ json_object *jobj_tokens, *jobj_type, *jobj;
+ enum json_tokener_error jerr;
+ char num[16];
+
+ if (token == CRYPT_ANY_TOKEN) {
+ /* removal requires an explicit token id */
+ if (!json)
+ return -EINVAL;
+ token = LUKS2_token_find_free(hdr);
+ }
+
+ if (token < 0 || token >= LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens))
+ return -EINVAL;
+
+ if (snprintf(num, sizeof(num), "%d", token) < 0)
+ return -EINVAL;
+
+ /* Remove token */
+ if (!json)
+ json_object_object_del(jobj_tokens, num);
+ else {
+
+ jobj = json_tokener_parse_verbose(json, &jerr);
+ if (!jobj) {
+ log_dbg(cd, "Token JSON parse failed.");
+ return -EINVAL;
+ }
+
+ /* generic structural validation (type, keyslots array, ...) */
+ if (LUKS2_token_validate(cd, hdr->jobj, jobj, num)) {
+ json_object_put(jobj);
+ return -EINVAL;
+ }
+
+ json_object_object_get_ex(jobj, "type", &jobj_type);
+ h = LUKS2_token_handler_type(cd, json_object_get_string(jobj_type));
+
+ /* builtin types must always have a handler compiled in */
+ if (is_builtin_candidate(json_object_get_string(jobj_type)) && !h) {
+ log_dbg(cd, "%s is builtin token candidate with missing handler",
+ json_object_get_string(jobj_type));
+ json_object_put(jobj);
+ return -EINVAL;
+ }
+
+ if (h && h->validate && h->validate(cd, json)) {
+ json_object_put(jobj);
+ log_dbg(cd, "Token type %s validation failed.", h->name);
+ return -EINVAL;
+ }
+
+ /* add tentatively, roll back if json area would overflow */
+ json_object_object_add(jobj_tokens, num, jobj);
+ if (LUKS2_check_json_size(cd, hdr)) {
+ log_dbg(cd, "Not enough space in header json area for new token.");
+ json_object_object_del(jobj_tokens, num);
+ return -ENOSPC;
+ }
+ }
+
+ if (commit)
+ return LUKS2_hdr_write(cd, hdr) ?: token;
+
+ return token;
+}
+
+/*
+ * Classify token slot: invalid id, inactive (empty), or active with/
+ * without a loaded handler, internal (builtin) vs. external. Optionally
+ * returns the token type name via 'type'.
+ */
+crypt_token_info LUKS2_token_status(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char **type)
+{
+ const char *tmp;
+ const crypt_token_handler *th;
+ json_object *jobj_type, *jobj_token;
+
+ if (token < 0 || token >= LUKS2_TOKENS_MAX)
+ return CRYPT_TOKEN_INVALID;
+
+ if (!(jobj_token = LUKS2_get_token_jobj(hdr, token)))
+ return CRYPT_TOKEN_INACTIVE;
+
+ json_object_object_get_ex(jobj_token, "type", &jobj_type);
+ tmp = json_object_get_string(jobj_type);
+
+ /* handler available (may trigger external plugin load) */
+ if ((th = LUKS2_token_handler_type(cd, tmp))) {
+ if (type)
+ *type = th->name;
+ return is_builtin_candidate(tmp) ? CRYPT_TOKEN_INTERNAL : CRYPT_TOKEN_EXTERNAL;
+ }
+
+ if (type)
+ *type = tmp;
+
+ return is_builtin_candidate(tmp) ? CRYPT_TOKEN_INTERNAL_UNKNOWN : CRYPT_TOKEN_EXTERNAL_UNKNOWN;
+}
+
+/* Serialize a token object to compact JSON (no slash escaping). */
+static const char *token_json_to_string(json_object *jobj_token)
+{
+ return json_object_to_json_string_ext(jobj_token,
+ JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
+}
+
+/*
+ * Decide whether a token can be used to unlock 'segment': it must have
+ * at least one assigned keyslot with priority >= minimal_priority that
+ * is valid for the segment. With !requires_keyslot only structural
+ * checks are done. Returns 0 when usable, -ENOENT/-EINVAL otherwise.
+ */
+static int token_is_usable(struct luks2_hdr *hdr, json_object *jobj_token, int segment,
+ crypt_keyslot_priority minimal_priority, bool requires_keyslot)
+{
+ crypt_keyslot_priority keyslot_priority;
+ json_object *jobj_array;
+ int i, keyslot, len, r = -ENOENT;
+
+ if (!jobj_token)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_token, "keyslots", &jobj_array))
+ return -EINVAL;
+
+ if (segment < 0 && segment != CRYPT_ANY_SEGMENT)
+ return -EINVAL;
+
+ /* no assigned keyslot returns -ENOENT even for CRYPT_ANY_SEGMENT */
+ len = json_object_array_length(jobj_array);
+ if (len < 0)
+ return -ENOENT;
+
+ if (!requires_keyslot)
+ return 0;
+
+ if (!len)
+ return -ENOENT;
+
+ for (i = 0; i < len; i++) {
+ keyslot = atoi(json_object_get_string(json_object_array_get_idx(jobj_array, i)));
+
+ keyslot_priority = LUKS2_keyslot_priority_get(hdr, keyslot);
+ if (keyslot_priority == CRYPT_SLOT_PRIORITY_INVALID)
+ return -EINVAL;
+
+ if (keyslot_priority < minimal_priority)
+ continue;
+
+ /* any result other than "not for this segment" is final */
+ r = LUKS2_keyslot_for_segment(hdr, keyslot, segment);
+ if (r != -ENOENT)
+ return r;
+ }
+
+ return r;
+}
+
+/*
+ * Sanitize return codes coming from external (non-builtin) token
+ * plugins: positive values and -EINVAL/-EPERM are remapped to -ENOENT
+ * so a misbehaving plugin cannot confuse the activation loop.
+ */
+static int translate_errno(struct crypt_device *cd, int ret_val, const char *type)
+{
+ if ((ret_val > 0 || ret_val == -EINVAL || ret_val == -EPERM) && !is_builtin_candidate(type)) {
+ log_dbg(cd, "%s token handler returned %d. Changing to %d.", type, ret_val, -ENOENT);
+ ret_val = -ENOENT;
+ }
+
+ return ret_val;
+}
+
+/*
+ * Open a single token: optional type filter, usability check against
+ * the segment/priority, handler validation, then the open or open_pin
+ * callback. On success the handler allocates *buffer (passphrase) of
+ * *buffer_len bytes; caller releases it via LUKS2_token_buffer_free().
+ */
+static int token_open(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ json_object *jobj_token,
+ const char *type,
+ int segment,
+ crypt_keyslot_priority priority,
+ const char *pin,
+ size_t pin_size,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr,
+ bool requires_keyslot)
+{
+ const struct crypt_token_handler_v2 *h;
+ json_object *jobj_type;
+ int r;
+
+ assert(token >= 0);
+ assert(jobj_token);
+ assert(priority >= 0);
+
+ /* type filter: mismatching tokens report -ENOENT, not an error */
+ if (type) {
+ if (!json_object_object_get_ex(jobj_token, "type", &jobj_type))
+ return -EINVAL;
+ if (strcmp(type, json_object_get_string(jobj_type)))
+ return -ENOENT;
+ }
+
+ r = token_is_usable(hdr, jobj_token, segment, priority, requires_keyslot);
+ if (r < 0) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Token %d unusable for segment %d with desired keyslot priority %d.",
+ token, segment, priority);
+ return r;
+ }
+
+ if (!(h = LUKS2_token_handler(cd, token)))
+ return -ENOENT;
+
+ if (h->validate && h->validate(cd, token_json_to_string(jobj_token))) {
+ log_dbg(cd, "Token %d (%s) validation failed.", token, h->name);
+ return -ENOENT;
+ }
+
+ /* a pin was supplied but the handler has no pin-aware open */
+ if (pin && !h->open_pin)
+ r = -ENOENT;
+ else if (pin)
+ r = translate_errno(cd, h->open_pin(cd, token, pin, pin_size, buffer, buffer_len, usrptr), h->name);
+ else
+ r = translate_errno(cd, h->open(cd, token, buffer, buffer_len, usrptr), h->name);
+ if (r < 0)
+ log_dbg(cd, "Token %d (%s) open failed with %d.", token, h->name, r);
+
+ return r;
+}
+
+/*
+ * Release a buffer returned by a token open callback: delegate to the
+ * handler's buffer_free when provided, otherwise wipe and free it here.
+ */
+static void LUKS2_token_buffer_free(struct crypt_device *cd,
+ int token,
+ void *buffer,
+ size_t buffer_len)
+{
+ const crypt_token_handler *h = LUKS2_token_handler(cd, token);
+
+ if (h && h->buffer_free)
+ h->buffer_free(buffer, buffer_len);
+ else {
+ /* buffer may hold a passphrase, wipe before freeing */
+ crypt_safe_memzero(buffer, buffer_len);
+ free(buffer);
+ }
+}
+
+/* Only these errno values let the token activation loop try further
+ * tokens; success or any other error short-circuits the loop. */
+static bool break_loop_retval(int r)
+{
+ return !(r == -ENOENT || r == -EPERM || r == -EAGAIN || r == -ENOANO);
+}
+
+/*
+ * Remember the most significant non-fatal errno across the token loop.
+ * Priority (most significant first): -ENOANO > -EAGAIN > -EPERM; a
+ * stored code is never downgraded. (The original duplicated the
+ * "*stored != -ENOANO" test already guaranteed by the early return.)
+ */
+static void update_return_errno(int r, int *stored)
+{
+ if (*stored == -ENOANO)
+ return;
+
+ if (r == -ENOANO)
+ *stored = r;
+ else if (r == -EAGAIN)
+ *stored = r;
+ else if (r == -EPERM && *stored != -EAGAIN)
+ *stored = r;
+}
+
+/*
+ * Try the token-provided passphrase against every keyslot assigned to
+ * the token (respecting minimal priority). Returns the unlocked keyslot
+ * id, stored -EPERM when some keyslot rejected the passphrase, -ENOENT
+ * when none was tried, or another negative errno on fatal error.
+ */
+static int LUKS2_keyslot_open_by_token(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ int segment,
+ crypt_keyslot_priority priority,
+ const char *buffer,
+ size_t buffer_len,
+ struct volume_key **vk)
+{
+ crypt_keyslot_priority keyslot_priority;
+ json_object *jobj_token, *jobj_token_keyslots, *jobj_type, *jobj;
+ unsigned int num = 0;
+ int i, r = -ENOENT, stored_retval = -ENOENT;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_token, "type", &jobj_type))
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
+ if (!jobj_token_keyslots)
+ return -EINVAL;
+
+ /* Try to open keyslot referenced in token */
+ for (i = 0; i < (int) json_object_array_length(jobj_token_keyslots) && r < 0; i++) {
+ jobj = json_object_array_get_idx(jobj_token_keyslots, i);
+ num = atoi(json_object_get_string(jobj));
+ keyslot_priority = LUKS2_keyslot_priority_get(hdr, num);
+ if (keyslot_priority == CRYPT_SLOT_PRIORITY_INVALID)
+ return -EINVAL;
+ if (keyslot_priority < priority)
+ continue;
+ log_dbg(cd, "Trying to open keyslot %u with token %d (type %s).",
+ num, token, json_object_get_string(jobj_type));
+ r = LUKS2_keyslot_open(cd, num, segment, buffer, buffer_len, vk);
+ /* short circuit on fatal error */
+ if (r < 0 && r != -EPERM && r != -ENOENT)
+ return r;
+ /* save -EPERM in case no other keyslot is usable */
+ if (r == -EPERM)
+ stored_retval = r;
+ }
+
+ if (r < 0)
+ return stored_retval;
+
+ /* loop ended with r >= 0: 'num' is the keyslot that unlocked */
+ return num;
+}
+
+/* Test token's bit in the wrong/missing-pin block bitmap. */
+static bool token_is_blocked(int token, uint32_t *block_list)
+{
+ /* it is safe now, but have assert in case LUKS2_TOKENS_MAX grows */
+ assert(token >= 0 && (size_t)token < BITFIELD_SIZE(block_list));
+
+ return (*block_list & (UINT32_C(1) << token));
+}
+
+/* Set token's bit in the block bitmap (skip it in later passes). */
+static void token_block(int token, uint32_t *block_list)
+{
+ /* it is safe now, but have assert in case LUKS2_TOKENS_MAX grows */
+ assert(token >= 0 && (size_t)token < BITFIELD_SIZE(block_list));
+
+ *block_list |= (UINT32_C(1) << token);
+}
+
+/*
+ * One pass over all tokens at a given keyslot priority: open each
+ * non-blocked token and try its passphrase against assigned keyslots.
+ * Tokens failing with -ENOANO (bad/missing pin) are added to
+ * 'block_list' so later passes skip them. Returns unlocked keyslot id,
+ * a short-circuiting error, or the accumulated *stored_retval.
+ */
+static int token_open_priority(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ json_object *jobj_tokens,
+ const char *type,
+ int segment,
+ crypt_keyslot_priority priority,
+ const char *pin,
+ size_t pin_size,
+ void *usrptr,
+ int *stored_retval,
+ uint32_t *block_list,
+ struct volume_key **vk)
+{
+ char *buffer;
+ size_t buffer_size;
+ int token, r;
+
+ assert(stored_retval);
+ assert(block_list);
+
+ json_object_object_foreach(jobj_tokens, slot, val) {
+ token = atoi(slot);
+ if (token_is_blocked(token, block_list))
+ continue;
+ r = token_open(cd, hdr, token, val, type, segment, priority, pin, pin_size, &buffer, &buffer_size, usrptr, true);
+ if (!r) {
+ r = LUKS2_keyslot_open_by_token(cd, hdr, token, segment, priority,
+ buffer, buffer_size, vk);
+ LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
+ }
+
+ if (r == -ENOANO)
+ token_block(token, block_list);
+
+ /* success or fatal error ends the whole activation attempt */
+ if (break_loop_retval(r))
+ return r;
+
+ update_return_errno(r, stored_retval);
+ }
+
+ return *stored_retval;
+}
+
+/*
+ * Try all tokens, first those tied to 'prefer' priority keyslots, then
+ * 'normal' ones. The blocked-token bitmap persists across both passes.
+ */
+static int token_open_any(struct crypt_device *cd, struct luks2_hdr *hdr, const char *type, int segment,
+ const char *pin, size_t pin_size, void *usrptr, struct volume_key **vk)
+{
+ json_object *jobj_tokens;
+ int r, retval = -ENOENT;
+ uint32_t blocked = 0; /* bitmap with tokens blocked from loop by returning -ENOANO (wrong/missing pin) */
+
+ json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens);
+
+ /* passing usrptr for CRYPT_ANY_TOKEN does not make sense without specific type */
+ if (!type)
+ usrptr = NULL;
+
+ r = token_open_priority(cd, hdr, jobj_tokens, type, segment, CRYPT_SLOT_PRIORITY_PREFER,
+ pin, pin_size, usrptr, &retval, &blocked, vk);
+ if (break_loop_retval(r))
+ return r;
+
+ return token_open_priority(cd, hdr, jobj_tokens, type, segment, CRYPT_SLOT_PRIORITY_NORMAL,
+ pin, pin_size, usrptr, &retval, &blocked, vk);
+}
+
+/*
+ * Unlock a volume key via token(s). 'token' may be a specific slot or
+ * CRYPT_ANY_TOKEN; 'segment' may be CRYPT_DEFAULT_SEGMENT (resolved
+ * here) or CRYPT_ANY_SEGMENT. On success returns the keyslot id and
+ * stores the key in *vk (caller frees).
+ */
+int LUKS2_token_unlock_key(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ int segment,
+ void *usrptr,
+ struct volume_key **vk)
+{
+ char *buffer;
+ size_t buffer_size;
+ json_object *jobj_token;
+ int r = -ENOENT;
+
+ assert(vk);
+
+ if (segment == CRYPT_DEFAULT_SEGMENT)
+ segment = LUKS2_get_default_segment(hdr);
+
+ if (segment < 0 && segment != CRYPT_ANY_SEGMENT)
+ return -EINVAL;
+
+ if (token >= 0 && token < LUKS2_TOKENS_MAX) {
+ /* explicit token: ignore keyslot priorities */
+ if ((jobj_token = LUKS2_get_token_jobj(hdr, token))) {
+ r = token_open(cd, hdr, token, jobj_token, type, segment, CRYPT_SLOT_PRIORITY_IGNORE,
+ pin, pin_size, &buffer, &buffer_size, usrptr, true);
+ if (!r) {
+ r = LUKS2_keyslot_open_by_token(cd, hdr, token, segment, CRYPT_SLOT_PRIORITY_IGNORE,
+ buffer, buffer_size, vk);
+ LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
+ }
+ }
+ } else if (token == CRYPT_ANY_TOKEN)
+ /*
+ * return priorities (ordered form least to most significant):
+ * ENOENT - unusable for activation (no token handler, invalid token metadata, not assigned to volume segment, etc)
+ * EPERM - usable but token provided passphrase did not unlock any assigned keyslot
+ * EAGAIN - usable but not ready (token HW is missing)
+ * ENOANO - ready, but token pin is wrong or missing
+ *
+ * success (>= 0) or any other negative errno short-circuits token activation loop
+ * immediately
+ */
+ r = token_open_any(cd, hdr, type, segment, pin, pin_size, usrptr, vk);
+ else
+ r = -EINVAL;
+
+ return r;
+}
+
+/*
+ * Unlock a key via token and optionally activate device 'name' with it.
+ * Loads the key into the kernel keyring when configured (except for the
+ * cipher_null case without the explicit keyring flag). Returns unlocked
+ * keyslot id or negative errno; drops the keyring key on failure.
+ */
+int LUKS2_token_open_and_activate(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int token,
+ const char *name,
+ const char *type,
+ const char *pin,
+ size_t pin_size,
+ uint32_t flags,
+ void *usrptr)
+{
+ bool use_keyring;
+ int keyslot, r, segment;
+ struct volume_key *vk = NULL;
+
+ /* unbound keys are not tied to the default data segment */
+ if (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY)
+ segment = CRYPT_ANY_SEGMENT;
+ else
+ segment = CRYPT_DEFAULT_SEGMENT;
+
+ r = LUKS2_token_unlock_key(cd, hdr, token, type, pin, pin_size, segment, usrptr, &vk);
+ if (r < 0)
+ return r;
+
+ assert(vk);
+
+ keyslot = r;
+
+ if (!crypt_use_keyring_for_vk(cd))
+ use_keyring = false;
+ else
+ use_keyring = ((name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
+ (flags & CRYPT_ACTIVATE_KEYRING_KEY));
+
+ if (use_keyring) {
+ if (!(r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd, hdr, vk, keyslot)))
+ flags |= CRYPT_ACTIVATE_KEYRING_KEY;
+ }
+
+ if (r >= 0 && name)
+ r = LUKS2_activate(cd, name, vk, flags);
+
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vk);
+ crypt_free_volume_key(vk);
+
+ return r < 0 ? r : keyslot;
+}
+
+/* Dump token content via its handler's dump callback (no-op otherwise). */
+void LUKS2_token_dump(struct crypt_device *cd, int token)
+{
+ const crypt_token_handler *h;
+ json_object *jobj_token;
+
+ h = LUKS2_token_handler(cd, token);
+ if (h && h->dump) {
+ jobj_token = LUKS2_get_token_jobj(crypt_get_hdr(cd, CRYPT_LUKS2), token);
+ if (jobj_token)
+ h->dump(cd, json_object_to_json_string_ext(jobj_token,
+ JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE));
+ }
+}
+
+int LUKS2_token_json_get(struct luks2_hdr *hdr, int token, const char **json)
+{
+ json_object *jobj_token;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ *json = token_json_to_string(jobj_token);
+ return 0;
+}
+
+static int assign_one_keyslot(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int token, int keyslot, int assign)
+{
+ json_object *jobj1, *jobj_token, *jobj_token_keyslots;
+ char num[16];
+
+ log_dbg(cd, "Keyslot %i %s token %i.", keyslot, assign ? "assigned to" : "unassigned from", token);
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
+ if (!jobj_token_keyslots)
+ return -EINVAL;
+
+ if (snprintf(num, sizeof(num), "%d", keyslot) < 0)
+ return -EINVAL;
+
+ if (assign) {
+ jobj1 = LUKS2_array_jobj(jobj_token_keyslots, num);
+ if (!jobj1)
+ json_object_array_add(jobj_token_keyslots, json_object_new_string(num));
+ } else {
+ jobj1 = LUKS2_array_remove(jobj_token_keyslots, num);
+ if (jobj1)
+ json_object_object_add(jobj_token, "keyslots", jobj1);
+ }
+
+ return 0;
+}
+
+static int assign_one_token(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, int token, int assign)
+{
+ json_object *jobj_keyslots;
+ int r = 0;
+
+ if (!LUKS2_get_token_jobj(hdr, token))
+ return -EINVAL;
+
+ if (keyslot == CRYPT_ANY_SLOT) {
+ json_object_object_get_ex(hdr->jobj, "keyslots", &jobj_keyslots);
+
+ json_object_object_foreach(jobj_keyslots, key, val) {
+ UNUSED(val);
+ r = assign_one_keyslot(cd, hdr, token, atoi(key), assign);
+ if (r < 0)
+ break;
+ }
+ } else
+ r = assign_one_keyslot(cd, hdr, token, keyslot, assign);
+
+ return r;
+}
+
+int LUKS2_token_assign(struct crypt_device *cd, struct luks2_hdr *hdr,
+ int keyslot, int token, int assign, int commit)
+{
+ json_object *jobj_tokens;
+ int r = 0;
+
+ if ((keyslot < 0 && keyslot != CRYPT_ANY_SLOT) || keyslot >= LUKS2_KEYSLOTS_MAX ||
+ (token < 0 && token != CRYPT_ANY_TOKEN) || token >= LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ if (token == CRYPT_ANY_TOKEN) {
+ json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens);
+
+ json_object_object_foreach(jobj_tokens, key, val) {
+ UNUSED(val);
+ r = assign_one_token(cd, hdr, keyslot, atoi(key), assign);
+ if (r < 0)
+ break;
+ }
+ } else
+ r = assign_one_token(cd, hdr, keyslot, token, assign);
+
+ if (r < 0)
+ return r;
+
+ if (commit)
+ return LUKS2_hdr_write(cd, hdr) ?: token;
+
+ return token;
+}
+
+static int token_is_assigned(struct luks2_hdr *hdr, int keyslot, int token)
+{
+ int i;
+ json_object *jobj, *jobj_token_keyslots,
+ *jobj_token = LUKS2_get_token_jobj(hdr, token);
+
+ if (!jobj_token)
+ return -ENOENT;
+
+ json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
+
+ for (i = 0; i < (int) json_object_array_length(jobj_token_keyslots); i++) {
+ jobj = json_object_array_get_idx(jobj_token_keyslots, i);
+ if (keyslot == atoi(json_object_get_string(jobj)))
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int LUKS2_token_is_assigned(struct luks2_hdr *hdr, int keyslot, int token)
+{
+ if (keyslot < 0 || keyslot >= LUKS2_KEYSLOTS_MAX || token < 0 || token >= LUKS2_TOKENS_MAX)
+ return -EINVAL;
+
+ return token_is_assigned(hdr, keyslot, token);
+}
+
+int LUKS2_tokens_count(struct luks2_hdr *hdr)
+{
+ json_object *jobj_tokens = LUKS2_get_tokens_jobj(hdr);
+ if (!jobj_tokens)
+ return -EINVAL;
+
+ return json_object_object_length(jobj_tokens);
+}
+
+int LUKS2_token_assignment_copy(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot_from,
+ int keyslot_to,
+ int commit)
+{
+ int i, r;
+
+ if (keyslot_from < 0 || keyslot_from >= LUKS2_KEYSLOTS_MAX || keyslot_to < 0 || keyslot_to >= LUKS2_KEYSLOTS_MAX)
+ return -EINVAL;
+
+ r = LUKS2_tokens_count(hdr);
+ if (r <= 0)
+ return r;
+
+ for (i = 0; i < LUKS2_TOKENS_MAX; i++) {
+ if (!token_is_assigned(hdr, keyslot_from, i)) {
+ if ((r = assign_one_token(cd, hdr, keyslot_to, i, 1)))
+ return r;
+ }
+ }
+
+ return commit ? LUKS2_hdr_write(cd, hdr) : 0;
+}
+
/*
 * Retrieve the passphrase stored behind token @token (or behind the first
 * usable token when CRYPT_ANY_TOKEN, optionally filtered by handler @type).
 * On success returns the token id used and transfers ownership of the newly
 * allocated *passphrase (of *passphrase_size bytes) to the caller; returns
 * a negative errno otherwise.
 */
int LUKS2_token_unlock_passphrase(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int token,
	const char *type,
	const char *pin,
	size_t pin_size,
	void *usrptr,
	char **passphrase,
	size_t *passphrase_size)
{
	char *buffer;
	size_t buffer_size;
	json_object *jobj_token, *jobj_tokens;
	int r = -ENOENT, retval = -ENOENT;

	if (!hdr)
		return -EINVAL;

	if (token >= 0 && token < LUKS2_TOKENS_MAX) {
		/* Single-token path; r stays -ENOENT if the token slot is empty. */
		if ((jobj_token = LUKS2_get_token_jobj(hdr, token)))
			r = token_open(cd, hdr, token, jobj_token, type, CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE,
				       pin, pin_size, &buffer, &buffer_size, usrptr, false);
	} else if (token == CRYPT_ANY_TOKEN) {
		json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens);

		/* usrptr is handler-specific: without a type filter it could reach
		 * an unrelated handler, so drop it. */
		if (!type)
			usrptr = NULL;

		json_object_object_foreach(jobj_tokens, slot, val) {
			/* Keep the current token id: it is returned on success and
			 * identifies whose buffer_free() releases the buffer below. */
			token = atoi(slot);
			r = token_open(cd, hdr, token, val, type, CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE,
				       pin, pin_size, &buffer, &buffer_size, usrptr, false);

			/*
			 * return priorities (ordered from least to most significant):
			 * ENOENT - unusable for activation (no token handler, invalid token metadata, etc)
			 * EAGAIN - usable but not ready (token HW is missing)
			 * ENOANO - ready, but token pin is wrong or missing
			 *
			 * success (>= 0) or any other negative errno short-circuits token activation loop
			 * immediately
			 */
			if (break_loop_retval(r))
				goto out;

			update_return_errno(r, &retval);
		}
		r = retval;
	} else
		r = -EINVAL;
out:
	if (!r) {
		/* Copy the handler-owned buffer into caller-owned safe memory,
		 * then let the handler release its own buffer. */
		*passphrase = crypt_safe_alloc(buffer_size);
		if (*passphrase) {
			memcpy(*passphrase, buffer, buffer_size);
			*passphrase_size = buffer_size;
		} else
			r = -ENOMEM;
		LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
	}

	if (!r)
		return token;

	return r;
}
diff --git a/lib/luks2/luks2_token_keyring.c b/lib/luks2/luks2_token_keyring.c
new file mode 100644
index 0000000..ad18798
--- /dev/null
+++ b/lib/luks2/luks2_token_keyring.c
@@ -0,0 +1,144 @@
+/*
+ * LUKS - Linux Unified Key Setup v2, kernel keyring token
+ *
+ * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2023 Ondrej Kozina
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "luks2_internal.h"
+
+int keyring_open(struct crypt_device *cd,
+ int token,
+ char **buffer,
+ size_t *buffer_len,
+ void *usrptr __attribute__((unused)))
+{
+ json_object *jobj_token, *jobj_key;
+ struct luks2_hdr *hdr;
+ int r;
+
+ if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
+ return -EINVAL;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_token, "key_description", &jobj_key);
+
+ r = keyring_get_passphrase(json_object_get_string(jobj_key), buffer, buffer_len);
+ if (r == -ENOTSUP) {
+ log_dbg(cd, "Kernel keyring features disabled.");
+ return -ENOENT;
+ } else if (r < 0) {
+ log_dbg(cd, "keyring_get_passphrase failed (error %d)", r);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int keyring_validate(struct crypt_device *cd __attribute__((unused)),
+ const char *json)
+{
+ enum json_tokener_error jerr;
+ json_object *jobj_token, *jobj_key;
+ int r = 1;
+
+ log_dbg(cd, "Validating keyring token json");
+
+ jobj_token = json_tokener_parse_verbose(json, &jerr);
+ if (!jobj_token) {
+ log_dbg(cd, "Keyring token JSON parse failed.");
+ return r;
+ }
+
+ if (json_object_object_length(jobj_token) != 3) {
+ log_dbg(cd, "Keyring token is expected to have exactly 3 fields.");
+ goto out;
+ }
+
+ if (!json_object_object_get_ex(jobj_token, "key_description", &jobj_key)) {
+ log_dbg(cd, "missing key_description field.");
+ goto out;
+ }
+
+ if (!json_object_is_type(jobj_key, json_type_string)) {
+ log_dbg(cd, "key_description is not a string.");
+ goto out;
+ }
+
+ /* TODO: perhaps check that key description is in '%s:%s'
+ * format where both strings are not empty */
+ r = !strlen(json_object_get_string(jobj_key));
+out:
+ json_object_put(jobj_token);
+ return r;
+}
+
+void keyring_dump(struct crypt_device *cd, const char *json)
+{
+ enum json_tokener_error jerr;
+ json_object *jobj_token, *jobj_key;
+
+ jobj_token = json_tokener_parse_verbose(json, &jerr);
+ if (!jobj_token)
+ return;
+
+ if (!json_object_object_get_ex(jobj_token, "key_description", &jobj_key)) {
+ json_object_put(jobj_token);
+ return;
+ }
+
+ log_std(cd, "\tKey description: %s\n", json_object_get_string(jobj_key));
+
+ json_object_put(jobj_token);
+}
+
+int LUKS2_token_keyring_json(char *buffer, size_t buffer_size,
+ const struct crypt_token_params_luks2_keyring *keyring_params)
+{
+ int r;
+
+ r = snprintf(buffer, buffer_size, "{ \"type\": \"%s\", \"keyslots\":[],\"key_description\":\"%s\"}",
+ LUKS2_TOKEN_KEYRING, keyring_params->key_description);
+ if (r < 0 || (size_t)r >= buffer_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+int LUKS2_token_keyring_get(struct luks2_hdr *hdr,
+ int token, struct crypt_token_params_luks2_keyring *keyring_params)
+{
+ json_object *jobj_token, *jobj;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ json_object_object_get_ex(jobj_token, "type", &jobj);
+ assert(!strcmp(json_object_get_string(jobj), LUKS2_TOKEN_KEYRING));
+
+ json_object_object_get_ex(jobj_token, "key_description", &jobj);
+
+ keyring_params->key_description = json_object_get_string(jobj);
+
+ return token;
+}
+
/* Keyring token buffer_free() callback: releases the passphrase buffer
 * returned by keyring_open() via crypt_safe_free() (which presumably also
 * wipes the secret before freeing — confirm against its definition). */
void keyring_buffer_free(void *buffer, size_t buffer_len __attribute__((unused)))
{
	crypt_safe_free(buffer);
}