Diffstat (limited to '')
-rw-r--r--  lib/luks2/hw_opal/hw_opal.c          1089
-rw-r--r--  lib/luks2/hw_opal/hw_opal.h            71
-rw-r--r--  lib/luks2/luks2.h                      46
-rw-r--r--  lib/luks2/luks2_digest.c               18
-rw-r--r--  lib/luks2/luks2_digest_pbkdf2.c        16
-rw-r--r--  lib/luks2/luks2_disk_metadata.c        25
-rw-r--r--  lib/luks2/luks2_internal.h             37
-rw-r--r--  lib/luks2/luks2_json_format.c         227
-rw-r--r--  lib/luks2/luks2_json_metadata.c       303
-rw-r--r--  lib/luks2/luks2_keyslot.c              35
-rw-r--r--  lib/luks2/luks2_keyslot_luks2.c        40
-rw-r--r--  lib/luks2/luks2_keyslot_reenc.c        21
-rw-r--r--  lib/luks2/luks2_luks1_convert.c        43
-rw-r--r--  lib/luks2/luks2_reencrypt.c           428
-rw-r--r--  lib/luks2/luks2_reencrypt_digest.c     22
-rw-r--r--  lib/luks2/luks2_segment.c             244
-rw-r--r--  lib/luks2/luks2_token.c               295
-rw-r--r--  lib/luks2/luks2_token_keyring.c        13
18 files changed, 2558 insertions, 415 deletions
diff --git a/lib/luks2/hw_opal/hw_opal.c b/lib/luks2/hw_opal/hw_opal.c
new file mode 100644
index 0000000..31ef87e
--- /dev/null
+++ b/lib/luks2/hw_opal/hw_opal.c
@@ -0,0 +1,1089 @@
+/*
+ * OPAL utilities
+ *
+ * Copyright (C) 2022-2023 Luca Boccassi <bluca@debian.org>
+ * 2023 Ondrej Kozina <okozina@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_SYSMACROS_H
+# include <sys/sysmacros.h> /* for major, minor */
+#endif
+
+#include "internal.h"
+#include "libcryptsetup.h"
+#include "luks2/hw_opal/hw_opal.h"
+#include "utils_device_locking.h"
+
+#if HAVE_HW_OPAL
+
+#include <linux/sed-opal.h>
+
+/* Error codes are defined in the specification:
+ * TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
+ * Section 5.1.5: Method Status Codes
+ * Names and values from table 166 */
+typedef enum OpalStatus {
+ OPAL_STATUS_SUCCESS,
+ OPAL_STATUS_NOT_AUTHORIZED,
+ OPAL_STATUS_OBSOLETE0, /* Undefined but possible return values are called 'obsolete' */
+ OPAL_STATUS_SP_BUSY,
+ OPAL_STATUS_SP_FAILED,
+ OPAL_STATUS_SP_DISABLED,
+ OPAL_STATUS_SP_FROZEN,
+ OPAL_STATUS_NO_SESSIONS_AVAILABLE,
+ OPAL_STATUS_UNIQUENESS_CONFLICT,
+ OPAL_STATUS_INSUFFICIENT_SPACE,
+ OPAL_STATUS_INSUFFICIENT_ROWS,
+ OPAL_STATUS_INVALID_PARAMETER,
+ OPAL_STATUS_OBSOLETE1,
+ OPAL_STATUS_OBSOLETE2,
+ OPAL_STATUS_TPER_MALFUNCTION,
+ OPAL_STATUS_TRANSACTION_FAILURE,
+ OPAL_STATUS_RESPONSE_OVERFLOW,
+ OPAL_STATUS_AUTHORITY_LOCKED_OUT,
+ OPAL_STATUS_FAIL = 0x3F, /* As defined by specification */
+ _OPAL_STATUS_MAX,
+ _OPAL_STATUS_INVALID = -EINVAL,
+} OpalStatus;
+
+static const char* const opal_status_table[_OPAL_STATUS_MAX] = {
+ [OPAL_STATUS_SUCCESS] = "success",
+ [OPAL_STATUS_NOT_AUTHORIZED] = "not authorized",
+ [OPAL_STATUS_OBSOLETE0] = "obsolete",
+ [OPAL_STATUS_SP_BUSY] = "SP busy",
+ [OPAL_STATUS_SP_FAILED] = "SP failed",
+ [OPAL_STATUS_SP_DISABLED] = "SP disabled",
+ [OPAL_STATUS_SP_FROZEN] = "SP frozen",
+ [OPAL_STATUS_NO_SESSIONS_AVAILABLE] = "no sessions available",
+ [OPAL_STATUS_UNIQUENESS_CONFLICT] = "uniqueness conflict",
+ [OPAL_STATUS_INSUFFICIENT_SPACE] = "insufficient space",
+ [OPAL_STATUS_INSUFFICIENT_ROWS] = "insufficient rows",
+ [OPAL_STATUS_INVALID_PARAMETER] = "invalid parameter",
+ [OPAL_STATUS_OBSOLETE1] = "obsolete",
+ [OPAL_STATUS_OBSOLETE2] = "obsolete",
+ [OPAL_STATUS_TPER_MALFUNCTION] = "TPer malfunction",
+ [OPAL_STATUS_TRANSACTION_FAILURE] = "transaction failure",
+ [OPAL_STATUS_RESPONSE_OVERFLOW] = "response overflow",
+ [OPAL_STATUS_AUTHORITY_LOCKED_OUT] = "authority locked out",
+ [OPAL_STATUS_FAIL] = "unknown failure",
+};
+
+static const char *opal_status_to_string(int t)
+{
+ if (t < 0)
+ return strerror(-t);
+
+ if (t >= _OPAL_STATUS_MAX)
+ return "unknown error";
+
+ return opal_status_table[t];
+}
+
+static const char *opal_ioctl_to_string(unsigned long rq)
+{
+ switch(rq) {
+ case IOC_OPAL_GET_STATUS: return "GET_STATUS";
+ case IOC_OPAL_GET_GEOMETRY: return "GET_GEOMETRY";
+ case IOC_OPAL_GET_LR_STATUS: return "GET_LR_STATUS";
+ case IOC_OPAL_TAKE_OWNERSHIP: return "TAKE_OWNERSHIP";
+ case IOC_OPAL_ACTIVATE_USR: return "ACTIVATE_USR";
+ case IOC_OPAL_ACTIVATE_LSP: return "ACTIVATE_LSP";
+ case IOC_OPAL_ERASE_LR: return "ERASE_LR";
+ case IOC_OPAL_SECURE_ERASE_LR: return "SECURE_ERASE_LR";
+ case IOC_OPAL_ADD_USR_TO_LR: return "ADD_USR_TO_LR";
+ case IOC_OPAL_SET_PW: return "SET_PW";
+ case IOC_OPAL_LR_SETUP: return "LR_SETUP";
+ case IOC_OPAL_LOCK_UNLOCK: return "LOCK_UNLOCK";
+ case IOC_OPAL_SAVE: return "SAVE";
+ case IOC_OPAL_PSID_REVERT_TPR: return "PSID_REVERT_TPR";
+ }
+
+ assert(false && "unknown OPAL ioctl");
+ return NULL;
+}
+
+static void opal_ioctl_debug(struct crypt_device *cd,
+ unsigned long rq,
+ void *args,
+ bool post,
+ int ret)
+{
+ const char *cmd = opal_ioctl_to_string(rq);
+
+ if (ret) {
+ log_dbg(cd, "OPAL %s failed: %s", cmd, opal_status_to_string(ret));
+ return;
+ }
+
+ if (post) switch(rq) {
+ case IOC_OPAL_GET_STATUS: { /* OUT */
+ struct opal_status *st = args;
+ log_dbg(cd, "OPAL %s: flags:%" PRIu32, cmd, st->flags);
+ };
+ break;
+ case IOC_OPAL_GET_GEOMETRY: { /* OUT */
+ struct opal_geometry *geo = args;
+ log_dbg(cd, "OPAL %s: align:%" PRIu8 ", lb_size:%" PRIu32 ", gran:%" PRIu64 ", lowest_lba:%" PRIu64,
+ cmd, geo->align, geo->logical_block_size, geo->alignment_granularity, geo->lowest_aligned_lba);
+ };
+ break;
+ case IOC_OPAL_GET_LR_STATUS: { /* OUT */
+ struct opal_lr_status *lrs = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8
+ ", start:%" PRIu64 ", length:%" PRIu64 ", rle:%" PRIu32 ", rwe:%" PRIu32 ", state:%" PRIu32,
+ cmd, lrs->session.sum, lrs->session.who, lrs->session.opal_key.lr,
+ lrs->range_start, lrs->range_length, lrs->RLE, lrs->WLE, lrs->l_state);
+ };
+ break;
+ } else switch (rq) {
+ case IOC_OPAL_TAKE_OWNERSHIP: { /* IN */
+ log_dbg(cd, "OPAL %s", cmd);
+ };
+ break;
+ case IOC_OPAL_ACTIVATE_USR: { /* IN */
+ struct opal_session_info *ui = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8,
+ cmd, ui->sum, ui->who, ui->opal_key.lr);
+ };
+ break;
+ case IOC_OPAL_ACTIVATE_LSP: { /* IN */
+ struct opal_lr_act *act = args;
+ log_dbg(cd, "OPAL %s: k.lr:%" PRIu8 ", sum:%" PRIu32 ", num_lrs:%" PRIu8 ", lr:"
+ "%"PRIu8"|%"PRIu8"|%"PRIu8"|%"PRIu8"|%"PRIu8"|%"PRIu8"|%"PRIu8"|%"PRIu8"|%"PRIu8,
+ cmd, act->key.lr, act->sum, act->num_lrs,
+ act->lr[0], act->lr[1], act->lr[2], act->lr[3], act->lr[4],
+ act->lr[5], act->lr[6], act->lr[7], act->lr[8]);
+ };
+ break;
+ case IOC_OPAL_ERASE_LR: { /* IN */
+ struct opal_session_info *ui = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8,
+ cmd, ui->sum, ui->who, ui->opal_key.lr);
+ };
+ break;
+ case IOC_OPAL_SECURE_ERASE_LR: { /* IN */
+ struct opal_session_info *ui = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8,
+ cmd, ui->sum, ui->who, ui->opal_key.lr);
+ };
+ break;
+ case IOC_OPAL_ADD_USR_TO_LR: { /* IN */
+ struct opal_lock_unlock *lu = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8
+ ", l_state:%" PRIu32 ", flags:%" PRIu16,
+ cmd, lu->session.sum, lu->session.who, lu->session.opal_key.lr,
+ lu->l_state, lu->flags);
+ };
+ break;
+ case IOC_OPAL_SET_PW: { /* IN */
+ struct opal_new_pw *pw = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8,
+ cmd, pw->session.sum, pw->session.who, pw->session.opal_key.lr);
+ };
+ break;
+ case IOC_OPAL_LR_SETUP: { /* IN */
+ struct opal_user_lr_setup *lrs = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8
+ ", start:%" PRIu64 ", length:%" PRIu64 ", rle:%" PRIu32 ", rwe:%" PRIu32,
+ cmd, lrs->session.sum, lrs->session.who, lrs->session.opal_key.lr,
+ lrs->range_start, lrs->range_length, lrs->RLE, lrs->WLE);
+ };
+ break;
+ case IOC_OPAL_LOCK_UNLOCK: { /* IN */
+ struct opal_lock_unlock *lu = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8
+ ", l_state:%" PRIu32 ", flags:%" PRIu16,
+ cmd, lu->session.sum, lu->session.who, lu->session.opal_key.lr,
+ lu->l_state, lu->flags);
+ };
+ break;
+ case IOC_OPAL_SAVE: { /* IN */
+ struct opal_lock_unlock *lu = args;
+ log_dbg(cd, "OPAL %s: sum:%" PRIu32 ", who:%" PRIu32 ", lr:%" PRIu8
+ ", l_state:%" PRIu32 ", flags:%" PRIu16,
+ cmd, lu->session.sum, lu->session.who, lu->session.opal_key.lr,
+ lu->l_state, lu->flags);
+ };
+ break;
+ case IOC_OPAL_PSID_REVERT_TPR: { /* IN */
+ struct opal_key *key = args;
+ log_dbg(cd, "OPAL %s: lr:%" PRIu8,
+ cmd, key->lr);
+ };
+ break;
+ }
+}
+
+static int opal_ioctl(struct crypt_device *cd, int fd, unsigned long rq, void *args)
+{
+ int r;
+
+ opal_ioctl_debug(cd, rq, args, false, 0);
+ r = ioctl(fd, rq, args);
+ opal_ioctl_debug(cd, rq, args, true, r);
+
+ return r;
+}
+
+static int opal_geometry_fd(struct crypt_device *cd,
+ int fd,
+ bool *ret_align,
+ uint32_t *ret_block_size,
+ uint64_t *ret_alignment_granularity_blocks,
+ uint64_t *ret_lowest_lba_blocks)
+{
+ int r;
+ struct opal_geometry geo;
+
+ assert(fd >= 0);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_GET_GEOMETRY, &geo);
+ if (r != OPAL_STATUS_SUCCESS)
+ return r;
+
+ if (ret_align)
+ *ret_align = (geo.align == 1);
+ if (ret_block_size)
+ *ret_block_size = geo.logical_block_size;
+ if (ret_alignment_granularity_blocks)
+ *ret_alignment_granularity_blocks = geo.alignment_granularity;
+ if (ret_lowest_lba_blocks)
+ *ret_lowest_lba_blocks = geo.lowest_aligned_lba;
+
+ return r;
+}
+
+static int opal_range_check_attributes_fd(struct crypt_device *cd,
+ int fd,
+ uint32_t segment_number,
+ const struct volume_key *vk,
+ const uint64_t *check_offset_sectors,
+ const uint64_t *check_length_sectors,
+ bool *check_read_locked,
+ bool *check_write_locked,
+ bool *ret_read_locked,
+ bool *ret_write_locked)
+{
+ int r;
+ struct opal_lr_status *lrs;
+ uint32_t opal_block_bytes = 0;
+ uint64_t offset, length;
+ bool read_locked, write_locked;
+
+ assert(fd >= 0);
+ assert(cd);
+ assert(vk);
+
+ if (check_offset_sectors || check_length_sectors) {
+ r = opal_geometry_fd(cd, fd, NULL, &opal_block_bytes, NULL, NULL);
+ if (r != OPAL_STATUS_SUCCESS)
+ return -EINVAL;
+ }
+
+ lrs = crypt_safe_alloc(sizeof(*lrs));
+ if (!lrs)
+ return -ENOMEM;
+
+ *lrs = (struct opal_lr_status) {
+ .session = {
+ .who = segment_number + 1,
+ .opal_key = {
+ .key_len = vk->keylength,
+ .lr = segment_number
+ }
+ }
+ };
+ memcpy(lrs->session.opal_key.key, vk->key, vk->keylength);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_GET_LR_STATUS, lrs);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to get locking range status on device '%s'.",
+ crypt_get_device_name(cd));
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = 0;
+
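+	/* lrs values are in OPAL logical blocks; convert to 512-byte sectors before comparing. */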
+ if (check_offset_sectors) {
+ offset = lrs->range_start * opal_block_bytes / SECTOR_SIZE;
+ if (offset != *check_offset_sectors) {
+ log_err(cd, _("OPAL range %d offset %" PRIu64 " does not match expected values %" PRIu64 "."),
+ segment_number, offset, *check_offset_sectors);
+ r = -EINVAL;
+ }
+ }
+
+ if (check_length_sectors) {
+ length = lrs->range_length * opal_block_bytes / SECTOR_SIZE;
+ if (length != *check_length_sectors) {
+ log_err(cd, _("OPAL range %d length %" PRIu64" does not match device length %" PRIu64 "."),
+ segment_number, length, *check_length_sectors);
+ r = -EINVAL;
+ }
+ }
+
+ if (!lrs->RLE || !lrs->WLE) {
+ log_err(cd, _("OPAL range %d locking is disabled."), segment_number);
+ r = -EINVAL;
+ }
+
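+	/* Reads are blocked only when the range is fully locked (OPAL_LK);
+	 * writes are blocked when it is read-only (OPAL_RO) or fully locked. */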
+ read_locked = (lrs->l_state == OPAL_LK);
+ write_locked = !!(lrs->l_state & (OPAL_RO | OPAL_LK));
+
+ if (check_read_locked && (read_locked != *check_read_locked)) {
+ log_dbg(cd, "OPAL range %d read lock is %slocked.",
+ segment_number, *check_read_locked ? "" : "not ");
+ log_err(cd, _("Unexpected OPAL range %d lock state."), segment_number);
+ r = -EINVAL;
+ }
+
+ if (check_write_locked && (write_locked != *check_write_locked)) {
+ log_dbg(cd, "OPAL range %d write lock is %slocked.",
+ segment_number, *check_write_locked ? "" : "not ");
+ log_err(cd, _("Unexpected OPAL range %d lock state."), segment_number);
+ r = -EINVAL;
+ }
+
+ if (ret_read_locked)
+ *ret_read_locked = read_locked;
+ if (ret_write_locked)
+ *ret_write_locked = write_locked;
+out:
+ crypt_safe_free(lrs);
+
+ return r;
+}
+
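+/* Returns 1 if any of the expected status flags is set, 0 if none, or a negative error code on failure. */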
+static int opal_query_status(struct crypt_device *cd, struct device *dev, unsigned expected)
+{
+ struct opal_status st = { };
+ int fd, r;
+
+ assert(cd);
+ assert(dev);
+
+ fd = device_open(cd, dev, O_RDONLY);
+ if (fd < 0)
+ return -EIO;
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_GET_STATUS, &st);
+
+ return r < 0 ? -EINVAL : (st.flags & expected) ? 1 : 0;
+}
+
+static int opal_enabled(struct crypt_device *cd, struct device *dev)
+{
+ return opal_query_status(cd, dev, OPAL_FL_LOCKING_ENABLED);
+}
+
+/* requires opal lock */
+int opal_setup_ranges(struct crypt_device *cd,
+ struct device *dev,
+ const struct volume_key *vk,
+ uint64_t range_start,
+ uint64_t range_length,
+ uint32_t segment_number,
+ const void *admin_key,
+ size_t admin_key_len)
+{
+ struct opal_lr_act *activate = NULL;
+ struct opal_session_info *user_session = NULL;
+ struct opal_lock_unlock *user_add_to_lr = NULL, *lock = NULL;
+ struct opal_new_pw *new_pw = NULL;
+ struct opal_user_lr_setup *setup = NULL;
+ int r, fd;
+
+ assert(cd);
+ assert(dev);
+ assert(vk);
+ assert(admin_key);
+ assert(vk->keylength <= OPAL_KEY_MAX);
+
+ if (admin_key_len > OPAL_KEY_MAX)
+ return -EINVAL;
+
+ fd = device_open(cd, dev, O_RDONLY);
+ if (fd < 0)
+ return -EIO;
+
+ r = opal_enabled(cd, dev);
+ if (r < 0)
+ return r;
+
+ /* If OPAL has never been enabled, we need to take ownership and do basic setup first */
+ if (r == 0) {
+ activate = crypt_safe_alloc(sizeof(struct opal_lr_act));
+ if (!activate) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *activate = (struct opal_lr_act) {
+ .key = {
+ .key_len = admin_key_len,
+ },
+ .num_lrs = 8,
+		/* A max of 9 segments is supported; enable them all as there's no reason not to
+		 * (0 is the whole volume).
+		 */
+ .lr = { 1, 2, 3, 4, 5, 6, 7, 8 },
+ };
+ memcpy(activate->key.key, admin_key, admin_key_len);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_TAKE_OWNERSHIP, &activate->key);
+ if (r < 0) {
+ r = -ENOTSUP;
+ log_dbg(cd, "OPAL not supported on this kernel version, refusing.");
+ goto out;
+ }
+ if (r == OPAL_STATUS_NOT_AUTHORIZED) /* We'll try again with a different key. */ {
+ r = -EPERM;
+ log_dbg(cd, "Failed to take ownership of OPAL device '%s': permission denied",
+ crypt_get_device_name(cd));
+ goto out;
+ }
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to take ownership of OPAL device '%s': %s",
+ crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_ACTIVATE_LSP, activate);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to activate OPAL device '%s': %s",
+ crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+ } else {
+ /* If it is already enabled, wipe the locking range first */
+ user_session = crypt_safe_alloc(sizeof(struct opal_session_info));
+ if (!user_session) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *user_session = (struct opal_session_info) {
+ .who = OPAL_ADMIN1,
+ .opal_key = {
+ .lr = segment_number,
+ .key_len = admin_key_len,
+ },
+ };
+ memcpy(user_session->opal_key.key, admin_key, admin_key_len);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_ERASE_LR, user_session);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to reset (erase) OPAL locking range %u on device '%s': %s",
+ segment_number, crypt_get_device_name(cd), opal_status_to_string(r));
+ r = opal_ioctl(cd, fd, IOC_OPAL_SECURE_ERASE_LR, user_session);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to reset (secure erase) OPAL locking range %u on device '%s': %s",
+ segment_number, crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ crypt_safe_free(user_session);
+
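+	/* Enable the per-range user authority (segment_number + 1) using the admin credentials;
+	 * its own password is set later via IOC_OPAL_SET_PW. */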
+ user_session = crypt_safe_alloc(sizeof(struct opal_session_info));
+ if (!user_session) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *user_session = (struct opal_session_info) {
+ .who = segment_number + 1,
+ .opal_key = {
+ .key_len = admin_key_len,
+ },
+ };
+ memcpy(user_session->opal_key.key, admin_key, admin_key_len);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_ACTIVATE_USR, user_session);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to activate OPAL user on device '%s': %s",
+ crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ user_add_to_lr = crypt_safe_alloc(sizeof(struct opal_lock_unlock));
+ if (!user_add_to_lr) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *user_add_to_lr = (struct opal_lock_unlock) {
+ .session = {
+ .who = segment_number + 1,
+ .opal_key = {
+ .lr = segment_number,
+ .key_len = admin_key_len,
+ },
+ },
+ .l_state = OPAL_RO,
+ };
+ memcpy(user_add_to_lr->session.opal_key.key, admin_key, admin_key_len);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_ADD_USR_TO_LR, user_add_to_lr);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to add OPAL user to locking range %u (RO) on device '%s': %s",
+ segment_number, crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+ user_add_to_lr->l_state = OPAL_RW;
+ r = opal_ioctl(cd, fd, IOC_OPAL_ADD_USR_TO_LR, user_add_to_lr);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to add OPAL user to locking range %u (RW) on device '%s': %s",
+ segment_number, crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ new_pw = crypt_safe_alloc(sizeof(struct opal_new_pw));
+ if (!new_pw) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *new_pw = (struct opal_new_pw) {
+ .session = {
+ .who = OPAL_ADMIN1,
+ .opal_key = {
+ .lr = segment_number,
+ .key_len = admin_key_len,
+ },
+ },
+ .new_user_pw = {
+ .who = segment_number + 1,
+ .opal_key = {
+ .key_len = vk->keylength,
+ .lr = segment_number,
+ },
+ },
+ };
+ memcpy(new_pw->new_user_pw.opal_key.key, vk->key, vk->keylength);
+ memcpy(new_pw->session.opal_key.key, admin_key, admin_key_len);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_SET_PW, new_pw);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to set OPAL user password on device '%s': (%d) %s",
+ crypt_get_device_name(cd), r, opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ setup = crypt_safe_alloc(sizeof(struct opal_user_lr_setup));
+ if (!setup) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *setup = (struct opal_user_lr_setup) {
+ .range_start = range_start,
+ .range_length = range_length,
+		/* Some drives do not enable Locking Ranges on setup. This has some
+		 * interesting consequences: the Lock command called later below will pass,
+		 * but the locking range will _not_ be locked at all.
+		 */
+ .RLE = 1,
+ .WLE = 1,
+ .session = {
+ .who = OPAL_ADMIN1,
+ .opal_key = {
+ .key_len = admin_key_len,
+ .lr = segment_number,
+ },
+ },
+ };
+ memcpy(setup->session.opal_key.key, admin_key, admin_key_len);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_LR_SETUP, setup);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to setup locking range of length %llu at offset %llu on OPAL device '%s': %s",
+ setup->range_length, setup->range_start, crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* After setup an OPAL device is unlocked, but the expectation with cryptsetup is that it needs
+ * to be activated separately, so lock it immediately. */
+ lock = crypt_safe_alloc(sizeof(struct opal_lock_unlock));
+ if (!lock) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *lock = (struct opal_lock_unlock) {
+ .l_state = OPAL_LK,
+ .session = {
+ .who = segment_number + 1,
+ .opal_key = {
+ .key_len = vk->keylength,
+ .lr = segment_number,
+ },
+ }
+ };
+ memcpy(lock->session.opal_key.key, vk->key, vk->keylength);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_LOCK_UNLOCK, lock);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to lock OPAL device '%s': %s",
+ crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* Double check the locking range is locked and the ranges are set up as configured */
+ r = opal_range_check_attributes_fd(cd, fd, segment_number, vk, &range_start,
+ &range_length, &(bool) {true}, &(bool){true},
+ NULL, NULL);
+out:
+ crypt_safe_free(activate);
+ crypt_safe_free(user_session);
+ crypt_safe_free(user_add_to_lr);
+ crypt_safe_free(new_pw);
+ crypt_safe_free(setup);
+ crypt_safe_free(lock);
+
+ return r;
+}
+
+static int opal_lock_unlock(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const struct volume_key *vk,
+ bool lock)
+{
+ struct opal_lock_unlock unlock = {
+ .l_state = lock ? OPAL_LK : OPAL_RW,
+ .session = {
+ .who = segment_number + 1,
+ .opal_key = {
+ .lr = segment_number,
+ },
+ },
+ };
+ int r, fd;
+
+ if (opal_supported(cd, dev) <= 0)
+ return -ENOTSUP;
+ if (!lock && !vk)
+ return -EINVAL;
+
+ fd = device_open(cd, dev, O_RDONLY);
+ if (fd < 0)
+ return -EIO;
+
+ if (!lock) {
+ assert(vk->keylength <= OPAL_KEY_MAX);
+
+ unlock.session.opal_key.key_len = vk->keylength;
+ memcpy(unlock.session.opal_key.key, vk->key, vk->keylength);
+ }
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_LOCK_UNLOCK, &unlock);
+ if (r < 0) {
+ r = -ENOTSUP;
+ log_dbg(cd, "OPAL not supported on this kernel version, refusing.");
+ goto out;
+ }
+ if (r == OPAL_STATUS_NOT_AUTHORIZED) /* We'll try again with a different key. */ {
+ r = -EPERM;
+ log_dbg(cd, "Failed to %slock OPAL device '%s': permission denied",
+ lock ? "" : "un", crypt_get_device_name(cd));
+ goto out;
+ }
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to %slock OPAL device '%s': %s",
+ lock ? "" : "un", crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* If we are unlocking, also tell the kernel to automatically unlock when resuming
+ * from suspend, otherwise the drive will be locked and everything will go up in flames.
+ * Also set the flag to allow locking without having to pass the key again.
+ * But do not error out if this fails, as the device will already be unlocked.
+ *
+ * On a lock path we have to overwrite the cached key from kernel otherwise the locking range
+ * gets unlocked automatically after system resume even when cryptsetup previously locked it
+ * on purpose (crypt_deactivate* or crypt_suspend)
+ */
+ if (!lock)
+ unlock.flags = OPAL_SAVE_FOR_LOCK;
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_SAVE, &unlock);
+ if (r != OPAL_STATUS_SUCCESS) {
+ if (!lock)
+ log_std(cd, "Failed to prepare OPAL device '%s' for sleep resume, be aware before suspending: %s",
+ crypt_get_device_name(cd), opal_status_to_string(r));
+ else
+ log_std(cd, "Failed to erase OPAL key for device '%s' from kernel: %s",
+ crypt_get_device_name(cd), opal_status_to_string(r));
+ r = 0;
+ }
+out:
+ if (!lock)
+ crypt_safe_memzero(unlock.session.opal_key.key, unlock.session.opal_key.key_len);
+
+ return r;
+}
+
+/* requires opal lock */
+int opal_lock(struct crypt_device *cd, struct device *dev, uint32_t segment_number)
+{
+ return opal_lock_unlock(cd, dev, segment_number, NULL, /* lock= */ true);
+}
+
+/* requires opal lock */
+int opal_unlock(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const struct volume_key *vk)
+{
+ return opal_lock_unlock(cd, dev, segment_number, vk, /* lock= */ false);
+}
+
+/*
+ * Does not require the opal lock. This completely destroys
+ * data on the whole OPAL block device; serialization does not
+ * make sense here.
+ */
+int opal_factory_reset(struct crypt_device *cd,
+ struct device *dev,
+ const char *password,
+ size_t password_len)
+{
+ struct opal_key reset = {
+ .key_len = password_len,
+ };
+ int r, fd;
+
+ assert(cd);
+ assert(dev);
+ assert(password);
+
+ if (password_len > OPAL_KEY_MAX)
+ return -EINVAL;
+
+ fd = device_open(cd, dev, O_RDONLY);
+ if (fd < 0)
+ return -EIO;
+
+ memcpy(reset.key, password, password_len);
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_PSID_REVERT_TPR, &reset);
+ if (r < 0) {
+ r = -ENOTSUP;
+ log_dbg(cd, "OPAL not supported on this kernel version, refusing.");
+ goto out;
+ }
+ if (r == OPAL_STATUS_NOT_AUTHORIZED) /* We'll try again with a different key. */ {
+ r = -EPERM;
+ log_dbg(cd, "Failed to reset OPAL device '%s', incorrect PSID?",
+ crypt_get_device_name(cd));
+ goto out;
+ }
+	if (r != OPAL_STATUS_SUCCESS) {
+		log_dbg(cd, "Failed to reset OPAL device '%s' with PSID: %s",
+			crypt_get_device_name(cd), opal_status_to_string(r));
+		r = -EINVAL;
+		goto out;
+	}
+out:
+ crypt_safe_memzero(reset.key, reset.key_len);
+
+ return r;
+}
+
+/* requires opal lock */
+int opal_reset_segment(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const char *password,
+ size_t password_len)
+{
+ struct opal_session_info *user_session = NULL;
+ struct opal_user_lr_setup *setup = NULL;
+ int r, fd;
+
+ assert(cd);
+ assert(dev);
+ assert(password);
+
+ if (password_len > OPAL_KEY_MAX)
+ return -EINVAL;
+
+ if (opal_enabled(cd, dev) <= 0)
+ return -EINVAL;
+
+ user_session = crypt_safe_alloc(sizeof(struct opal_session_info));
+ if (!user_session)
+ return -ENOMEM;
+ *user_session = (struct opal_session_info) {
+ .who = OPAL_ADMIN1,
+ .opal_key = {
+ .lr = segment_number,
+ .key_len = password_len,
+ },
+ };
+ memcpy(user_session->opal_key.key, password, password_len);
+
+ fd = device_open(cd, dev, O_RDONLY);
+ if (fd < 0) {
+ r = -EIO;
+ goto out;
+ }
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_ERASE_LR, user_session);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to reset (erase) OPAL locking range %u on device '%s': %s",
+ segment_number, crypt_get_device_name(cd), opal_status_to_string(r));
+ r = opal_ioctl(cd, fd, IOC_OPAL_SECURE_ERASE_LR, user_session);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to reset (secure erase) OPAL locking range %u on device '%s': %s",
+ segment_number, crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* Unlike IOC_OPAL_ERASE_LR, IOC_OPAL_SECURE_ERASE_LR does not disable the locking range,
+ * we have to do that by hand.
+ */
+ setup = crypt_safe_alloc(sizeof(struct opal_user_lr_setup));
+ if (!setup) {
+ r = -ENOMEM;
+ goto out;
+ }
+ *setup = (struct opal_user_lr_setup) {
+ .range_start = 0,
+ .range_length = 0,
+ .session = {
+ .who = OPAL_ADMIN1,
+ .opal_key = user_session->opal_key,
+ },
+ };
+
+ r = opal_ioctl(cd, fd, IOC_OPAL_LR_SETUP, setup);
+ if (r != OPAL_STATUS_SUCCESS) {
+ log_dbg(cd, "Failed to disable locking range on OPAL device '%s': %s",
+ crypt_get_device_name(cd), opal_status_to_string(r));
+ r = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ crypt_safe_free(user_session);
+ crypt_safe_free(setup);
+
+ return r;
+}
+
+/*
+ * Does not require opal lock (immutable).
+ */
+int opal_supported(struct crypt_device *cd, struct device *dev)
+{
+ return opal_query_status(cd, dev, OPAL_FL_SUPPORTED|OPAL_FL_LOCKING_SUPPORTED);
+}
+
+/*
+ * Does not require opal lock (immutable).
+ */
+int opal_geometry(struct crypt_device *cd,
+ struct device *dev,
+ bool *ret_align,
+ uint32_t *ret_block_size,
+ uint64_t *ret_alignment_granularity_blocks,
+ uint64_t *ret_lowest_lba_blocks)
+{
+ int fd;
+
+ assert(cd);
+ assert(dev);
+
+ fd = device_open(cd, dev, O_RDONLY);
+ if (fd < 0)
+ return -EIO;
+
+ return opal_geometry_fd(cd, fd, ret_align, ret_block_size,
+ ret_alignment_granularity_blocks, ret_lowest_lba_blocks);
+}
+
+/* requires opal lock */
+int opal_range_check_attributes_and_get_lock_state(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const struct volume_key *vk,
+ const uint64_t *check_offset_sectors,
+ const uint64_t *check_length_sectors,
+ bool *ret_read_locked,
+ bool *ret_write_locked)
+{
+ int fd;
+
+ assert(cd);
+ assert(dev);
+ assert(vk);
+
+ fd = device_open(cd, dev, O_RDONLY);
+ if (fd < 0)
+ return -EIO;
+
+ return opal_range_check_attributes_fd(cd, fd, segment_number, vk,
+ check_offset_sectors, check_length_sectors, NULL,
+ NULL, ret_read_locked, ret_write_locked);
+}
+
+static int opal_lock_internal(struct crypt_device *cd, struct device *opal_device, struct crypt_lock_handle **opal_lock)
+{
+ char *lock_resource;
+ int devfd, r;
+ struct stat st;
+
+ if (!crypt_metadata_locking_enabled()) {
+ *opal_lock = NULL;
+ return 0;
+ }
+
+ /*
+ * This also asserts we do not hold any metadata lock on the same device to
+ * avoid deadlock (OPAL lock must be taken first)
+ */
+ devfd = device_open(cd, opal_device, O_RDONLY);
+ if (devfd < 0)
+ return -EINVAL;
+
+ if (fstat(devfd, &st) || !S_ISBLK(st.st_mode))
+ return -EINVAL;
+
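+	/* Key the lock on the device major:minor so every path to the same OPAL drive serializes. */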
+ r = asprintf(&lock_resource, "OPAL_%d:%d", major(st.st_rdev), minor(st.st_rdev));
+ if (r < 0)
+ return -ENOMEM;
+
+ r = crypt_write_lock(cd, lock_resource, true, opal_lock);
+
+ free(lock_resource);
+
+ return r;
+}
+
+int opal_exclusive_lock(struct crypt_device *cd, struct device *opal_device, struct crypt_lock_handle **opal_lock)
+{
+ if (!cd || !opal_device || (crypt_get_type(cd) && strcmp(crypt_get_type(cd), CRYPT_LUKS2)))
+ return -EINVAL;
+
+ return opal_lock_internal(cd, opal_device, opal_lock);
+}
+
+void opal_exclusive_unlock(struct crypt_device *cd, struct crypt_lock_handle *opal_lock)
+{
+ crypt_unlock_internal(cd, opal_lock);
+}
+
+#else
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+int opal_setup_ranges(struct crypt_device *cd,
+ struct device *dev,
+ const struct volume_key *vk,
+ uint64_t range_start,
+ uint64_t range_length,
+ uint32_t segment_number,
+ const void *admin_key,
+ size_t admin_key_len)
+{
+ return -ENOTSUP;
+}
+
+int opal_lock(struct crypt_device *cd, struct device *dev, uint32_t segment_number)
+{
+ return -ENOTSUP;
+}
+
+int opal_unlock(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const struct volume_key *vk)
+{
+ return -ENOTSUP;
+}
+
+int opal_supported(struct crypt_device *cd, struct device *dev)
+{
+ return -ENOTSUP;
+}
+
+int opal_factory_reset(struct crypt_device *cd,
+ struct device *dev,
+ const char *password,
+ size_t password_len)
+{
+ return -ENOTSUP;
+}
+
+int opal_reset_segment(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const char *password,
+ size_t password_len)
+{
+ return -ENOTSUP;
+}
+
+int opal_geometry(struct crypt_device *cd,
+ struct device *dev,
+ bool *ret_align,
+ uint32_t *ret_block_size,
+ uint64_t *ret_alignment_granularity_blocks,
+ uint64_t *ret_lowest_lba_blocks)
+{
+ return -ENOTSUP;
+}
+
+int opal_range_check_attributes_and_get_lock_state(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const struct volume_key *vk,
+ const uint64_t *check_offset_sectors,
+ const uint64_t *check_length_sectors,
+ bool *ret_read_locked,
+ bool *ret_write_locked)
+{
+ return -ENOTSUP;
+}
+
+int opal_exclusive_lock(struct crypt_device *cd, struct device *opal_device, struct crypt_lock_handle **opal_lock)
+{
+ return -ENOTSUP;
+}
+
+void opal_exclusive_unlock(struct crypt_device *cd, struct crypt_lock_handle *opal_lock)
+{
+}
+
+#endif
diff --git a/lib/luks2/hw_opal/hw_opal.h b/lib/luks2/hw_opal/hw_opal.h
new file mode 100644
index 0000000..f1823bf
--- /dev/null
+++ b/lib/luks2/hw_opal/hw_opal.h
@@ -0,0 +1,71 @@
+/*
+ * OPAL utilities
+ *
+ * Copyright (C) 2022-2023 Luca Boccassi <bluca@debian.org>
+ * 2023 Ondrej Kozina <okozina@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _UTILS_OPAL
+#define _UTILS_OPAL
+
+#include "internal.h"
+
+struct crypt_lock_handle;
+
+int opal_setup_ranges(struct crypt_device *cd,
+ struct device *dev,
+ const struct volume_key *vk,
+ uint64_t range_start,
+ uint64_t range_length,
+ uint32_t segment_number,
+ const void *admin_key,
+ size_t admin_key_len);
+int opal_lock(struct crypt_device *cd, struct device *dev, uint32_t segment_number);
+int opal_unlock(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const struct volume_key *vk);
+int opal_supported(struct crypt_device *cd, struct device *dev);
+int opal_factory_reset(struct crypt_device *cd,
+ struct device *dev,
+ const char *password,
+ size_t password_len);
+int opal_reset_segment(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const char *password,
+ size_t password_len);
+int opal_geometry(struct crypt_device *cd,
+ struct device *dev,
+ bool *ret_align,
+ uint32_t *ret_block_size,
+ uint64_t *ret_alignment_granularity_blocks,
+ uint64_t *ret_lowest_lba_blocks);
+int opal_range_check_attributes_and_get_lock_state(struct crypt_device *cd,
+ struct device *dev,
+ uint32_t segment_number,
+ const struct volume_key *vk,
+ const uint64_t *check_offset_sectors,
+ const uint64_t *check_length_sectors,
+ bool *ret_read_locked,
+ bool *ret_write_locked);
+int opal_exclusive_lock(struct crypt_device *cd,
+ struct device *opal_device,
+ struct crypt_lock_handle **opal_lock);
+void opal_exclusive_unlock(struct crypt_device *cd, struct crypt_lock_handle *opal_lock);
+
+#endif
diff --git a/lib/luks2/luks2.h b/lib/luks2/luks2.h
index dfccf02..25ae1dd 100644
--- a/lib/luks2/luks2.h
+++ b/lib/luks2/luks2.h
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -224,8 +224,7 @@ int LUKS2_keyslot_store(struct crypt_device *cd,
int LUKS2_keyslot_wipe(struct crypt_device *cd,
struct luks2_hdr *hdr,
- int keyslot,
- int wipe_area_only);
+ int keyslot);
crypt_keyslot_priority LUKS2_keyslot_priority_get(struct luks2_hdr *hdr, int keyslot);
@@ -277,6 +276,7 @@ crypt_token_info LUKS2_token_status(struct crypt_device *cd,
int LUKS2_token_open_and_activate(struct crypt_device *cd,
struct luks2_hdr *hdr,
+ int keyslot,
int token,
const char *name,
const char *type,
@@ -287,6 +287,7 @@ int LUKS2_token_open_and_activate(struct crypt_device *cd,
int LUKS2_token_unlock_key(struct crypt_device *cd,
struct luks2_hdr *hdr,
+ int keyslot,
int token,
const char *type,
const char *pin,
@@ -359,7 +360,8 @@ int LUKS2_digest_create(struct crypt_device *cd,
*/
int LUKS2_activate(struct crypt_device *cd,
const char *name,
- struct volume_key *vk,
+ struct volume_key *crypt_key,
+ struct volume_key *opal_key,
uint32_t flags);
int LUKS2_activate_multi(struct crypt_device *cd,
@@ -378,16 +380,23 @@ int LUKS2_generate_hdr(
struct crypt_device *cd,
struct luks2_hdr *hdr,
const struct volume_key *vk,
- const char *cipherName,
- const char *cipherMode,
+ const char *cipher_spec,
const char *integrity,
const char *uuid,
unsigned int sector_size,
uint64_t data_offset,
- uint64_t align_offset,
- uint64_t required_alignment,
- uint64_t metadata_size,
- uint64_t keyslots_size);
+ uint64_t metadata_size_bytes,
+ uint64_t keyslots_size_bytes,
+ uint64_t device_size_bytes,
+ uint32_t opal_segment_number,
+ uint32_t opal_key_size);
+
+int LUKS2_hdr_get_storage_params(struct crypt_device *cd,
+ uint64_t alignment_offset_bytes,
+ uint64_t alignment_bytes,
+ uint64_t *ret_metadata_size_bytes,
+ uint64_t *ret_keyslots_size_bytes,
+ uint64_t *ret_data_offset_bytes);
int LUKS2_check_metadata_area_size(uint64_t metadata_size);
int LUKS2_check_keyslots_area_size(uint64_t keyslots_size);
@@ -414,6 +423,12 @@ int LUKS2_keyslot_area(struct luks2_hdr *hdr,
uint64_t *length);
int LUKS2_keyslot_pbkdf(struct luks2_hdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf);
+int LUKS2_split_crypt_and_opal_keys(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ struct volume_key **ret_crypt_key,
+ struct volume_key **ret_opal_key);
+
/*
* Permanent activation flags stored in header
*/
@@ -457,6 +472,9 @@ int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
size_t passphrase_size,
struct volume_key **vks);
+int LUKS2_reencrypt_locked_recovery_by_vks(struct crypt_device *cd,
+ struct volume_key *vks);
+
void LUKS2_reencrypt_free(struct crypt_device *cd,
struct luks2_reencrypt *rh);
@@ -479,9 +497,13 @@ int LUKS2_reencrypt_check_device_size(struct crypt_device *cd,
struct luks2_hdr *hdr,
uint64_t check_size,
uint64_t *dev_size,
- bool activation,
+ bool device_exclusive_check,
bool dynamic);
+void LUKS2_reencrypt_lookup_key_ids(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ struct volume_key *vk);
+
int LUKS2_reencrypt_digest_verify(struct crypt_device *cd,
struct luks2_hdr *hdr,
struct volume_key *vks);
diff --git a/lib/luks2/luks2_digest.c b/lib/luks2/luks2_digest.c
index 933b059..293df3e 100644
--- a/lib/luks2/luks2_digest.c
+++ b/lib/luks2/luks2_digest.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, digest handling
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -157,7 +157,7 @@ int LUKS2_digest_dump(struct crypt_device *cd, int digest)
}
int LUKS2_digest_any_matching(struct crypt_device *cd,
- struct luks2_hdr *hdr,
+ struct luks2_hdr *hdr __attribute__((unused)),
const struct volume_key *vk)
{
int digest;
@@ -174,6 +174,18 @@ int LUKS2_digest_verify_by_segment(struct crypt_device *cd,
int segment,
const struct volume_key *vk)
{
+ int r = -EINVAL;
+ unsigned s;
+
+ if (segment == CRYPT_ANY_SEGMENT) {
+ for (s = 0; s < json_segments_count(LUKS2_get_segments_jobj(hdr)); s++) {
+ if ((r = LUKS2_digest_verify_by_digest(cd, LUKS2_digest_by_segment(hdr, s), vk)) >= 0)
+ return r;
+ }
+
+ return -EPERM;
+ }
+
return LUKS2_digest_verify_by_digest(cd, LUKS2_digest_by_segment(hdr, segment), vk);
}
diff --git a/lib/luks2/luks2_digest_pbkdf2.c b/lib/luks2/luks2_digest_pbkdf2.c
index 1009cfb..e8fd00d 100644
--- a/lib/luks2/luks2_digest_pbkdf2.c
+++ b/lib/luks2/luks2_digest_pbkdf2.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, PBKDF2 digest handler (LUKS1 compatible)
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -147,6 +147,9 @@ static int PBKDF2_digest_store(struct crypt_device *cd,
json_object_object_get_ex(hdr->jobj, "digests", &jobj_digests);
}
+ if (!jobj_digest)
+ return -ENOMEM;
+
json_object_object_add(jobj_digest, "type", json_object_new_string("pbkdf2"));
json_object_object_add(jobj_digest, "keyslots", json_object_new_array());
json_object_object_add(jobj_digest, "segments", json_object_new_array());
@@ -169,8 +172,13 @@ static int PBKDF2_digest_store(struct crypt_device *cd,
json_object_object_add(jobj_digest, "digest", json_object_new_string(base64_str));
free(base64_str);
- if (jobj_digests)
- json_object_object_add_by_uint(jobj_digests, digest, jobj_digest);
+ if (jobj_digests) {
+ r = json_object_object_add_by_uint(jobj_digests, digest, jobj_digest);
+ if (r < 0) {
+ json_object_put(jobj_digest);
+ return r;
+ }
+ }
JSON_DBG(cd, jobj_digest, "Digest JSON:");
return 0;
diff --git a/lib/luks2/luks2_disk_metadata.c b/lib/luks2/luks2_disk_metadata.c
index e995959..d7f360c 100644
--- a/lib/luks2/luks2_disk_metadata.c
+++ b/lib/luks2/luks2_disk_metadata.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -256,6 +256,7 @@ static int hdr_read_disk(struct crypt_device *cd,
if (read_lseek_blockwise(devfd, device_block_size(cd, device),
device_alignment(device), hdr_disk,
LUKS2_HDR_BIN_LEN, offset) != LUKS2_HDR_BIN_LEN) {
+ memset(hdr_disk, 0, LUKS2_HDR_BIN_LEN);
return -EIO;
}
@@ -537,11 +538,20 @@ static int validate_luks2_json_object(struct crypt_device *cd, json_object *jobj
}
static json_object *parse_and_validate_json(struct crypt_device *cd,
- const char *json_area, uint64_t max_length)
+ const char *json_area, uint64_t hdr_size)
{
int json_len, r;
- json_object *jobj = parse_json_len(cd, json_area, max_length, &json_len);
+ json_object *jobj;
+ uint64_t max_length;
+
+ if (hdr_size <= LUKS2_HDR_BIN_LEN || hdr_size > LUKS2_HDR_OFFSET_MAX) {
+ log_dbg(cd, "LUKS2 header JSON has bogus size 0x%04" PRIx64 ".", hdr_size);
+ return NULL;
+ }
+
+ max_length = hdr_size - LUKS2_HDR_BIN_LEN;
+ jobj = parse_json_len(cd, json_area, max_length, &json_len);
if (!jobj)
return NULL;
@@ -635,7 +645,7 @@ int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
state_hdr1 = HDR_FAIL;
r = hdr_read_disk(cd, device, &hdr_disk1, &json_area1, 0, 0);
if (r == 0) {
- jobj_hdr1 = parse_and_validate_json(cd, json_area1, be64_to_cpu(hdr_disk1.hdr_size) - LUKS2_HDR_BIN_LEN);
+ jobj_hdr1 = parse_and_validate_json(cd, json_area1, be64_to_cpu(hdr_disk1.hdr_size));
state_hdr1 = jobj_hdr1 ? HDR_OK : HDR_OBSOLETE;
} else if (r == -EIO)
state_hdr1 = HDR_FAIL_IO;
@@ -647,7 +657,7 @@ int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
if (state_hdr1 != HDR_FAIL && state_hdr1 != HDR_FAIL_IO) {
r = hdr_read_disk(cd, device, &hdr_disk2, &json_area2, be64_to_cpu(hdr_disk1.hdr_size), 1);
if (r == 0) {
- jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
+ jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size));
state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
} else if (r == -EIO)
state_hdr2 = HDR_FAIL_IO;
@@ -655,11 +665,12 @@ int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
/*
* No header size, check all known offsets.
*/
+ hdr_disk2.hdr_size = 0;
for (r = -EINVAL,i = 0; r < 0 && i < ARRAY_SIZE(hdr2_offsets); i++)
r = hdr_read_disk(cd, device, &hdr_disk2, &json_area2, hdr2_offsets[i], 1);
if (r == 0) {
- jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
+ jobj_hdr2 = parse_and_validate_json(cd, json_area2, be64_to_cpu(hdr_disk2.hdr_size));
state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
} else if (r == -EIO)
state_hdr2 = HDR_FAIL_IO;
diff --git a/lib/luks2/luks2_internal.h b/lib/luks2/luks2_internal.h
index b564a48..aacc75e 100644
--- a/lib/luks2/luks2_internal.h
+++ b/lib/luks2/luks2_internal.h
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -62,6 +62,7 @@ uint32_t crypt_jobj_get_uint32(json_object *jobj);
json_object *crypt_jobj_new_uint64(uint64_t value);
int json_object_object_add_by_uint(json_object *jobj, unsigned key, json_object *jobj_val);
+int json_object_object_add_by_uint_by_ref(json_object *jobj, unsigned key, json_object **jobj_val_ref);
void json_object_object_del_by_uint(json_object *jobj, unsigned key);
int json_object_copy(json_object *jobj_src, json_object **jobj_dst);
@@ -295,13 +296,24 @@ uint64_t json_segment_get_iv_offset(json_object *jobj_segment);
uint64_t json_segment_get_size(json_object *jobj_segment, unsigned blockwise);
const char *json_segment_get_cipher(json_object *jobj_segment);
uint32_t json_segment_get_sector_size(json_object *jobj_segment);
+int json_segment_get_opal_segment_id(json_object *jobj_segment, uint32_t *ret_opal_segment_id);
+int json_segment_get_opal_key_size(json_object *jobj_segment, size_t *ret_key_size);
bool json_segment_is_backup(json_object *jobj_segment);
json_object *json_segments_get_segment(json_object *jobj_segments, int segment);
unsigned json_segments_count(json_object *jobj_segments);
void json_segment_remove_flag(json_object *jobj_segment, const char *flag);
uint64_t json_segments_get_minimal_offset(json_object *jobj_segments, unsigned blockwise);
json_object *json_segment_create_linear(uint64_t offset, const uint64_t *length, unsigned reencryption);
-json_object *json_segment_create_crypt(uint64_t offset, uint64_t iv_offset, const uint64_t *length, const char *cipher, uint32_t sector_size, unsigned reencryption);
+json_object *json_segment_create_crypt(uint64_t offset, uint64_t iv_offset, const uint64_t *length,
+ const char *cipher, const char *integrity,
+ uint32_t sector_size, unsigned reencryption);
+json_object *json_segment_create_opal(uint64_t offset, const uint64_t *length,
+ uint32_t segment_number, uint32_t key_size);
+json_object *json_segment_create_opal_crypt(uint64_t offset, const uint64_t *length,
+ uint32_t segment_number, uint32_t key_size,
+ uint64_t iv_offset, const char *cipher,
+ const char *integrity, uint32_t sector_size,
+ unsigned reencryption);
int json_segments_segment_in_reencrypt(json_object *jobj_segments);
bool json_segment_cmp(json_object *jobj_segment_1, json_object *jobj_segment_2);
bool json_segment_contains_flag(json_object *jobj_segment, const char *flag_str, size_t len);
@@ -338,10 +350,26 @@ uint64_t LUKS2_segment_size(struct luks2_hdr *hdr,
int segment,
unsigned blockwise);
+bool LUKS2_segment_set_size(struct luks2_hdr *hdr,
+ int segment,
+ const uint64_t *segment_size_bytes);
+
+uint64_t LUKS2_opal_segment_size(struct luks2_hdr *hdr,
+ int segment,
+ unsigned blockwise);
+
int LUKS2_segment_is_type(struct luks2_hdr *hdr,
int segment,
const char *type);
+bool LUKS2_segment_is_hw_opal(struct luks2_hdr *hdr, int segment);
+bool LUKS2_segment_is_hw_opal_crypt(struct luks2_hdr *hdr, int segment);
+bool LUKS2_segment_is_hw_opal_only(struct luks2_hdr *hdr, int segment);
+
+int LUKS2_get_opal_segment_number(struct luks2_hdr *hdr, int segment,
+ uint32_t *ret_opal_segment_number);
+int LUKS2_get_opal_key_size(struct luks2_hdr *hdr, int segment);
+
int LUKS2_segment_by_type(struct luks2_hdr *hdr,
const char *type);
@@ -350,8 +378,11 @@ int LUKS2_last_segment_by_type(struct luks2_hdr *hdr,
int LUKS2_get_default_segment(struct luks2_hdr *hdr);
+bool LUKS2_segments_dynamic_size(struct luks2_hdr *hdr);
+
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr);
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr);
+unsigned LUKS2_reencrypt_vks_count(struct luks2_hdr *hdr);
int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise);
/*
diff --git a/lib/luks2/luks2_json_format.c b/lib/luks2/luks2_json_format.c
index 4456358..100e026 100644
--- a/lib/luks2/luks2_json_format.c
+++ b/lib/luks2/luks2_json_format.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, LUKS2 header format code
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -204,76 +204,33 @@ int LUKS2_generate_hdr(
struct crypt_device *cd,
struct luks2_hdr *hdr,
const struct volume_key *vk,
- const char *cipherName,
- const char *cipherMode,
+ const char *cipher_spec,
const char *integrity,
const char *uuid,
unsigned int sector_size, /* in bytes */
uint64_t data_offset, /* in bytes */
- uint64_t align_offset, /* in bytes */
- uint64_t required_alignment,
- uint64_t metadata_size,
- uint64_t keyslots_size)
+ uint64_t metadata_size_bytes,
+ uint64_t keyslots_size_bytes,
+ uint64_t device_size_bytes,
+ uint32_t opal_segment_number,
+ uint32_t opal_key_size)
{
- struct json_object *jobj_segment, *jobj_integrity, *jobj_keyslots, *jobj_segments, *jobj_config;
- char cipher[128];
+ struct json_object *jobj_segment, *jobj_keyslots, *jobj_segments, *jobj_config;
uuid_t partitionUuid;
int r, digest;
- uint64_t mdev_size;
- if (!metadata_size)
- metadata_size = LUKS2_HDR_16K_LEN;
- hdr->hdr_size = metadata_size;
+ assert(cipher_spec || (opal_key_size > 0 && device_size_bytes));
- if (data_offset && data_offset < get_min_offset(hdr)) {
- log_err(cd, _("Requested data offset is too small."));
- return -EINVAL;
- }
-
- /* Increase keyslot size according to data offset */
- if (!keyslots_size && data_offset)
- keyslots_size = data_offset - get_min_offset(hdr);
-
- /* keyslots size has to be 4 KiB aligned */
- keyslots_size -= (keyslots_size % 4096);
-
- if (keyslots_size > LUKS2_MAX_KEYSLOTS_SIZE)
- keyslots_size = LUKS2_MAX_KEYSLOTS_SIZE;
-
- if (!keyslots_size) {
- assert(LUKS2_DEFAULT_HDR_SIZE > 2 * LUKS2_HDR_OFFSET_MAX);
- keyslots_size = LUKS2_DEFAULT_HDR_SIZE - get_min_offset(hdr);
- /* Decrease keyslots_size due to metadata device being too small */
- if (!device_size(crypt_metadata_device(cd), &mdev_size) &&
- ((keyslots_size + get_min_offset(hdr)) > mdev_size) &&
- device_fallocate(crypt_metadata_device(cd), keyslots_size + get_min_offset(hdr)) &&
- (get_min_offset(hdr) <= mdev_size))
- keyslots_size = mdev_size - get_min_offset(hdr);
- }
-
- /* Decrease keyslots_size if we have smaller data_offset */
- if (data_offset && (keyslots_size + get_min_offset(hdr)) > data_offset) {
- keyslots_size = data_offset - get_min_offset(hdr);
- log_dbg(cd, "Decreasing keyslot area size to %" PRIu64
- " bytes due to the requested data offset %"
- PRIu64 " bytes.", keyslots_size, data_offset);
- }
-
- /* Data offset has priority */
- if (!data_offset && required_alignment) {
- data_offset = size_round_up(get_min_offset(hdr) + keyslots_size,
- (size_t)required_alignment);
- data_offset += align_offset;
- }
+ hdr->hdr_size = metadata_size_bytes;
log_dbg(cd, "Formatting LUKS2 with JSON metadata area %" PRIu64
" bytes and keyslots area %" PRIu64 " bytes.",
- metadata_size - LUKS2_HDR_BIN_LEN, keyslots_size);
+ metadata_size_bytes - LUKS2_HDR_BIN_LEN, keyslots_size_bytes);
- if (keyslots_size < (LUKS2_HDR_OFFSET_MAX - 2*LUKS2_HDR_16K_LEN))
+ if (keyslots_size_bytes < (LUKS2_HDR_OFFSET_MAX - 2*LUKS2_HDR_16K_LEN))
log_std(cd, _("WARNING: keyslots area (%" PRIu64 " bytes) is very small,"
" available LUKS2 keyslot count is very limited.\n"),
- keyslots_size);
+ keyslots_size_bytes);
hdr->seqid = 1;
hdr->version = 2;
@@ -291,54 +248,81 @@ int LUKS2_generate_hdr(
uuid_unparse(partitionUuid, hdr->uuid);
- if (*cipherMode != '\0')
- r = snprintf(cipher, sizeof(cipher), "%s-%s", cipherName, cipherMode);
- else
- r = snprintf(cipher, sizeof(cipher), "%s", cipherName);
- if (r < 0 || (size_t)r >= sizeof(cipher))
- return -EINVAL;
-
hdr->jobj = json_object_new_object();
+ if (!hdr->jobj) {
+ r = -ENOMEM;
+ goto err;
+ }
jobj_keyslots = json_object_new_object();
+ if (!jobj_keyslots) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(hdr->jobj, "keyslots", jobj_keyslots);
json_object_object_add(hdr->jobj, "tokens", json_object_new_object());
jobj_segments = json_object_new_object();
+ if (!jobj_segments) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(hdr->jobj, "segments", jobj_segments);
json_object_object_add(hdr->jobj, "digests", json_object_new_object());
jobj_config = json_object_new_object();
+ if (!jobj_config) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(hdr->jobj, "config", jobj_config);
digest = LUKS2_digest_create(cd, "pbkdf2", hdr, vk);
- if (digest < 0)
+ if (digest < 0) {
+ r = -EINVAL;
goto err;
+ }
- if (LUKS2_digest_segment_assign(cd, hdr, 0, digest, 1, 0) < 0)
+ if (LUKS2_digest_segment_assign(cd, hdr, 0, digest, 1, 0) < 0) {
+ r = -EINVAL;
goto err;
+ }
- jobj_segment = json_segment_create_crypt(data_offset, 0, NULL, cipher, sector_size, 0);
- if (!jobj_segment)
- goto err;
+ if (!opal_key_size)
+ jobj_segment = json_segment_create_crypt(data_offset, 0,
+ NULL, cipher_spec,
+ integrity, sector_size,
+ 0);
+ else if (opal_key_size && cipher_spec)
+ jobj_segment = json_segment_create_opal_crypt(data_offset, &device_size_bytes,
+ opal_segment_number, opal_key_size, 0,
+ cipher_spec, integrity,
+ sector_size, 0);
+ else
+ jobj_segment = json_segment_create_opal(data_offset, &device_size_bytes,
+ opal_segment_number, opal_key_size);
- if (integrity) {
- jobj_integrity = json_object_new_object();
- json_object_object_add(jobj_integrity, "type", json_object_new_string(integrity));
- json_object_object_add(jobj_integrity, "journal_encryption", json_object_new_string("none"));
- json_object_object_add(jobj_integrity, "journal_integrity", json_object_new_string("none"));
- json_object_object_add(jobj_segment, "integrity", jobj_integrity);
+ if (!jobj_segment) {
+ r = -EINVAL;
+ goto err;
}
- json_object_object_add_by_uint(jobj_segments, 0, jobj_segment);
+ if (json_object_object_add_by_uint(jobj_segments, 0, jobj_segment)) {
+ json_object_put(jobj_segment);
+ r = -ENOMEM;
+ goto err;
+ }
- json_object_object_add(jobj_config, "json_size", crypt_jobj_new_uint64(metadata_size - LUKS2_HDR_BIN_LEN));
- json_object_object_add(jobj_config, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));
+ json_object_object_add(jobj_config, "json_size", crypt_jobj_new_uint64(metadata_size_bytes - LUKS2_HDR_BIN_LEN));
+ json_object_object_add(jobj_config, "keyslots_size", crypt_jobj_new_uint64(keyslots_size_bytes));
JSON_DBG(cd, hdr->jobj, "Header JSON:");
return 0;
err:
json_object_put(hdr->jobj);
hdr->jobj = NULL;
- return -EINVAL;
+ return r;
}
int LUKS2_wipe_header_areas(struct crypt_device *cd,
@@ -379,6 +363,14 @@ int LUKS2_wipe_header_areas(struct crypt_device *cd,
offset = get_min_offset(hdr);
length = LUKS2_keyslots_size(hdr);
+ /*
+ * Skip keyslots area wipe in case it is not defined.
+ * Otherwise we would wipe whole data device (length == 0)
+ * starting at offset get_min_offset(hdr).
+ */
+ if (!length)
+ return 0;
+
log_dbg(cd, "Wiping keyslots area (0x%06" PRIx64 " - 0x%06" PRIx64") with random data.",
offset, length + offset);
@@ -409,3 +401,80 @@ int LUKS2_set_keyslots_size(struct luks2_hdr *hdr, uint64_t data_offset)
json_object_object_add(jobj_config, "keyslots_size", crypt_jobj_new_uint64(keyslots_size));
return 0;
}
+
+int LUKS2_hdr_get_storage_params(struct crypt_device *cd,
+ uint64_t alignment_offset_bytes,
+ uint64_t alignment_bytes,
+ uint64_t *ret_metadata_size_bytes,
+ uint64_t *ret_keyslots_size_bytes,
+ uint64_t *ret_data_offset_bytes)
+{
+ uint64_t data_offset_bytes, keyslots_size_bytes, metadata_size_bytes, mdev_size_bytes;
+
+ assert(cd);
+ assert(ret_metadata_size_bytes);
+ assert(ret_keyslots_size_bytes);
+ assert(ret_data_offset_bytes);
+
+ metadata_size_bytes = crypt_get_metadata_size_bytes(cd);
+ keyslots_size_bytes = crypt_get_keyslots_size_bytes(cd);
+ data_offset_bytes = crypt_get_data_offset_sectors(cd) * SECTOR_SIZE;
+
+ if (!metadata_size_bytes)
+ metadata_size_bytes = LUKS2_HDR_16K_LEN;
+
+ if (data_offset_bytes && data_offset_bytes < 2 * metadata_size_bytes) {
+ log_err(cd, _("Requested data offset is too small."));
+ return -EINVAL;
+ }
+
+ /* Increase keyslot size according to data offset */
+ if (!keyslots_size_bytes && data_offset_bytes)
+ keyslots_size_bytes = data_offset_bytes - 2 * metadata_size_bytes;
+
+ /* keyslots size has to be 4 KiB aligned */
+ keyslots_size_bytes -= (keyslots_size_bytes % 4096);
+
+ if (keyslots_size_bytes > LUKS2_MAX_KEYSLOTS_SIZE)
+ keyslots_size_bytes = LUKS2_MAX_KEYSLOTS_SIZE;
+
+ if (!keyslots_size_bytes) {
+ assert(LUKS2_DEFAULT_HDR_SIZE > 2 * LUKS2_HDR_OFFSET_MAX);
+ keyslots_size_bytes = LUKS2_DEFAULT_HDR_SIZE - 2 * metadata_size_bytes;
+ /* Decrease keyslots_size due to metadata device being too small */
+ if (!device_size(crypt_metadata_device(cd), &mdev_size_bytes) &&
+ ((keyslots_size_bytes + 2 * metadata_size_bytes) > mdev_size_bytes) &&
+ device_fallocate(crypt_metadata_device(cd), keyslots_size_bytes + 2 * metadata_size_bytes) &&
+ ((2 * metadata_size_bytes) <= mdev_size_bytes))
+ keyslots_size_bytes = mdev_size_bytes - 2 * metadata_size_bytes;
+ }
+
+ /* Decrease keyslots_size if we have smaller data_offset */
+ if (data_offset_bytes && (keyslots_size_bytes + 2 * metadata_size_bytes) > data_offset_bytes) {
+ keyslots_size_bytes = data_offset_bytes - 2 * metadata_size_bytes;
+ log_dbg(cd, "Decreasing keyslot area size to %" PRIu64
+ " bytes due to the requested data offset %"
+ PRIu64 " bytes.", keyslots_size_bytes, data_offset_bytes);
+ }
+
+ /* Data offset has priority */
+ if (!data_offset_bytes && alignment_bytes) {
+ data_offset_bytes = size_round_up(2 * metadata_size_bytes + keyslots_size_bytes,
+ (size_t)alignment_bytes);
+ data_offset_bytes += alignment_offset_bytes;
+ }
+
+ if (crypt_get_metadata_size_bytes(cd) && (crypt_get_metadata_size_bytes(cd) != metadata_size_bytes))
+ log_std(cd, _("WARNING: LUKS2 metadata size changed to %" PRIu64 " bytes.\n"),
+ metadata_size_bytes);
+
+ if (crypt_get_keyslots_size_bytes(cd) && (crypt_get_keyslots_size_bytes(cd) != keyslots_size_bytes))
+ log_std(cd, _("WARNING: LUKS2 keyslots area size changed to %" PRIu64 " bytes.\n"),
+ keyslots_size_bytes);
+
+ *ret_metadata_size_bytes = metadata_size_bytes;
+ *ret_keyslots_size_bytes = keyslots_size_bytes;
+ *ret_data_offset_bytes = data_offset_bytes;
+
+ return 0;
+}
diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
index 4771f04..22f3e3d 100644
--- a/lib/luks2/luks2_json_metadata.c
+++ b/lib/luks2/luks2_json_metadata.c
@@ -1,9 +1,9 @@
/*
* LUKS - Linux Unified Key Setup v2
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
- * Copyright (C) 2015-2023 Ondrej Kozina
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
+ * Copyright (C) 2015-2024 Ondrej Kozina
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -21,6 +21,7 @@
*/
#include "luks2_internal.h"
+#include "luks2/hw_opal/hw_opal.h"
#include "../integrity/integrity.h"
#include <ctype.h>
#include <uuid/uuid.h>
@@ -88,6 +89,9 @@ struct json_object *LUKS2_array_remove(struct json_object *array, const char *nu
/* Create new array without jobj_removing. */
array_new = json_object_new_array();
+ if (!array_new)
+ return NULL;
+
for (i = 0; i < (int) json_object_array_length(array); i++) {
jobj1 = json_object_array_get_idx(array, i);
if (jobj1 != jobj_removing)
@@ -478,6 +482,9 @@ static int hdr_validate_json_size(struct crypt_device *cd, json_object *hdr_jobj
json = json_object_to_json_string_ext(hdr_jobj,
JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
+ if (!json)
+ return 1;
+
json_area_size = crypt_jobj_get_uint64(jobj1);
json_size = (uint64_t)strlen(json);
@@ -637,6 +644,11 @@ static int reqs_reencrypt_online(uint32_t reqs)
return reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
}
+static int reqs_opal(uint32_t reqs)
+{
+ return reqs & CRYPT_REQUIREMENT_OPAL;
+}
+
/*
* Config section requirements object must be valid.
* Also general segments section must be validated first.
@@ -697,7 +709,7 @@ static int validate_reencrypt_segments(struct crypt_device *cd, json_object *hdr
static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
{
json_object *jobj_segments, *jobj_digests, *jobj_offset, *jobj_size, *jobj_type, *jobj_flags, *jobj;
- uint64_t offset, size;
+ uint64_t offset, size, opal_segment_size;
int i, r, count, first_backup = -1;
struct interval *intervals = NULL;
@@ -777,6 +789,32 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
if (!strcmp(json_object_get_string(jobj_type), "crypt") &&
hdr_validate_crypt_segment(cd, val, key, jobj_digests, size))
return 1;
+
+ /* opal */
+ if (!strncmp(json_object_get_string(jobj_type), "hw-opal", 7)) {
+ if (!size) {
+ log_dbg(cd, "segment type %s does not support dynamic size.",
+ json_object_get_string(jobj_type));
+ return 1;
+ }
+ if (!json_contains(cd, val, key, "Segment", "opal_segment_number", json_type_int) ||
+ !json_contains(cd, val, key, "Segment", "opal_key_size", json_type_int) ||
+ !(jobj_size = json_contains_string(cd, val, key, "Segment", "opal_segment_size")))
+ return 1;
+ if (!numbered(cd, "opal_segment_size", json_object_get_string(jobj_size)))
+ return 1;
+ if (!json_str_to_uint64(jobj_size, &opal_segment_size) || !opal_segment_size) {
+ log_dbg(cd, "Illegal OPAL segment size value.");
+ return 1;
+ }
+ if (size > opal_segment_size) {
+ log_dbg(cd, "segment size overflows OPAL locking range size.");
+ return 1;
+ }
+ if (!strcmp(json_object_get_string(jobj_type), "hw-opal-crypt") &&
+ hdr_validate_crypt_segment(cd, val, key, jobj_digests, size))
+ return 1;
+ }
}
if (first_backup == 0) {
@@ -1575,6 +1613,8 @@ int LUKS2_config_set_flags(struct crypt_device *cd, struct luks2_hdr *hdr, uint3
return 0;
jobj_flags = json_object_new_array();
+ if (!jobj_flags)
+ return -ENOMEM;
for (i = 0; persistent_flags[i].description; i++) {
if (flags & persistent_flags[i].flag) {
@@ -1615,6 +1655,7 @@ static const struct requirement_flag requirements_flags[] = {
{ CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 2, "online-reencrypt-v2" },
{ CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 3, "online-reencrypt-v3" },
{ CRYPT_REQUIREMENT_ONLINE_REENCRYPT, 1, "online-reencrypt" },
+ { CRYPT_REQUIREMENT_OPAL, 1, "opal" },
{ 0, 0, NULL }
};
@@ -1707,7 +1748,7 @@ int LUKS2_config_get_reencrypt_version(struct luks2_hdr *hdr, uint8_t *version)
return -ENOENT;
}
-static const struct requirement_flag *stored_requirement_name_by_id(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t req_id)
+static const struct requirement_flag *stored_requirement_name_by_id(struct luks2_hdr *hdr, uint32_t req_id)
{
json_object *jobj_mandatory, *jobj;
int i, len;
@@ -1786,7 +1827,7 @@ int LUKS2_config_set_requirements(struct crypt_device *cd, struct luks2_hdr *hdr
req_id = reqs & requirements_flags[i].flag;
if (req_id) {
/* retain already stored version of requirement flag */
- req = stored_requirement_name_by_id(cd, hdr, req_id);
+ req = stored_requirement_name_by_id(hdr, req_id);
if (req)
jobj = json_object_new_string(req->description);
else
@@ -2090,6 +2131,8 @@ static void hdr_dump_segments(struct crypt_device *cd, json_object *hdr_jobj)
if (json_object_object_get_ex(jobj_segment, "encryption", &jobj1))
log_std(cd, "\tcipher: %s\n", json_object_get_string(jobj1));
+ else
+ log_std(cd, "\tcipher: (no SW encryption)\n");
if (json_object_object_get_ex(jobj_segment, "sector_size", &jobj1))
log_std(cd, "\tsector: %" PRIu32 " [bytes]\n", crypt_jobj_get_uint32(jobj1));
@@ -2109,6 +2152,18 @@ static void hdr_dump_segments(struct crypt_device *cd, json_object *hdr_jobj)
log_std(cd, "\n");
}
+ json_object_object_get_ex(jobj_segment, "type", &jobj1);
+ if (!strncmp(json_object_get_string(jobj1), "hw-opal", 7)) {
+ log_std(cd, "\tHW OPAL encryption:\n");
+ json_object_object_get_ex(jobj_segment, "opal_segment_number", &jobj1);
+ log_std(cd, "\t\tOPAL segment number: %" PRIu32 "\n", crypt_jobj_get_uint32(jobj1));
+ json_object_object_get_ex(jobj_segment, "opal_key_size", &jobj1);
+ log_std(cd, "\t\tOPAL key: %" PRIu32 " bits\n", crypt_jobj_get_uint32(jobj1) * 8);
+ json_object_object_get_ex(jobj_segment, "opal_segment_size", &jobj1);
+ json_str_to_uint64(jobj1, &value);
+ log_std(cd, "\t\tOPAL segment length: %" PRIu64 " [bytes]\n", value);
+ }
+
log_std(cd, "\n");
}
}
@@ -2584,26 +2639,104 @@ int LUKS2_activate_multi(struct crypt_device *cd,
int LUKS2_activate(struct crypt_device *cd,
const char *name,
- struct volume_key *vk,
+ struct volume_key *crypt_key,
+ struct volume_key *opal_key,
uint32_t flags)
{
int r;
+ bool dynamic, read_lock, write_lock, opal_lock_on_error = false;
+ uint32_t opal_segment_number;
+ uint64_t range_offset_sectors, range_length_sectors, device_length_bytes;
struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
struct crypt_dm_active_device dmdi = {}, dmd = {
.uuid = crypt_get_uuid(cd)
};
+ struct crypt_lock_handle *opal_lh = NULL;
/* do not allow activation when particular requirements detected */
- if ((r = LUKS2_unmet_requirements(cd, hdr, 0, 0)))
+ if ((r = LUKS2_unmet_requirements(cd, hdr, CRYPT_REQUIREMENT_OPAL, 0)))
return r;
- r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
- vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
- crypt_get_data_offset(cd), crypt_get_integrity(cd) ?: "none",
- crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
- if (r < 0)
+ /* Check that cipher is in compatible format */
+ if (!crypt_get_cipher(cd)) {
+ log_err(cd, _("No known cipher specification pattern detected in LUKS2 header."));
+ return -EINVAL;
+ }
+
+ if ((r = LUKS2_get_data_size(hdr, &device_length_bytes, &dynamic)))
return r;
+ if (dynamic && opal_key) {
+ log_err(cd, _("OPAL device must have static device size."));
+ return -EINVAL;
+ }
+
+ if (!dynamic)
+ dmd.size = device_length_bytes / SECTOR_SIZE;
+
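+	/* OPAL-backed segment: verify the locking range attributes and unlock it before building dm tables. */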
+ if (opal_key) {
+ r = crypt_opal_supported(cd, crypt_data_device(cd));
+ if (r < 0)
+ return r;
+
+ r = LUKS2_get_opal_segment_number(hdr, CRYPT_DEFAULT_SEGMENT, &opal_segment_number);
+ if (r < 0)
+ return -EINVAL;
+
+ range_length_sectors = LUKS2_opal_segment_size(hdr, CRYPT_DEFAULT_SEGMENT, 1);
+
+ if (crypt_get_integrity_tag_size(cd)) {
+ if (dmd.size >= range_length_sectors) {
+ log_err(cd, _("Encrypted OPAL device with integrity must be smaller than locking range."));
+ return -EINVAL;
+ }
+ } else {
+ if (range_length_sectors != dmd.size) {
+ log_err(cd, _("OPAL device must have same size as locking range."));
+ return -EINVAL;
+ }
+ }
+
+ range_offset_sectors = crypt_get_data_offset(cd) + crypt_dev_partition_offset(device_path(crypt_data_device(cd)));
+ r = opal_exclusive_lock(cd, crypt_data_device(cd), &opal_lh);
+ if (r < 0) {
+ log_err(cd, _("Failed to acquire OPAL lock on device %s."), device_path(crypt_data_device(cd)));
+ return -EINVAL;
+ }
+
+ r = opal_range_check_attributes_and_get_lock_state(cd, crypt_data_device(cd), opal_segment_number,
+ opal_key, &range_offset_sectors, &range_length_sectors,
+ &read_lock, &write_lock);
+ if (r < 0)
+ goto out;
+
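+		/* Re-lock on failure only if the range was fully locked (read and write) before activation. */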
+ opal_lock_on_error = read_lock && write_lock;
+ if (!opal_lock_on_error && !(flags & CRYPT_ACTIVATE_REFRESH))
+ log_std(cd, _("OPAL device is %s already unlocked.\n"),
+ device_path(crypt_data_device(cd)));
+
+ r = opal_unlock(cd, crypt_data_device(cd), opal_segment_number, opal_key);
+ if (r < 0)
+ goto out;
+ }
+
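+	/* Segments with software encryption (crypt, hw-opal-crypt) need dm-crypt; plain hw-opal maps linearly. */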
+ if (LUKS2_segment_is_type(hdr, CRYPT_DEFAULT_SEGMENT, "crypt") ||
+ LUKS2_segment_is_type(hdr, CRYPT_DEFAULT_SEGMENT, "hw-opal-crypt")) {
+ r = dm_crypt_target_set(&dmd.segment, 0,
+ dmd.size, crypt_data_device(cd),
+ crypt_key, crypt_get_cipher_spec(cd),
+ crypt_get_iv_offset(cd), crypt_get_data_offset(cd),
+ crypt_get_integrity(cd) ?: "none",
+ crypt_get_integrity_tag_size(cd),
+ crypt_get_sector_size(cd));
+ } else
+ r = dm_linear_target_set(&dmd.segment, 0,
+ dmd.size, crypt_data_device(cd),
+ crypt_get_data_offset(cd));
+
+ if (r < 0)
+ goto out;
+
/* Add persistent activation flags */
if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
LUKS2_config_get_flags(cd, hdr, &dmd.flags);
@@ -2613,29 +2746,47 @@ int LUKS2_activate(struct crypt_device *cd,
if (crypt_get_integrity_tag_size(cd)) {
if (!LUKS2_integrity_compatible(hdr)) {
log_err(cd, _("Unsupported device integrity configuration."));
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
if (dmd.flags & CRYPT_ACTIVATE_ALLOW_DISCARDS) {
log_err(cd, _("Discard/TRIM is not supported."));
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
r = INTEGRITY_create_dmd_device(cd, NULL, NULL, NULL, NULL, &dmdi, dmd.flags, 0);
if (r)
- return r;
+ goto out;
+
+ if (!dynamic && dmdi.size != dmd.size) {
+ log_err(cd, _("Underlying dm-integrity device with unexpected provided data sectors."));
+ r = -EINVAL;
+ goto out;
+ }
dmdi.flags |= CRYPT_ACTIVATE_PRIVATE;
dmdi.uuid = dmd.uuid;
dmd.segment.u.crypt.offset = 0;
- dmd.segment.size = dmdi.segment.size;
+ if (dynamic)
+ dmd.segment.size = dmdi.segment.size;
- r = create_or_reload_device_with_integrity(cd, name, CRYPT_LUKS2, &dmd, &dmdi);
+ r = create_or_reload_device_with_integrity(cd, name,
+ opal_key ? CRYPT_LUKS2_HW_OPAL : CRYPT_LUKS2,
+ &dmd, &dmdi);
} else
- r = create_or_reload_device(cd, name, CRYPT_LUKS2, &dmd);
+ r = create_or_reload_device(cd, name,
+ opal_key ? CRYPT_LUKS2_HW_OPAL : CRYPT_LUKS2,
+ &dmd);
dm_targets_free(cd, &dmd);
dm_targets_free(cd, &dmdi);
+out:
+ if (r < 0 && opal_lock_on_error)
+ opal_lock(cd, crypt_data_device(cd), opal_segment_number);
+
+ opal_exclusive_unlock(cd, opal_lh);
return r;
}
@@ -2665,13 +2816,15 @@ static bool contains_reencryption_helper(char **names)
int LUKS2_deactivate(struct crypt_device *cd, const char *name, struct luks2_hdr *hdr, struct crypt_dm_active_device *dmd, uint32_t flags)
{
+ bool dm_opal_uuid;
int r, ret;
struct dm_target *tgt;
crypt_status_info ci;
struct crypt_dm_active_device dmdc;
+ uint32_t opal_segment_number;
char **dep, deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = { 0 };
const char *namei = NULL;
- struct crypt_lock_handle *reencrypt_lock = NULL;
+ struct crypt_lock_handle *reencrypt_lock = NULL, *opal_lh = NULL;
if (!dmd || !dmd->uuid || strncmp(CRYPT_LUKS2, dmd->uuid, sizeof(CRYPT_LUKS2)-1))
return -EINVAL;
@@ -2684,6 +2837,11 @@ int LUKS2_deactivate(struct crypt_device *cd, const char *name, struct luks2_hdr
if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1))
return -EINVAL;
+ /* check if active device has LUKS2-OPAL dm uuid prefix */
+ dm_opal_uuid = !crypt_uuid_type_cmp(dmd->uuid, CRYPT_LUKS2_HW_OPAL);
+ if (dm_opal_uuid && hdr && !LUKS2_segment_is_hw_opal(hdr, CRYPT_DEFAULT_SEGMENT))
+ return -EINVAL;
+
tgt = &dmd->segment;
/* TODO: We have LUKS2 dependencies now */
@@ -2726,7 +2884,8 @@ int LUKS2_deactivate(struct crypt_device *cd, const char *name, struct luks2_hdr
tgt = &dmdc.segment;
while (tgt) {
if (tgt->type == DM_CRYPT)
- crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description, LOGON_KEY);
+ crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description,
+ LOGON_KEY);
tgt = tgt->next;
}
}
@@ -2761,7 +2920,8 @@ int LUKS2_deactivate(struct crypt_device *cd, const char *name, struct luks2_hdr
tgt = &dmdc.segment;
while (tgt) {
if (tgt->type == DM_CRYPT)
- crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description, LOGON_KEY);
+ crypt_drop_keyring_key_by_description(cd, tgt->u.crypt.vk->key_description,
+ LOGON_KEY);
tgt = tgt->next;
}
}
@@ -2773,7 +2933,35 @@ int LUKS2_deactivate(struct crypt_device *cd, const char *name, struct luks2_hdr
r = ret;
}
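+	/* The dm device is gone; lock the OPAL locking range again. */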
+ if (!r && dm_opal_uuid) {
+ if (hdr) {
+ if (LUKS2_get_opal_segment_number(hdr, CRYPT_DEFAULT_SEGMENT, &opal_segment_number)) {
+ log_err(cd, _("Device %s was deactivated but hardware OPAL device cannot be locked."),
+ name);
+ r = -EINVAL;
+ goto out;
+ }
+ } else {
+ /* Guess OPAL range number for LUKS2-OPAL device with missing header */
+ opal_segment_number = 1;
+ ret = crypt_dev_get_partition_number(device_path(crypt_data_device(cd)));
+ if (ret > 0)
+ opal_segment_number = ret;
+ }
+
+ if (crypt_data_device(cd)) {
+ r = opal_exclusive_lock(cd, crypt_data_device(cd), &opal_lh);
+ if (r < 0) {
+ log_err(cd, _("Failed to acquire OPAL lock on device %s."), device_path(crypt_data_device(cd)));
+ goto out;
+ }
+ }
+
+ if (!crypt_data_device(cd) || opal_lock(cd, crypt_data_device(cd), opal_segment_number))
+ log_err(cd, _("Device %s was deactivated but hardware OPAL device cannot be locked."), name);
+ }
out:
+ opal_exclusive_unlock(cd, opal_lh);
LUKS2_reencrypt_unlock(cd, reencrypt_lock);
dep = deps;
while (*dep)
@@ -2807,6 +2995,8 @@ int LUKS2_unmet_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uin
log_err(cd, _("Operation incompatible with device marked for legacy reencryption. Aborting."));
if (reqs_reencrypt_online(reqs) && !quiet)
log_err(cd, _("Operation incompatible with device marked for LUKS2 reencryption. Aborting."));
+ if (reqs_opal(reqs) && !quiet)
+ log_err(cd, _("Operation incompatible with device using OPAL. Aborting."));
/* any remaining unmasked requirement fails the check */
return reqs ? -EINVAL : 0;
@@ -2859,6 +3049,20 @@ int json_object_object_add_by_uint(json_object *jobj, unsigned key, json_object
#endif
}
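+/* As json_object_object_add_by_uint(), but clears the caller's reference on success so ownership transfer is explicit. */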
+int json_object_object_add_by_uint_by_ref(json_object *jobj, unsigned key, json_object **jobj_val_ref)
+{
+ int r;
+
+ assert(jobj);
+ assert(jobj_val_ref);
+
+ r = json_object_object_add_by_uint(jobj, key, *jobj_val_ref);
+ if (!r)
+ *jobj_val_ref = NULL;
+
+ return r;
+}
+
/* jobj_dst must contain pointer initialized to NULL (see json-c json_object_deep_copy API) */
int json_object_copy(json_object *jobj_src, json_object **jobj_dst)
{
@@ -2872,3 +3076,58 @@ int json_object_copy(json_object *jobj_src, json_object **jobj_dst)
return *jobj_dst ? 0 : -1;
#endif
}
+
+int LUKS2_split_crypt_and_opal_keys(struct crypt_device *cd __attribute__((unused)),
+ struct luks2_hdr *hdr,
+ const struct volume_key *vk,
+ struct volume_key **ret_crypt_key,
+ struct volume_key **ret_opal_key)
+{
+ int r;
+ uint32_t opal_segment_number;
+ size_t opal_user_key_size;
+ json_object *jobj_segment;
+ struct volume_key *opal_key, *crypt_key;
+
+ assert(vk);
+ assert(ret_crypt_key);
+ assert(ret_opal_key);
+
+ jobj_segment = LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT);
+ if (!jobj_segment)
+ return -EINVAL;
+
+ r = json_segment_get_opal_segment_id(jobj_segment, &opal_segment_number);
+ if (r < 0)
+ return -EINVAL;
+
+ r = json_segment_get_opal_key_size(jobj_segment, &opal_user_key_size);
+ if (r < 0)
+ return -EINVAL;
+
+ if (vk->keylength < opal_user_key_size)
+ return -EINVAL;
+
+ /* OPAL SEGMENT only */
+ if (vk->keylength == opal_user_key_size) {
+ *ret_crypt_key = NULL;
+ *ret_opal_key = NULL;
+ return 0;
+ }
+
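+	/* Combined volume key layout: OPAL user key first, followed by the software encryption key. */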
+ opal_key = crypt_alloc_volume_key(opal_user_key_size, vk->key);
+ if (!opal_key)
+ return -ENOMEM;
+
+ crypt_key = crypt_alloc_volume_key(vk->keylength - opal_user_key_size,
+ vk->key + opal_user_key_size);
+ if (!crypt_key) {
+ crypt_free_volume_key(opal_key);
+ return -ENOMEM;
+ }
+
+ *ret_opal_key = opal_key;
+ *ret_crypt_key = crypt_key;
+
+ return 0;
+}
diff --git a/lib/luks2/luks2_keyslot.c b/lib/luks2/luks2_keyslot.c
index 5cf4b83..40816eb 100644
--- a/lib/luks2/luks2_keyslot.c
+++ b/lib/luks2/luks2_keyslot.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, keyslot handling
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -578,6 +578,8 @@ int LUKS2_keyslot_open(struct crypt_device *cd,
int r_prio, r = -EINVAL;
hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ if (!hdr)
+ return -EINVAL;
if (keyslot == CRYPT_ANY_SLOT) {
r_prio = LUKS2_keyslot_open_priority(cd, hdr, CRYPT_SLOT_PRIORITY_PREFER,
@@ -676,8 +678,7 @@ int LUKS2_keyslot_store(struct crypt_device *cd,
int LUKS2_keyslot_wipe(struct crypt_device *cd,
struct luks2_hdr *hdr,
- int keyslot,
- int wipe_area_only)
+ int keyslot)
{
struct device *device = crypt_metadata_device(cd);
uint64_t area_offset, area_length;
@@ -694,9 +695,6 @@ int LUKS2_keyslot_wipe(struct crypt_device *cd,
if (!jobj_keyslot)
return -ENOENT;
- if (wipe_area_only)
- log_dbg(cd, "Wiping keyslot %d area only.", keyslot);
-
r = LUKS2_device_write_lock(cd, hdr, device);
if (r)
return r;
@@ -720,9 +718,6 @@ int LUKS2_keyslot_wipe(struct crypt_device *cd,
}
}
- if (wipe_area_only)
- goto out;
-
/* Slot specific wipe */
if (h) {
r = h->wipe(cd, keyslot);
@@ -803,6 +798,9 @@ int placeholder_keyslot_alloc(struct crypt_device *cd,
return -EINVAL;
jobj_keyslot = json_object_new_object();
+ if (!jobj_keyslot)
+ return -ENOMEM;
+
json_object_object_add(jobj_keyslot, "type", json_object_new_string("placeholder"));
/*
* key_size = -1 makes placeholder keyslot impossible to pass validation.
@@ -813,11 +811,19 @@ int placeholder_keyslot_alloc(struct crypt_device *cd,
/* Area object */
jobj_area = json_object_new_object();
+ if (!jobj_area) {
+ json_object_put(jobj_keyslot);
+ return -ENOMEM;
+ }
+
json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
json_object_object_add(jobj_keyslot, "area", jobj_area);
- json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+ if (json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot)) {
+ json_object_put(jobj_keyslot);
+ return -EINVAL;
+ }
return 0;
}
@@ -899,7 +905,7 @@ int LUKS2_keyslots_validate(struct crypt_device *cd, json_object *hdr_jobj)
return 0;
}
-void LUKS2_keyslots_repair(struct crypt_device *cd, json_object *jobj_keyslots)
+void LUKS2_keyslots_repair(struct crypt_device *cd __attribute__((unused)), json_object *jobj_keyslots)
{
const keyslot_handler *h;
json_object *jobj_type;
@@ -964,14 +970,17 @@ int LUKS2_keyslot_swap(struct crypt_device *cd, struct luks2_hdr *hdr,
json_object_object_del_by_uint(jobj_keyslots, keyslot);
r = json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot2);
if (r < 0) {
+ json_object_put(jobj_keyslot2);
log_dbg(cd, "Failed to swap keyslot %d.", keyslot);
return r;
}
json_object_object_del_by_uint(jobj_keyslots, keyslot2);
r = json_object_object_add_by_uint(jobj_keyslots, keyslot2, jobj_keyslot);
- if (r < 0)
+ if (r < 0) {
+ json_object_put(jobj_keyslot);
log_dbg(cd, "Failed to swap keyslot2 %d.", keyslot2);
+ }
return r;
}
diff --git a/lib/luks2/luks2_keyslot_luks2.c b/lib/luks2/luks2_keyslot_luks2.c
index 491dcad..2c1f400 100644
--- a/lib/luks2/luks2_keyslot_luks2.c
+++ b/lib/luks2/luks2_keyslot_luks2.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, LUKS2 type keyslot handler
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -307,7 +307,7 @@ static int luks2_keyslot_get_key(struct crypt_device *cd,
char *volume_key, size_t volume_key_len)
{
struct volume_key *derived_key = NULL;
- struct crypt_pbkdf_type pbkdf;
+ struct crypt_pbkdf_type pbkdf, *cd_pbkdf;
char *AfKey = NULL;
size_t AFEKSize;
const char *af_hash = NULL;
@@ -361,6 +361,16 @@ static int luks2_keyslot_get_key(struct crypt_device *cd,
}
/*
+	 * Print a warning when the keyslot requires more memory than is available
+	 * (if the maximum memory was adjusted - no swap, not enough memory),
+	 * but stay silent if the user intentionally set the keyslot memory cost above the default limit.
+ */
+ cd_pbkdf = crypt_get_pbkdf(cd);
+ if (cd_pbkdf->max_memory_kb && pbkdf.max_memory_kb > cd_pbkdf->max_memory_kb &&
+ pbkdf.max_memory_kb <= DEFAULT_LUKS2_MEMORY_KB)
+ log_std(cd, _("Warning: keyslot operation could fail as it requires more than available memory.\n"));
+
+ /*
* If requested, serialize unlocking for memory-hard KDF. Usually NOOP.
*/
if (pbkdf.max_memory_kb > MIN_MEMORY_FOR_SERIALIZE_LOCK_KB)
@@ -512,23 +522,42 @@ static int luks2_keyslot_alloc(struct crypt_device *cd,
}
jobj_keyslot = json_object_new_object();
+ if (!jobj_keyslot) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(jobj_keyslot, "type", json_object_new_string("luks2"));
json_object_object_add(jobj_keyslot, "key_size", json_object_new_int(volume_key_len));
/* AF object */
jobj_af = json_object_new_object();
+ if (!jobj_af) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(jobj_af, "type", json_object_new_string("luks1"));
json_object_object_add(jobj_af, "stripes", json_object_new_int(params->af.luks1.stripes));
json_object_object_add(jobj_keyslot, "af", jobj_af);
/* Area object */
jobj_area = json_object_new_object();
+ if (!jobj_area) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(jobj_area, "type", json_object_new_string("raw"));
json_object_object_add(jobj_area, "offset", crypt_jobj_new_uint64(area_offset));
json_object_object_add(jobj_area, "size", crypt_jobj_new_uint64(area_length));
json_object_object_add(jobj_keyslot, "area", jobj_area);
- json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+ r = json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+ if (r) {
+ json_object_put(jobj_keyslot);
+ return r;
+ }
r = luks2_keyslot_update_json(cd, jobj_keyslot, params);
@@ -541,6 +570,9 @@ static int luks2_keyslot_alloc(struct crypt_device *cd,
json_object_object_del_by_uint(jobj_keyslots, keyslot);
return r;
+err:
+ json_object_put(jobj_keyslot);
+ return r;
}
static int luks2_keyslot_open(struct crypt_device *cd,
diff --git a/lib/luks2/luks2_keyslot_reenc.c b/lib/luks2/luks2_keyslot_reenc.c
index 4291d0c..e847673 100644
--- a/lib/luks2/luks2_keyslot_reenc.c
+++ b/lib/luks2/luks2_keyslot_reenc.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, reencryption keyslot handler
*
- * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2016-2023 Ondrej Kozina
+ * Copyright (C) 2016-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2024 Ondrej Kozina
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -145,7 +145,12 @@ static int reenc_keyslot_alloc(struct crypt_device *cd,
else
json_object_object_add(jobj_keyslot, "direction", json_object_new_string("backward"));
- json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+ r = json_object_object_add_by_uint(jobj_keyslots, keyslot, jobj_keyslot);
+ if (r) {
+ json_object_put(jobj_keyslot);
+ return r;
+ }
+
if (LUKS2_check_json_size(cd, hdr)) {
log_dbg(cd, "New keyslot too large to fit in free metadata space.");
json_object_object_del_by_uint(jobj_keyslots, keyslot);
@@ -371,8 +376,7 @@ static int reenc_keyslot_validate(struct crypt_device *cd, json_object *jobj_key
return 0;
}
-static int reenc_keyslot_update_needed(struct crypt_device *cd,
- json_object *jobj_keyslot,
+static int reenc_keyslot_update_needed(json_object *jobj_keyslot,
const struct crypt_params_reencrypt *params,
size_t alignment)
{
@@ -537,8 +541,7 @@ static int reenc_keyslot_load_resilience(struct crypt_device *cd,
return reenc_keyslot_load_resilience_secondary(cd, type, jobj_area, area_length, rp);
}
-static bool reenc_keyslot_update_is_valid(struct crypt_device *cd,
- json_object *jobj_area,
+static bool reenc_keyslot_update_is_valid(json_object *jobj_area,
const struct crypt_params_reencrypt *params)
{
const char *type;
@@ -589,7 +592,7 @@ static int reenc_keyslot_update(struct crypt_device *cd,
if (!params || !params->resilience)
jobj_area_new = reencrypt_keyslot_area_jobj_update_block_size(cd, jobj_area, alignment);
else {
- if (!reenc_keyslot_update_is_valid(cd, jobj_area, params)) {
+ if (!reenc_keyslot_update_is_valid(jobj_area, params)) {
log_err(cd, _("Invalid reencryption resilience mode change requested."));
return -EINVAL;
}
@@ -661,7 +664,7 @@ int LUKS2_keyslot_reencrypt_update_needed(struct crypt_device *cd,
strcmp(json_object_get_string(jobj_type), "reencrypt"))
return -EINVAL;
- r = reenc_keyslot_update_needed(cd, jobj_keyslot, params, alignment);
+ r = reenc_keyslot_update_needed(jobj_keyslot, params, alignment);
if (!r)
log_dbg(cd, "No update of reencrypt keyslot needed.");
diff --git a/lib/luks2/luks2_luks1_convert.c b/lib/luks2/luks2_luks1_convert.c
index 6d3fa1e..9513217 100644
--- a/lib/luks2/luks2_luks1_convert.c
+++ b/lib/luks2/luks2_luks1_convert.c
@@ -1,9 +1,9 @@
/*
* LUKS - Linux Unified Key Setup v2, LUKS1 conversion code
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Ondrej Kozina
- * Copyright (C) 2015-2023 Milan Broz
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Ondrej Kozina
+ * Copyright (C) 2015-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -67,11 +67,21 @@ static int json_luks1_keyslot(const struct luks_phdr *hdr_v1, int keyslot, struc
int r;
keyslot_obj = json_object_new_object();
+ if (!keyslot_obj) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(keyslot_obj, "type", json_object_new_string("luks2"));
json_object_object_add(keyslot_obj, "key_size", json_object_new_int64(hdr_v1->keyBytes));
/* KDF */
jobj_kdf = json_object_new_object();
+ if (!jobj_kdf) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(jobj_kdf, "type", json_object_new_string(CRYPT_KDF_PBKDF2));
json_object_object_add(jobj_kdf, "hash", json_object_new_string(hdr_v1->hashSpec));
json_object_object_add(jobj_kdf, "iterations", json_object_new_int64(hdr_v1->keyblock[keyslot].passwordIterations));
@@ -89,6 +99,11 @@ static int json_luks1_keyslot(const struct luks_phdr *hdr_v1, int keyslot, struc
/* AF */
jobj_af = json_object_new_object();
+ if (!jobj_af) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(jobj_af, "type", json_object_new_string("luks1"));
json_object_object_add(jobj_af, "hash", json_object_new_string(hdr_v1->hashSpec));
/* stripes field ignored, fixed to LUKS_STRIPES (4000) */
@@ -97,6 +112,11 @@ static int json_luks1_keyslot(const struct luks_phdr *hdr_v1, int keyslot, struc
/* Area */
jobj_area = json_object_new_object();
+ if (!jobj_area) {
+ r = -ENOMEM;
+ goto err;
+ }
+
json_object_object_add(jobj_area, "type", json_object_new_string("raw"));
/* encryption algorithm field */
@@ -124,6 +144,9 @@ static int json_luks1_keyslot(const struct luks_phdr *hdr_v1, int keyslot, struc
*keyslot_object = keyslot_obj;
return 0;
+err:
+ json_object_put(keyslot_obj);
+ return r;
}
static int json_luks1_keyslots(const struct luks_phdr *hdr_v1, struct json_object **keyslots_object)
@@ -143,7 +166,12 @@ static int json_luks1_keyslots(const struct luks_phdr *hdr_v1, struct json_objec
json_object_put(keyslot_obj);
return r;
}
- json_object_object_add_by_uint(keyslot_obj, keyslot, field);
+ r = json_object_object_add_by_uint(keyslot_obj, keyslot, field);
+ if (r) {
+ json_object_put(field);
+ json_object_put(keyslot_obj);
+ return r;
+ }
}
*keyslots_object = keyslot_obj;
@@ -238,7 +266,12 @@ static int json_luks1_segments(const struct luks_phdr *hdr_v1, struct json_objec
json_object_put(segments_obj);
return r;
}
- json_object_object_add_by_uint(segments_obj, 0, field);
+ r = json_object_object_add_by_uint(segments_obj, 0, field);
+ if (r) {
+ json_object_put(field);
+ json_object_put(segments_obj);
+ return r;
+ }
*segments_object = segments_obj;
return 0;
diff --git a/lib/luks2/luks2_reencrypt.c b/lib/luks2/luks2_reencrypt.c
index b0dcd6d..b7af206 100644
--- a/lib/luks2/luks2_reencrypt.c
+++ b/lib/luks2/luks2_reencrypt.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, reencryption helpers
*
- * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2015-2023 Ondrej Kozina
+ * Copyright (C) 2015-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2015-2024 Ondrej Kozina
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -162,6 +162,7 @@ static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
return reencrypt_data_offset(hdr, 0);
}
#endif
+
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
int segment = LUKS2_get_segment_id_by_flag(hdr, new ? "backup-final" : "backup-previous");
@@ -182,6 +183,21 @@ int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
return reencrypt_digest(hdr, 0);
}
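+/* Number of distinct volume keys referenced by reencryption digests (old and new segments may share one). */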
+unsigned LUKS2_reencrypt_vks_count(struct luks2_hdr *hdr)
+{
+ int digest_old, digest_new;
+ unsigned vks_count = 0;
+
+ if ((digest_new = LUKS2_reencrypt_digest_new(hdr)) >= 0)
+ vks_count++;
+ if ((digest_old = LUKS2_reencrypt_digest_old(hdr)) >= 0) {
+ if (digest_old != digest_new)
+ vks_count++;
+ }
+
+ return vks_count;
+}
+
/* none, checksums, journal or shift */
static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
{
@@ -224,7 +240,7 @@ static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
{
int reenc_seg, i = 0;
- json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
+ json_object *jobj, *jobj_copy = NULL, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
uint64_t tmp;
if (!rh->jobj_segs_hot || !jobj_segs_post)
@@ -239,17 +255,21 @@ static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh,
while (i < reenc_seg) {
jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
- if (!jobj_copy)
+ if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy)))
goto err;
- json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
}
+ jobj_copy = NULL;
- if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
- if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
+ jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
+ if (!jobj) {
+ jobj = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg);
+ if (!jobj || json_object_copy(jobj, &jobj_seg_new))
goto err;
json_segment_remove_flag(jobj_seg_new, "in-reencryption");
tmp = rh->length;
} else {
+ if (json_object_copy(jobj, &jobj_seg_new))
+ goto err;
json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
@@ -257,10 +277,12 @@ static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh,
/* alter size of new segment, reenc_seg == 0 we're finished */
json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
- json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);
+ if (!json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new))
+ return jobj_segs_post;
- return jobj_segs_post;
err:
+ json_object_put(jobj_seg_new);
+ json_object_put(jobj_copy);
json_object_put(jobj_segs_post);
return NULL;
}
@@ -271,7 +293,7 @@ static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *
{
int sg, crypt_seg, i = 0;
uint64_t segment_size;
- json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
+ json_object *jobj_seg_shrunk = NULL, *jobj_seg_new = NULL, *jobj_copy = NULL, *jobj_enc_seg = NULL,
*jobj_segs_hot = json_object_new_object();
if (!jobj_segs_hot)
@@ -290,38 +312,41 @@ static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *
rh->offset >> SECTOR_SHIFT,
&rh->length,
reencrypt_segment_cipher_new(hdr),
+ NULL, /* integrity */
reencrypt_get_sector_size_new(hdr),
1);
while (i < sg) {
jobj_copy = LUKS2_get_segment_jobj(hdr, i);
- if (!jobj_copy)
+ if (!jobj_copy || json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy)))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
}
+ jobj_copy = NULL;
segment_size = LUKS2_segment_size(hdr, sg, 0);
if (segment_size > rh->length) {
- jobj_seg_shrunk = NULL;
if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
goto err;
json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_seg_shrunk))
+ goto err;
}
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
- jobj_enc_seg = NULL; /* see err: label */
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_enc_seg))
+ goto err;
/* first crypt segment after encryption ? */
if (crypt_seg >= 0) {
jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
- if (!jobj_seg_new)
+ if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new)))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
}
return jobj_segs_hot;
err:
+ json_object_put(jobj_copy);
+ json_object_put(jobj_seg_new);
+ json_object_put(jobj_seg_shrunk);
json_object_put(jobj_enc_seg);
json_object_put(jobj_segs_hot);
@@ -343,6 +368,7 @@ static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
segment_length,
reencrypt_segment_cipher_new(hdr),
+ NULL, /* integrity */
reencrypt_get_sector_size_new(hdr), 0);
case CRYPT_REENCRYPT_DECRYPT:
return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
@@ -357,7 +383,7 @@ static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd
uint64_t data_offset)
{
int reenc_seg;
- json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
+ json_object *jobj_old_seg, *jobj_new_seg_after = NULL, *jobj_old_seg_copy = NULL,
*jobj_segs_post = json_object_new_object();
uint64_t fixed_length = rh->offset + rh->length;
@@ -366,7 +392,7 @@ static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd
reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
if (reenc_seg < 0)
- return NULL;
+ goto err;
jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
@@ -375,24 +401,26 @@ static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd
* Set size to 'dynamic' again.
*/
jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
- if (!jobj_new_seg_after)
+ if (!jobj_new_seg_after || json_object_object_add_by_uint_by_ref(jobj_segs_post, 0, &jobj_new_seg_after))
goto err;
- json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
if (jobj_old_seg) {
if (rh->fixed_length) {
if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
goto err;
- jobj_old_seg = jobj_old_seg_copy;
fixed_length = rh->device_size - fixed_length;
- json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
+ json_object_object_add(jobj_old_seg_copy, "size", crypt_jobj_new_uint64(fixed_length));
} else
- json_object_get(jobj_old_seg);
- json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
+ jobj_old_seg_copy = json_object_get(jobj_old_seg);
+
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_post, 1, &jobj_old_seg_copy))
+ goto err;
}
return jobj_segs_post;
err:
+ json_object_put(jobj_new_seg_after);
+ json_object_put(jobj_old_seg_copy);
json_object_put(jobj_segs_post);
return NULL;
}
@@ -405,7 +433,7 @@ static json_object *reencrypt_make_post_segments_backward(struct crypt_device *c
int reenc_seg;
uint64_t fixed_length;
- json_object *jobj_new_seg_after, *jobj_old_seg,
+ json_object *jobj_new_seg_after = NULL, *jobj_old_seg = NULL,
*jobj_segs_post = json_object_new_object();
if (!rh->jobj_segs_hot || !jobj_segs_post)
@@ -413,22 +441,26 @@ static json_object *reencrypt_make_post_segments_backward(struct crypt_device *c
reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
if (reenc_seg < 0)
- return NULL;
+ goto err;
jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
- if (jobj_old_seg)
- json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
+ if (jobj_old_seg) {
+ json_object_get(jobj_old_seg);
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_post, reenc_seg - 1, &jobj_old_seg))
+ goto err;
+ }
+
if (rh->fixed_length && rh->offset) {
fixed_length = rh->device_size - rh->offset;
jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
} else
jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
- if (!jobj_new_seg_after)
- goto err;
- json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
- return jobj_segs_post;
+ if (jobj_new_seg_after && !json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after))
+ return jobj_segs_post;
err:
+ json_object_put(jobj_new_seg_after);
+ json_object_put(jobj_old_seg);
json_object_put(jobj_segs_post);
return NULL;
}
@@ -448,6 +480,7 @@ static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
segment_length,
reencrypt_segment_cipher_new(hdr),
+ NULL, /* integrity */
reencrypt_get_sector_size_new(hdr), 1);
case CRYPT_REENCRYPT_DECRYPT:
return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
@@ -472,6 +505,7 @@ static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
segment_length,
reencrypt_segment_cipher_old(hdr),
+ NULL, /* integrity */
reencrypt_get_sector_size_old(hdr),
0);
break;
@@ -488,38 +522,40 @@ static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
uint64_t device_size,
uint64_t data_offset)
{
- json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
uint64_t fixed_length, tmp = rh->offset + rh->length;
+ json_object *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL,
+ *jobj_old_seg = NULL, *jobj_new_seg = NULL;
unsigned int sg = 0;
- jobj_segs_hot = json_object_new_object();
if (!jobj_segs_hot)
return NULL;
if (rh->offset) {
jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
- if (!jobj_new_seg)
+ if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
}
jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
if (!jobj_reenc_seg)
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
+ goto err;
if (tmp < device_size) {
fixed_length = device_size - tmp;
jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp),
rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
- if (!jobj_old_seg)
+ if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
}
return jobj_segs_hot;
err:
+ json_object_put(jobj_reenc_seg);
+ json_object_put(jobj_old_seg);
+ json_object_put(jobj_new_seg);
json_object_put(jobj_segs_hot);
return NULL;
}
@@ -528,29 +564,31 @@ static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_devic
struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
uint64_t device_size, uint64_t data_offset)
{
- json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress;
+ json_object *jobj, *jobj_segs_hot = json_object_new_object(), *jobj_reenc_seg = NULL,
+ *jobj_old_seg = NULL, *jobj_new_seg = NULL;
unsigned int sg = 0;
- jobj_segs_hot = json_object_new_object();
if (!jobj_segs_hot)
return NULL;
if (rh->offset) {
- jobj_new_seg = LUKS2_get_segment_jobj(hdr, 0);
- if (!jobj_new_seg)
+ jobj = LUKS2_get_segment_jobj(hdr, 0);
+ if (!jobj)
+ goto err;
+
+ jobj_new_seg = json_object_get(jobj);
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
if (linear_length) {
jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh,
data_offset,
- json_segment_get_size(jobj_new_seg, 0),
+ json_segment_get_size(jobj, 0),
0,
&linear_length);
- if (!jobj_new_seg)
+ if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
}
}
@@ -558,27 +596,29 @@ static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_devic
rh->offset,
rh->offset,
&rh->length);
- if (!jobj_reenc_seg)
+ if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
-
- if (!rh->offset && (jobj_new_seg = LUKS2_get_segment_jobj(hdr, 1)) &&
- !json_segment_is_backup(jobj_new_seg))
- json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
- else if (tmp < device_size) {
+ if (!rh->offset && (jobj = LUKS2_get_segment_jobj(hdr, 1)) &&
+ !json_segment_is_backup(jobj)) {
+ jobj_new_seg = json_object_get(jobj);
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_new_seg))
+ goto err;
+ } else if (tmp < device_size) {
fixed_length = device_size - tmp;
jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh,
data_offset + data_shift_value(&rh->rp),
rh->offset + rh->length,
rh->fixed_length ? &fixed_length : NULL);
- if (!jobj_old_seg)
+ if (!jobj_old_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_old_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
}
return jobj_segs_hot;
err:
+ json_object_put(jobj_reenc_seg);
+ json_object_put(jobj_old_seg);
+ json_object_put(jobj_new_seg);
json_object_put(jobj_segs_hot);
return NULL;
}
@@ -589,7 +629,7 @@ static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
uint64_t data_offset)
{
int reenc_seg, i = 0;
- json_object *jobj_copy, *jobj_seg_old, *jobj_seg_new,
+ json_object *jobj_seg_old, *jobj_copy = NULL, *jobj_seg_old_copy = NULL, *jobj_seg_new = NULL,
*jobj_segs_post = json_object_new_object();
unsigned segs;
uint64_t tmp;
@@ -607,9 +647,8 @@ static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
if (reenc_seg == 0) {
jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL);
- if (!jobj_seg_new)
+ if (!jobj_seg_new || json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new))
goto err;
- json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new);
return jobj_segs_post;
}
@@ -617,22 +656,29 @@ static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0);
if (!jobj_copy)
goto err;
- json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
+ json_object_get(jobj_copy);
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_copy))
+ goto err;
- jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
+ if ((jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1)))
+ jobj_seg_old_copy = json_object_get(jobj_seg_old);
tmp = rh->length + rh->progress;
jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset,
json_segment_get_size(rh->jobj_segment_moved, 0),
data_shift_value(&rh->rp),
jobj_seg_old ? &tmp : NULL);
- json_object_object_add_by_uint(jobj_segs_post, i++, jobj_seg_new);
+ if (!jobj_seg_new || json_object_object_add_by_uint_by_ref(jobj_segs_post, i++, &jobj_seg_new))
+ goto err;
- if (jobj_seg_old)
- json_object_object_add_by_uint(jobj_segs_post, i, json_object_get(jobj_seg_old));
+ if (jobj_seg_old_copy && json_object_object_add_by_uint(jobj_segs_post, i, jobj_seg_old_copy))
+ goto err;
return jobj_segs_post;
err:
+ json_object_put(jobj_copy);
+ json_object_put(jobj_seg_old_copy);
+ json_object_put(jobj_seg_new);
json_object_put(jobj_segs_post);
return NULL;
}
@@ -643,10 +689,10 @@ static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd
uint64_t device_size,
uint64_t data_offset)
{
- json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
+ uint64_t fixed_length, tmp = rh->offset + rh->length;
+ json_object *jobj_reenc_seg = NULL, *jobj_new_seg = NULL, *jobj_old_seg = NULL,
*jobj_segs_hot = json_object_new_object();
int sg = 0;
- uint64_t fixed_length, tmp = rh->offset + rh->length;
if (!jobj_segs_hot)
return NULL;
@@ -656,26 +702,27 @@ static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd
goto err;
json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
+ if (json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_old_seg))
+ goto err;
}
jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
- if (!jobj_reenc_seg)
+ if (!jobj_reenc_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg++, &jobj_reenc_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
-
if (tmp < device_size) {
fixed_length = device_size - tmp;
jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length,
rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
- if (!jobj_new_seg)
+ if (!jobj_new_seg || json_object_object_add_by_uint_by_ref(jobj_segs_hot, sg, &jobj_new_seg))
goto err;
- json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
}
return jobj_segs_hot;
err:
+ json_object_put(jobj_reenc_seg);
+ json_object_put(jobj_new_seg);
+ json_object_put(jobj_old_seg);
json_object_put(jobj_segs_hot);
return NULL;
}
@@ -733,6 +780,7 @@ static int reencrypt_make_post_segments(struct crypt_device *cd,
return rh->jobj_segs_post ? 0 : -EINVAL;
}
#endif
+
static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
{
json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
@@ -847,13 +895,13 @@ void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
free(rh);
}
-int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
+#if USE_LUKS2_REENCRYPTION
+int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)),
struct luks2_hdr *hdr,
const struct reenc_protection *rp,
int reencrypt_keyslot,
uint64_t *r_length)
{
-#if USE_LUKS2_REENCRYPTION
int r;
uint64_t dummy, area_length;
@@ -886,11 +934,8 @@ int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
}
return -EINVAL;
-#else
- return -ENOTSUP;
-#endif
}
-#if USE_LUKS2_REENCRYPTION
+
static size_t reencrypt_get_alignment(struct crypt_device *cd,
struct luks2_hdr *hdr)
{
@@ -971,7 +1016,6 @@ static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *j
}
static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr,
- json_object *jobj_segments,
uint64_t data_shift,
uint64_t *offset)
{
@@ -1049,7 +1093,7 @@ static int reencrypt_offset(struct luks2_hdr *hdr,
if (di == CRYPT_REENCRYPT_FORWARD) {
if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) {
- r = reencrypt_offset_forward_moved(hdr, jobj_segments, data_shift, offset);
+ r = reencrypt_offset_forward_moved(hdr, data_shift, offset);
if (!r && *offset > device_size)
*offset = device_size;
return r;
@@ -1386,7 +1430,7 @@ static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
{
- if (!rh | !name)
+ if (!rh || !name)
return -EINVAL;
if (*name == '/') {
@@ -1964,9 +2008,7 @@ static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
crypt_reencrypt_direction_info di)
{
int r;
- uint64_t first_segment_offset, first_segment_length,
- second_segment_offset, second_segment_length,
- data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
+ uint64_t data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
if (di == CRYPT_REENCRYPT_BACKWARD)
@@ -1976,47 +2018,49 @@ static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
* future data_device layout:
* [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment]
*/
- first_segment_offset = 0;
- first_segment_length = moved_segment_length;
- if (dev_size > moved_segment_length) {
- second_segment_offset = data_offset + first_segment_length;
- second_segment_length = 0;
- }
-
jobj_segments = json_object_new_object();
if (!jobj_segments)
return -ENOMEM;
r = -EINVAL;
- jobj_segment_first = json_segment_create_crypt(first_segment_offset,
- crypt_get_iv_offset(cd), &first_segment_length,
- crypt_get_cipher_spec(cd), crypt_get_sector_size(cd), 0);
+ jobj_segment_first = json_segment_create_crypt(0, crypt_get_iv_offset(cd),
+ &moved_segment_length, crypt_get_cipher_spec(cd),
+ NULL, crypt_get_sector_size(cd), 0);
if (!jobj_segment_first) {
log_dbg(cd, "Failed generate 1st segment.");
- return r;
+ goto err;
}
+ r = json_object_object_add_by_uint_by_ref(jobj_segments, 0, &jobj_segment_first);
+ if (r)
+ goto err;
+
if (dev_size > moved_segment_length) {
- jobj_segment_second = json_segment_create_crypt(second_segment_offset,
- crypt_get_iv_offset(cd) + (first_segment_length >> SECTOR_SHIFT),
- second_segment_length ? &second_segment_length : NULL,
+ jobj_segment_second = json_segment_create_crypt(data_offset + moved_segment_length,
+ crypt_get_iv_offset(cd) + (moved_segment_length >> SECTOR_SHIFT),
+ NULL,
crypt_get_cipher_spec(cd),
+ NULL, /* integrity */
crypt_get_sector_size(cd), 0);
if (!jobj_segment_second) {
- json_object_put(jobj_segment_first);
+ r = -EINVAL;
log_dbg(cd, "Failed generate 2nd segment.");
- return r;
+ goto err;
}
- }
-
- json_object_object_add(jobj_segments, "0", jobj_segment_first);
- if (jobj_segment_second)
- json_object_object_add(jobj_segments, "1", jobj_segment_second);
- r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
+ r = json_object_object_add_by_uint_by_ref(jobj_segments, 1, &jobj_segment_second);
+ if (r)
+ goto err;
+ }
- return r ?: LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, 0, 1, 0);
+ if (!(r = LUKS2_segments_set(cd, hdr, jobj_segments, 0)))
+ return LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, 0, 1, 0);
+err:
+ json_object_put(jobj_segment_first);
+ json_object_put(jobj_segment_second);
+ json_object_put(jobj_segments);
+ return r;
}
static int reencrypt_make_targets(struct crypt_device *cd,
@@ -2429,6 +2473,7 @@ static int reencrypt_make_backup_segments(struct crypt_device *cd,
uint64_t data_offset,
const struct crypt_params_reencrypt *params)
{
+ const char *type;
int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
@@ -2460,9 +2505,17 @@ static int reencrypt_make_backup_segments(struct crypt_device *cd,
if (r)
goto err;
moved_segment = segment++;
- json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
- if (!strcmp(json_segment_type(jobj_segment_bcp), "crypt"))
- LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0);
+ r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), moved_segment, &jobj_segment_bcp);
+ if (r)
+ goto err;
+
+ if (!(type = json_segment_type(LUKS2_get_segment_jobj(hdr, moved_segment)))) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ if (!strcmp(type, "crypt") && ((r = LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0))))
+ goto err;
}
/* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
@@ -2478,6 +2531,7 @@ static int reencrypt_make_backup_segments(struct crypt_device *cd,
json_segment_get_iv_offset(jobj_tmp),
device_size ? &device_size : NULL,
json_segment_get_cipher(jobj_tmp),
+ NULL, /* integrity */
json_segment_get_sector_size(jobj_tmp),
0);
} else {
@@ -2505,10 +2559,14 @@ static int reencrypt_make_backup_segments(struct crypt_device *cd,
r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
if (r)
goto err;
- json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
- jobj_segment_old = NULL;
- if (digest_old >= 0)
- LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
+
+ r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_old);
+ if (r)
+ goto err;
+
+ if (digest_old >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0)))
+ goto err;
+
segment++;
if (digest_new >= 0) {
@@ -2520,7 +2578,7 @@ static int reencrypt_make_backup_segments(struct crypt_device *cd,
}
jobj_segment_new = json_segment_create_crypt(segment_offset,
crypt_get_iv_offset(cd),
- NULL, cipher, sector_size, 0);
+ NULL, cipher, NULL, sector_size, 0);
} else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
segment_offset = data_offset;
if (modify_offset(&segment_offset, data_shift, params->direction)) {
@@ -2538,10 +2596,13 @@ static int reencrypt_make_backup_segments(struct crypt_device *cd,
r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
if (r)
goto err;
- json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
- jobj_segment_new = NULL;
- if (digest_new >= 0)
- LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
+
+ r = json_object_object_add_by_uint_by_ref(LUKS2_get_segments_jobj(hdr), segment, &jobj_segment_new);
+ if (r)
+ goto err;
+
+ if (digest_new >= 0 && (r = LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0)))
+ goto err;
/* FIXME: also check occupied space by keyslot in shrunk area */
if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
@@ -2556,6 +2617,7 @@ static int reencrypt_make_backup_segments(struct crypt_device *cd,
err:
json_object_put(jobj_segment_new);
json_object_put(jobj_segment_old);
+ json_object_put(jobj_segment_bcp);
return r;
}
@@ -2590,7 +2652,6 @@ static int reencrypt_verify_keys(struct crypt_device *cd,
}
static int reencrypt_upload_single_key(struct crypt_device *cd,
- struct luks2_hdr *hdr,
int digest,
struct volume_key *vks)
{
@@ -2615,11 +2676,11 @@ static int reencrypt_upload_keys(struct crypt_device *cd,
return 0;
if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr)) &&
- (r = reencrypt_upload_single_key(cd, hdr, digest_new, vks)))
+ (r = reencrypt_upload_single_key(cd, digest_new, vks)))
return r;
if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr)) &&
- (r = reencrypt_upload_single_key(cd, hdr, digest_old, vks))) {
+ (r = reencrypt_upload_single_key(cd, digest_old, vks))) {
crypt_drop_keyring_key(cd, vks);
return r;
}
@@ -3256,7 +3317,17 @@ static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
return 0;
}
+#else
+int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd __attribute__((unused)),
+ struct luks2_hdr *hdr __attribute__((unused)),
+ const struct reenc_protection *rp __attribute__((unused)),
+ int reencrypt_keyslot __attribute__((unused)),
+ uint64_t *r_length __attribute__((unused)))
+{
+ return -ENOTSUP;
+}
#endif
+
static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
{
int r;
@@ -3705,7 +3776,7 @@ out:
return r;
}
-#endif
+
static int reencrypt_init_by_passphrase(struct crypt_device *cd,
const char *name,
const char *passphrase,
@@ -3716,7 +3787,6 @@ static int reencrypt_init_by_passphrase(struct crypt_device *cd,
const char *cipher_mode,
const struct crypt_params_reencrypt *params)
{
-#if USE_LUKS2_REENCRYPTION
int r;
crypt_reencrypt_info ri;
struct volume_key *vks = NULL;
@@ -3778,11 +3848,22 @@ out:
crypt_drop_keyring_key(cd, vks);
crypt_free_volume_key(vks);
return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
+}
#else
+static int reencrypt_init_by_passphrase(struct crypt_device *cd,
+ const char *name __attribute__((unused)),
+ const char *passphrase __attribute__((unused)),
+ size_t passphrase_size __attribute__((unused)),
+ int keyslot_old __attribute__((unused)),
+ int keyslot_new __attribute__((unused)),
+ const char *cipher __attribute__((unused)),
+ const char *cipher_mode __attribute__((unused)),
+ const struct crypt_params_reencrypt *params __attribute__((unused)))
+{
log_err(cd, _("This operation is not supported for this device type."));
return -ENOTSUP;
-#endif
}
+#endif
int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
const char *name,
@@ -3797,14 +3878,20 @@ int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
char *passphrase;
size_t passphrase_size;
- if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
+ if (onlyLUKS2reencrypt(cd) || !passphrase_description)
return -EINVAL;
if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
return -EINVAL;
- r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
+ if (device_is_dax(crypt_data_device(cd)) > 0) {
+ log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
+ return -EINVAL;
+ }
+
+ r = crypt_keyring_get_user_key(cd, passphrase_description, &passphrase, &passphrase_size);
if (r < 0) {
- log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
+ log_dbg(cd, "crypt_keyring_get_user_key failed (error %d)", r);
+ log_err(cd, _("Failed to read passphrase from keyring."));
return -EINVAL;
}
@@ -3826,11 +3913,16 @@ int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
const char *cipher_mode,
const struct crypt_params_reencrypt *params)
{
- if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
+ if (onlyLUKS2reencrypt(cd) || !passphrase)
return -EINVAL;
if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
return -EINVAL;
+ if (device_is_dax(crypt_data_device(cd)) > 0) {
+ log_err(cd, _("Reencryption is not supported for DAX (persistent memory) devices."));
+ return -EINVAL;
+ }
+
return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
}
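
For context, reencrypt_init_by_passphrase() above backs the public crypt_reencrypt_init_by_passphrase() entry point, and both public initializers now also refuse DAX (persistent memory) data devices. A minimal caller-side sketch follows; it assumes an already formatted LUKS2 device on /dev/sdb1 whose new volume key was previously stored in keyslot 0 as an unbound key (crypt_keyslot_add_by_key() with CRYPT_VOLUME_KEY_NO_SEGMENT), and all parameter values are illustrative only.

#include <stdio.h>
#include <string.h>
#include <libcryptsetup.h>

int main(void)
{
	struct crypt_device *cd;
	/* illustrative parameters: same-direction reencryption with checksum resilience */
	struct crypt_params_luks2 luks2 = { .sector_size = 512 };
	struct crypt_params_reencrypt params = {
		.mode = CRYPT_REENCRYPT_REENCRYPT,
		.direction = CRYPT_REENCRYPT_FORWARD,
		.resilience = "checksum",
		.hash = "sha256",
		.luks2 = &luks2,
		.flags = CRYPT_REENCRYPT_INITIALIZE_ONLY,
	};
	const char *passphrase = "example";
	int r;

	if (crypt_init(&cd, "/dev/sdb1") < 0)
		return 1;
	if (crypt_load(cd, CRYPT_LUKS2, NULL) < 0) {
		crypt_free(cd);
		return 1;
	}

	/* keyslot_old = CRYPT_ANY_SLOT, keyslot_new = 0 (holds the unbound new key) */
	r = crypt_reencrypt_init_by_passphrase(cd, NULL, passphrase, strlen(passphrase),
					       CRYPT_ANY_SLOT, 0, "aes", "xts-plain64", &params);
	if (r < 0)
		fprintf(stderr, "reencryption init failed: %d\n", r);

	crypt_free(cd);
	return r < 0;
}
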
@@ -4112,14 +4204,12 @@ static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
return r;
}
-#endif
int crypt_reencrypt_run(
struct crypt_device *cd,
int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
void *usrptr)
{
-#if USE_LUKS2_REENCRYPTION
int r;
crypt_reencrypt_info ri;
struct luks2_hdr *hdr;
@@ -4127,7 +4217,7 @@ int crypt_reencrypt_run(
reenc_status_t rs;
bool quit = false;
- if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
+ if (onlyLUKS2reencrypt(cd))
return -EINVAL;
hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
@@ -4180,19 +4270,9 @@ int crypt_reencrypt_run(
r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
return r;
-#else
- log_err(cd, _("This operation is not supported for this device type."));
- return -ENOTSUP;
-#endif
}
-int crypt_reencrypt(
- struct crypt_device *cd,
- int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
-{
- return crypt_reencrypt_run(cd, progress, NULL);
-}
-#if USE_LUKS2_REENCRYPTION
+
static int reencrypt_recovery(struct crypt_device *cd,
struct luks2_hdr *hdr,
uint64_t device_size,
@@ -4228,7 +4308,27 @@ out:
return r;
}
+#else /* USE_LUKS2_REENCRYPTION */
+int crypt_reencrypt_run(
+ struct crypt_device *cd,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
+ void *usrptr)
+{
+ UNUSED(progress);
+ UNUSED(usrptr);
+
+ log_err(cd, _("This operation is not supported for this device type."));
+ return -ENOTSUP;
+}
#endif
+
+int crypt_reencrypt(
+ struct crypt_device *cd,
+ int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
+{
+ return crypt_reencrypt_run(cd, progress, NULL);
+}
+
/*
* use only for calculation of minimal data device size.
* The real data offset is taken directly from segments!
@@ -4246,7 +4346,7 @@ int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
/* internal only */
int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
- uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
+ uint64_t check_size, uint64_t *dev_size, bool device_exclusive_check, bool dynamic)
{
int r;
uint64_t data_offset, real_size = 0;
@@ -4255,7 +4355,8 @@ int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr
(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
check_size += reencrypt_data_shift(hdr);
- r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
+ r = device_check_access(cd, crypt_data_device(cd),
+ device_exclusive_check ? DEV_EXCL : DEV_OK);
if (r)
return r;
@@ -4333,6 +4434,39 @@ out:
return r < 0 ? r : keyslot;
}
+
+int LUKS2_reencrypt_locked_recovery_by_vks(struct crypt_device *cd,
+ struct volume_key *vks)
+{
+ uint64_t minimal_size, device_size;
+ int r = -EINVAL;
+ struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
+ struct volume_key *vk = NULL;
+
+ log_dbg(cd, "Entering reencryption crash recovery.");
+
+ if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
+ return r;
+
+ if (crypt_use_keyring_for_vk(cd))
+ vk = vks;
+ while (vk) {
+ r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk));
+ if (r < 0)
+ goto out;
+ vk = crypt_volume_key_next(vk);
+ }
+
+ if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
+ goto out;
+
+ r = reencrypt_recovery(cd, hdr, device_size, vks);
+
+out:
+ if (r < 0)
+ crypt_drop_keyring_key(cd, vks);
+ return r;
+}
#endif
crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
struct crypt_params_reencrypt *params)
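
With this reorganization crypt_reencrypt_run() is always built (the stub without USE_LUKS2_REENCRYPTION returns -ENOTSUP), and the legacy crypt_reencrypt() wrapper now sits outside the conditional block. A rough sketch of driving an already initialized reencryption from an application follows; the resume parameters (NULL cipher, CRYPT_ANY_SLOT keyslots with CRYPT_REENCRYPT_RESUME_ONLY) are an assumption about the minimal resume call, and returning non-zero from the progress callback is expected to interrupt the run so it can be resumed later.

#include <inttypes.h>
#include <stdio.h>
#include <libcryptsetup.h>

/* Progress callback: offset/size are in bytes of the data area. */
static int reenc_progress(uint64_t size, uint64_t offset, void *usrptr)
{
	(void)usrptr;
	fprintf(stderr, "\rreencrypted %" PRIu64 " of %" PRIu64 " bytes", offset, size);
	return 0;
}

int resume_reencryption(const char *device, const char *passphrase, size_t passphrase_size)
{
	struct crypt_device *cd;
	struct crypt_params_reencrypt params = { .flags = CRYPT_REENCRYPT_RESUME_ONLY };
	int r;

	if ((r = crypt_init(&cd, device)) < 0)
		return r;
	if ((r = crypt_load(cd, CRYPT_LUKS2, NULL)) < 0)
		goto out;

	/* re-attach to the reencryption already recorded in the LUKS2 metadata */
	r = crypt_reencrypt_init_by_passphrase(cd, NULL, passphrase, passphrase_size,
					       CRYPT_ANY_SLOT, CRYPT_ANY_SLOT,
					       NULL, NULL, &params);
	if (r < 0)
		goto out;

	r = crypt_reencrypt_run(cd, reenc_progress, NULL);
out:
	crypt_free(cd);
	return r;
}
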
diff --git a/lib/luks2/luks2_reencrypt_digest.c b/lib/luks2/luks2_reencrypt_digest.c
index bc86f54..fcdad12 100644
--- a/lib/luks2/luks2_reencrypt_digest.c
+++ b/lib/luks2/luks2_reencrypt_digest.c
@@ -1,9 +1,9 @@
/*
* LUKS - Linux Unified Key Setup v2, reencryption digest helpers
*
- * Copyright (C) 2022-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2022-2023 Ondrej Kozina
- * Copyright (C) 2022-2023 Milan Broz
+ * Copyright (C) 2022-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2022-2024 Ondrej Kozina
+ * Copyright (C) 2022-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -375,6 +375,22 @@ int LUKS2_keyslot_reencrypt_digest_create(struct crypt_device *cd,
return LUKS2_digest_assign(cd, hdr, keyslot_reencrypt, digest_reencrypt, 1, 0);
}
+void LUKS2_reencrypt_lookup_key_ids(struct crypt_device *cd, struct luks2_hdr *hdr, struct volume_key *vk)
+{
+ int digest_old, digest_new;
+
+ digest_old = LUKS2_reencrypt_digest_old(hdr);
+ digest_new = LUKS2_reencrypt_digest_new(hdr);
+
+ while (vk) {
+ if (digest_old >= 0 && LUKS2_digest_verify_by_digest(cd, digest_old, vk) == digest_old)
+ crypt_volume_key_set_id(vk, digest_old);
+ if (digest_new >= 0 && LUKS2_digest_verify_by_digest(cd, digest_new, vk) == digest_new)
+ crypt_volume_key_set_id(vk, digest_new);
+ vk = vk->next;
+ }
+}
+
int LUKS2_reencrypt_digest_verify(struct crypt_device *cd,
struct luks2_hdr *hdr,
struct volume_key *vks)
diff --git a/lib/luks2/luks2_segment.c b/lib/luks2/luks2_segment.c
index 63e7c14..af87f4f 100644
--- a/lib/luks2/luks2_segment.c
+++ b/lib/luks2/luks2_segment.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, internal segment handling
*
- * Copyright (C) 2018-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2018-2023 Ondrej Kozina
+ * Copyright (C) 2018-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2018-2024 Ondrej Kozina
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -91,6 +91,33 @@ uint64_t json_segment_get_size(json_object *jobj_segment, unsigned blockwise)
return blockwise ? crypt_jobj_get_uint64(jobj) >> SECTOR_SHIFT : crypt_jobj_get_uint64(jobj);
}
+static uint64_t json_segment_get_opal_size(json_object *jobj_segment, unsigned blockwise)
+{
+ json_object *jobj;
+
+ if (!jobj_segment ||
+ !json_object_object_get_ex(jobj_segment, "opal_segment_size", &jobj))
+ return 0;
+
+ return blockwise ? crypt_jobj_get_uint64(jobj) >> SECTOR_SHIFT : crypt_jobj_get_uint64(jobj);
+}
+
+static bool json_segment_set_size(json_object *jobj_segment, const uint64_t *size_bytes)
+{
+ json_object *jobj;
+
+ if (!jobj_segment)
+ return false;
+
+ jobj = size_bytes ? crypt_jobj_new_uint64(*size_bytes) : json_object_new_string("dynamic");
+ if (!jobj)
+ return false;
+
+ json_object_object_add(jobj_segment, "size", jobj);
+
+ return true;
+}
+
const char *json_segment_get_cipher(json_object *jobj_segment)
{
json_object *jobj;
@@ -116,6 +143,37 @@ uint32_t json_segment_get_sector_size(json_object *jobj_segment)
return i < 0 ? SECTOR_SIZE : i;
}
+int json_segment_get_opal_segment_id(json_object *jobj_segment, uint32_t *ret_opal_segment_id)
+{
+ json_object *jobj_segment_id;
+
+ assert(ret_opal_segment_id);
+
+ if (!json_object_object_get_ex(jobj_segment, "opal_segment_number", &jobj_segment_id))
+ return -EINVAL;
+
+ *ret_opal_segment_id = json_object_get_int(jobj_segment_id);
+
+ return 0;
+}
+
+int json_segment_get_opal_key_size(json_object *jobj_segment, size_t *ret_key_size)
+{
+ json_object *jobj_key_size;
+
+ assert(ret_key_size);
+
+ if (!jobj_segment)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_segment, "opal_key_size", &jobj_key_size))
+ return -EINVAL;
+
+ *ret_key_size = json_object_get_int(jobj_key_size);
+
+ return 0;
+}
+
static json_object *json_segment_get_flags(json_object *jobj_segment)
{
json_object *jobj;
@@ -245,24 +303,94 @@ json_object *json_segment_create_linear(uint64_t offset, const uint64_t *length,
return jobj;
}
+static bool json_add_crypt_fields(json_object *jobj_segment, uint64_t iv_offset,
+ const char *cipher, const char *integrity,
+ uint32_t sector_size, unsigned reencryption)
+{
+ json_object *jobj_integrity;
+
+ assert(cipher);
+
+ json_object_object_add(jobj_segment, "iv_tweak", crypt_jobj_new_uint64(iv_offset));
+ json_object_object_add(jobj_segment, "encryption", json_object_new_string(cipher));
+ json_object_object_add(jobj_segment, "sector_size", json_object_new_int(sector_size));
+
+ if (integrity) {
+ jobj_integrity = json_object_new_object();
+ if (!jobj_integrity)
+ return false;
+
+ json_object_object_add(jobj_integrity, "type", json_object_new_string(integrity));
+ json_object_object_add(jobj_integrity, "journal_encryption", json_object_new_string("none"));
+ json_object_object_add(jobj_integrity, "journal_integrity", json_object_new_string("none"));
+ json_object_object_add(jobj_segment, "integrity", jobj_integrity);
+ }
+
+ if (reencryption)
+ LUKS2_segment_set_flag(jobj_segment, "in-reencryption");
+
+ return true;
+}
+
json_object *json_segment_create_crypt(uint64_t offset,
uint64_t iv_offset, const uint64_t *length,
- const char *cipher, uint32_t sector_size,
- unsigned reencryption)
+ const char *cipher, const char *integrity,
+ uint32_t sector_size, unsigned reencryption)
{
json_object *jobj = _segment_create_generic("crypt", offset, length);
+
if (!jobj)
return NULL;
- json_object_object_add(jobj, "iv_tweak", crypt_jobj_new_uint64(iv_offset));
- json_object_object_add(jobj, "encryption", json_object_new_string(cipher));
- json_object_object_add(jobj, "sector_size", json_object_new_int(sector_size));
- if (reencryption)
- LUKS2_segment_set_flag(jobj, "in-reencryption");
+ if (json_add_crypt_fields(jobj, iv_offset, cipher, integrity, sector_size, reencryption))
+ return jobj;
+
+ json_object_put(jobj);
+ return NULL;
+}
+
+static void json_add_opal_fields(json_object *jobj_segment, const uint64_t *length,
+ uint32_t segment_number, uint32_t key_size)
+{
+ assert(jobj_segment);
+ assert(length);
+
+ json_object_object_add(jobj_segment, "opal_segment_number", json_object_new_int(segment_number));
+ json_object_object_add(jobj_segment, "opal_key_size", json_object_new_int(key_size));
+ json_object_object_add(jobj_segment, "opal_segment_size", crypt_jobj_new_uint64(*length));
+}
+
+json_object *json_segment_create_opal(uint64_t offset, const uint64_t *length,
+ uint32_t segment_number, uint32_t key_size)
+{
+ json_object *jobj = _segment_create_generic("hw-opal", offset, length);
+ if (!jobj)
+ return NULL;
+
+ json_add_opal_fields(jobj, length, segment_number, key_size);
return jobj;
}
+json_object *json_segment_create_opal_crypt(uint64_t offset, const uint64_t *length,
+ uint32_t segment_number, uint32_t key_size,
+ uint64_t iv_offset, const char *cipher,
+ const char *integrity, uint32_t sector_size,
+ unsigned reencryption)
+{
+ json_object *jobj = _segment_create_generic("hw-opal-crypt", offset, length);
+ if (!jobj)
+ return NULL;
+
+ json_add_opal_fields(jobj, length, segment_number, key_size);
+
+ if (json_add_crypt_fields(jobj, iv_offset, cipher, integrity, sector_size, reencryption))
+ return jobj;
+
+ json_object_put(jobj);
+ return NULL;
+}
+
uint64_t LUKS2_segment_offset(struct luks2_hdr *hdr, int segment, unsigned blockwise)
{
return json_segment_get_offset(LUKS2_get_segment_jobj(hdr, segment), blockwise);
@@ -288,11 +416,85 @@ uint64_t LUKS2_segment_size(struct luks2_hdr *hdr, int segment, unsigned blockwi
return json_segment_get_size(LUKS2_get_segment_jobj(hdr, segment), blockwise);
}
+uint64_t LUKS2_opal_segment_size(struct luks2_hdr *hdr, int segment, unsigned blockwise)
+{
+ return json_segment_get_opal_size(LUKS2_get_segment_jobj(hdr, segment), blockwise);
+}
+
+bool LUKS2_segment_set_size(struct luks2_hdr *hdr, int segment, const uint64_t *segment_size_bytes)
+{
+ return json_segment_set_size(LUKS2_get_segment_jobj(hdr, segment), segment_size_bytes);
+}
+
int LUKS2_segment_is_type(struct luks2_hdr *hdr, int segment, const char *type)
{
return !strcmp(json_segment_type(LUKS2_get_segment_jobj(hdr, segment)) ?: "", type);
}
+static bool json_segment_is_hw_opal_only(json_object *jobj_segment)
+{
+ const char *type = json_segment_type(jobj_segment);
+
+ if (!type)
+ return false;
+
+ return !strcmp(type, "hw-opal");
+}
+
+static bool json_segment_is_hw_opal_crypt(json_object *jobj_segment)
+{
+ const char *type = json_segment_type(jobj_segment);
+
+ if (!type)
+ return false;
+
+ return !strcmp(type, "hw-opal-crypt");
+}
+
+static bool json_segment_is_hw_opal(json_object *jobj_segment)
+{
+ return json_segment_is_hw_opal_crypt(jobj_segment) ||
+ json_segment_is_hw_opal_only(jobj_segment);
+}
+
+bool LUKS2_segment_is_hw_opal_only(struct luks2_hdr *hdr, int segment)
+{
+ return json_segment_is_hw_opal_only(LUKS2_get_segment_jobj(hdr, segment));
+}
+
+bool LUKS2_segment_is_hw_opal_crypt(struct luks2_hdr *hdr, int segment)
+{
+ return json_segment_is_hw_opal_crypt(LUKS2_get_segment_jobj(hdr, segment));
+}
+
+bool LUKS2_segment_is_hw_opal(struct luks2_hdr *hdr, int segment)
+{
+ return json_segment_is_hw_opal(LUKS2_get_segment_jobj(hdr, segment));
+}
+
+int LUKS2_get_opal_segment_number(struct luks2_hdr *hdr, int segment, uint32_t *ret_opal_segment_number)
+{
+ json_object *jobj_segment = LUKS2_get_segment_jobj(hdr, segment);
+
+ assert(ret_opal_segment_number);
+
+ if (!json_segment_is_hw_opal(jobj_segment))
+ return -ENOENT;
+
+ return json_segment_get_opal_segment_id(jobj_segment, ret_opal_segment_number);
+}
+
+int LUKS2_get_opal_key_size(struct luks2_hdr *hdr, int segment)
+{
+ size_t key_size = 0;
+ json_object *jobj_segment = LUKS2_get_segment_jobj(hdr, segment);
+
+ if (json_segment_get_opal_key_size(jobj_segment, &key_size) < 0)
+ return 0;
+
+ return key_size;
+}
+
int LUKS2_last_segment_by_type(struct luks2_hdr *hdr, const char *type)
{
json_object *jobj_segments;
@@ -424,3 +626,27 @@ bool json_segment_cmp(json_object *jobj_segment_1, json_object *jobj_segment_2)
return true;
}
+
+bool LUKS2_segments_dynamic_size(struct luks2_hdr *hdr)
+{
+ json_object *jobj_segments, *jobj_size;
+
+ assert(hdr);
+
+ jobj_segments = LUKS2_get_segments_jobj(hdr);
+ if (!jobj_segments)
+ return false;
+
+ json_object_object_foreach(jobj_segments, key, val) {
+ UNUSED(key);
+
+ if (json_segment_is_backup(val))
+ continue;
+
+ if (json_object_object_get_ex(val, "size", &jobj_size) &&
+ !strcmp(json_object_get_string(jobj_size), "dynamic"))
+ return true;
+ }
+
+ return false;
+}
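
The new "hw-opal" and "hw-opal-crypt" segment types extend the generic segment with three OPAL-specific keys supplied by json_add_opal_fields(); "hw-opal-crypt" additionally carries the usual crypt fields (an "integrity" sub-object with journal settings is added only when an integrity spec is passed). Assembled through the helpers above, a dm-crypt-over-OPAL segment serializes roughly as in the excerpt below; all values are illustrative, and 64-bit numbers appear as JSON strings because that is what crypt_jobj_new_uint64() produces.

"3": {
	"type": "hw-opal-crypt",
	"offset": "16777216",
	"size": "21474836480",
	"iv_tweak": "0",
	"encryption": "aes-xts-plain64",
	"sector_size": 512,
	"opal_segment_number": 1,
	"opal_key_size": 32,
	"opal_segment_size": "21474836480"
}
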
diff --git a/lib/luks2/luks2_token.c b/lib/luks2/luks2_token.c
index 5f65918..9c09be2 100644
--- a/lib/luks2/luks2_token.c
+++ b/lib/luks2/luks2_token.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, token handling
*
- * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2016-2023 Milan Broz
+ * Copyright (C) 2016-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2024 Milan Broz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -25,7 +25,9 @@
#include "luks2_internal.h"
#if USE_EXTERNAL_TOKENS
+#define TOKENS_PATH_MAX PATH_MAX
static bool external_tokens_enabled = true;
+static char external_tokens_path[TOKENS_PATH_MAX] = EXTERNAL_LUKS2_TOKENS_PATH;
#else
static bool external_tokens_enabled = false;
#endif
@@ -51,31 +53,37 @@ void crypt_token_external_disable(void)
const char *crypt_token_external_path(void)
{
- return external_tokens_enabled ? EXTERNAL_LUKS2_TOKENS_PATH : NULL;
+#if USE_EXTERNAL_TOKENS
+ return external_tokens_enabled ? external_tokens_path : NULL;
+#else
+ return NULL;
+#endif
}
#if USE_EXTERNAL_TOKENS
-static void *token_dlvsym(struct crypt_device *cd,
- void *handle,
- const char *symbol,
- const char *version)
+int crypt_token_set_external_path(const char *path)
{
- char *error;
- void *sym;
+ int r;
+ char tokens_path[TOKENS_PATH_MAX];
-#ifdef HAVE_DLVSYM
- log_dbg(cd, "Loading symbol %s@%s.", symbol, version);
- sym = dlvsym(handle, symbol, version);
-#else
- log_dbg(cd, "Loading default version of symbol %s.", symbol);
- sym = dlsym(handle, symbol);
-#endif
- error = dlerror();
+ if (!path)
+ path = EXTERNAL_LUKS2_TOKENS_PATH;
+ else if (*path != '/')
+ return -EINVAL;
- if (error)
- log_dbg(cd, "%s", error);
+ r = snprintf(tokens_path, sizeof(tokens_path), "%s", path);
+ if (r < 0 || (size_t)r >= sizeof(tokens_path))
+ return -EINVAL;
- return sym;
+ (void)strcpy(external_tokens_path, tokens_path);
+
+ return 0;
+}
+#else
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+int crypt_token_set_external_path(const char *path)
+{
+ return -ENOTSUP;
}
#endif
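
crypt_token_set_external_path() is new public API: it overrides the compiled-in token plugin directory (EXTERNAL_LUKS2_TOKENS_PATH), rejects relative paths and over-long paths with -EINVAL, and restores the default when called with NULL. A minimal sketch of the call pattern; the plugin directory shown is an example only.

#include <stdio.h>
#include <libcryptsetup.h>

int main(void)
{
	/* must be an absolute path; relative paths return -EINVAL */
	if (crypt_token_set_external_path("/usr/local/lib/cryptsetup/tokens"))
		fprintf(stderr, "cannot override token plugin path\n");

	printf("token plugins loaded from: %s\n",
	       crypt_token_external_path() ?: "(external tokens disabled)");

	/* NULL restores the compiled-in default path */
	crypt_token_set_external_path(NULL);
	return 0;
}
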
@@ -98,6 +106,29 @@ static bool token_validate_v1(struct crypt_device *cd, const crypt_token_handler
}
#if USE_EXTERNAL_TOKENS
+static void *token_dlvsym(struct crypt_device *cd,
+ void *handle,
+ const char *symbol,
+ const char *version)
+{
+ char *error;
+ void *sym;
+
+#ifdef HAVE_DLVSYM
+ log_dbg(cd, "Loading symbol %s@%s.", symbol, version);
+ sym = dlvsym(handle, symbol, version);
+#else
+ log_dbg(cd, "Loading default version of symbol %s.", symbol);
+ sym = dlsym(handle, symbol);
+#endif
+ error = dlerror();
+
+ if (error)
+ log_dbg(cd, "%s", error);
+
+ return sym;
+}
+
static bool token_validate_v2(struct crypt_device *cd, const struct crypt_token_handler_internal *h)
{
if (!h)
@@ -127,12 +158,10 @@ static bool external_token_name_valid(const char *name)
return true;
}
-#endif
static int
crypt_token_load_external(struct crypt_device *cd, const char *name, struct crypt_token_handler_internal *ret)
{
-#if USE_EXTERNAL_TOKENS
struct crypt_token_handler_v2 *token;
void *h;
char buf[PATH_MAX];
@@ -192,11 +221,40 @@ crypt_token_load_external(struct crypt_device *cd, const char *name, struct cryp
ret->version = 2;
return 0;
-#else
+}
+
+void crypt_token_unload_external_all(struct crypt_device *cd)
+{
+ int i;
+
+ for (i = LUKS2_TOKENS_MAX - 1; i >= 0; i--) {
+ if (token_handlers[i].version < 2)
+ continue;
+
+ log_dbg(cd, "Unloading %s token handler.", token_handlers[i].u.v2.name);
+
+ free(CONST_CAST(void *)token_handlers[i].u.v2.name);
+
+ if (dlclose(CONST_CAST(void *)token_handlers[i].u.v2.dlhandle))
+ log_dbg(cd, "%s", dlerror());
+ }
+}
+
+#else /* USE_EXTERNAL_TOKENS */
+
+static int crypt_token_load_external(struct crypt_device *cd __attribute__((unused)),
+ const char *name __attribute__((unused)),
+ struct crypt_token_handler_internal *ret __attribute__((unused)))
+{
return -ENOTSUP;
-#endif
}
+void crypt_token_unload_external_all(struct crypt_device *cd __attribute__((unused)))
+{
+}
+
+#endif
+
static int is_builtin_candidate(const char *type)
{
return !strncmp(type, LUKS2_BUILTIN_TOKEN_PREFIX, LUKS2_BUILTIN_TOKEN_PREFIX_LEN);
@@ -243,25 +301,6 @@ int crypt_token_register(const crypt_token_handler *handler)
return 0;
}
-void crypt_token_unload_external_all(struct crypt_device *cd)
-{
-#if USE_EXTERNAL_TOKENS
- int i;
-
- for (i = LUKS2_TOKENS_MAX - 1; i >= 0; i--) {
- if (token_handlers[i].version < 2)
- continue;
-
- log_dbg(cd, "Unloading %s token handler.", token_handlers[i].u.v2.name);
-
- free(CONST_CAST(void *)token_handlers[i].u.v2.name);
-
- if (dlclose(CONST_CAST(void *)token_handlers[i].u.v2.dlhandle))
- log_dbg(cd, "%s", dlerror());
- }
-#endif
-}
-
static const void
*LUKS2_token_handler_type(struct crypt_device *cd, const char *type)
{
@@ -423,12 +462,12 @@ static const char *token_json_to_string(json_object *jobj_token)
JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
}
-static int token_is_usable(struct luks2_hdr *hdr, json_object *jobj_token, int segment,
+static int token_is_usable(struct luks2_hdr *hdr, json_object *jobj_token, int keyslot, int segment,
crypt_keyslot_priority minimal_priority, bool requires_keyslot)
{
crypt_keyslot_priority keyslot_priority;
json_object *jobj_array;
- int i, keyslot, len, r = -ENOENT;
+ int i, slot, len, r = -ENOENT;
if (!jobj_token)
return -EINVAL;
@@ -451,16 +490,19 @@ static int token_is_usable(struct luks2_hdr *hdr, json_object *jobj_token, int s
return -ENOENT;
for (i = 0; i < len; i++) {
- keyslot = atoi(json_object_get_string(json_object_array_get_idx(jobj_array, i)));
+ slot = atoi(json_object_get_string(json_object_array_get_idx(jobj_array, i)));
+
+ if (keyslot != CRYPT_ANY_SLOT && slot != keyslot)
+ continue;
- keyslot_priority = LUKS2_keyslot_priority_get(hdr, keyslot);
+ keyslot_priority = LUKS2_keyslot_priority_get(hdr, slot);
if (keyslot_priority == CRYPT_SLOT_PRIORITY_INVALID)
return -EINVAL;
if (keyslot_priority < minimal_priority)
continue;
- r = LUKS2_keyslot_for_segment(hdr, keyslot, segment);
+ r = LUKS2_keyslot_for_segment(hdr, slot, segment);
if (r != -ENOENT)
return r;
}
@@ -480,6 +522,7 @@ static int translate_errno(struct crypt_device *cd, int ret_val, const char *typ
static int token_open(struct crypt_device *cd,
struct luks2_hdr *hdr,
+ int keyslot,
int token,
json_object *jobj_token,
const char *type,
@@ -507,7 +550,7 @@ static int token_open(struct crypt_device *cd,
return -ENOENT;
}
- r = token_is_usable(hdr, jobj_token, segment, priority, requires_keyslot);
+ r = token_is_usable(hdr, jobj_token, keyslot, segment, priority, requires_keyslot);
if (r < 0) {
if (r == -ENOENT)
log_dbg(cd, "Token %d unusable for segment %d with desired keyslot priority %d.",
@@ -569,32 +612,22 @@ static void update_return_errno(int r, int *stored)
*stored = r;
}
-static int LUKS2_keyslot_open_by_token(struct crypt_device *cd,
+static int try_token_keyslot_unlock(struct crypt_device *cd,
struct luks2_hdr *hdr,
+ const char *type,
+ json_object *jobj_token_keyslots,
int token,
int segment,
crypt_keyslot_priority priority,
const char *buffer,
size_t buffer_len,
- struct volume_key **vk)
+ struct volume_key **r_vk)
{
+ json_object *jobj;
crypt_keyslot_priority keyslot_priority;
- json_object *jobj_token, *jobj_token_keyslots, *jobj_type, *jobj;
- unsigned int num = 0;
int i, r = -ENOENT, stored_retval = -ENOENT;
+ unsigned int num = 0;
- jobj_token = LUKS2_get_token_jobj(hdr, token);
- if (!jobj_token)
- return -EINVAL;
-
- if (!json_object_object_get_ex(jobj_token, "type", &jobj_type))
- return -EINVAL;
-
- json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
- if (!jobj_token_keyslots)
- return -EINVAL;
-
- /* Try to open keyslot referenced in token */
for (i = 0; i < (int) json_object_array_length(jobj_token_keyslots) && r < 0; i++) {
jobj = json_object_array_get_idx(jobj_token_keyslots, i);
num = atoi(json_object_get_string(jobj));
@@ -604,8 +637,8 @@ static int LUKS2_keyslot_open_by_token(struct crypt_device *cd,
if (keyslot_priority < priority)
continue;
log_dbg(cd, "Trying to open keyslot %u with token %d (type %s).",
- num, token, json_object_get_string(jobj_type));
- r = LUKS2_keyslot_open(cd, num, segment, buffer, buffer_len, vk);
+ num, token, type);
+ r = LUKS2_keyslot_open(cd, num, segment, buffer, buffer_len, r_vk);
/* short circuit on fatal error */
if (r < 0 && r != -EPERM && r != -ENOENT)
return r;
@@ -620,6 +653,53 @@ static int LUKS2_keyslot_open_by_token(struct crypt_device *cd,
return num;
}
+static int LUKS2_keyslot_open_by_token(struct crypt_device *cd,
+ struct luks2_hdr *hdr,
+ int keyslot,
+ int token,
+ int segment,
+ crypt_keyslot_priority min_priority,
+ const char *buffer,
+ size_t buffer_len,
+ struct volume_key **vk)
+{
+ json_object *jobj_token, *jobj_token_keyslots, *jobj_type;
+ crypt_keyslot_priority priority = CRYPT_SLOT_PRIORITY_PREFER;
+ int r = -ENOENT, stored_retval = -ENOENT;
+
+ jobj_token = LUKS2_get_token_jobj(hdr, token);
+ if (!jobj_token)
+ return -EINVAL;
+
+ if (!json_object_object_get_ex(jobj_token, "type", &jobj_type))
+ return -EINVAL;
+
+ json_object_object_get_ex(jobj_token, "keyslots", &jobj_token_keyslots);
+ if (!jobj_token_keyslots)
+ return -EINVAL;
+
+ /* with specific keyslot just ignore priorities and unlock */
+ if (keyslot != CRYPT_ANY_SLOT) {
+ log_dbg(cd, "Trying to open keyslot %u with token %d (type %s).",
+ keyslot, token, json_object_get_string(jobj_type));
+ return LUKS2_keyslot_open(cd, keyslot, segment, buffer, buffer_len, vk);
+ }
+
+ /* Try to open keyslot referenced in token */
+ while (priority >= min_priority) {
+ r = try_token_keyslot_unlock(cd, hdr, json_object_get_string(jobj_type),
+ jobj_token_keyslots, token, segment,
+ priority, buffer, buffer_len, vk);
+ if (r == -EINVAL || r >= 0)
+ return r;
+ if (r == -EPERM)
+ stored_retval = r;
+ priority--;
+ }
+
+ return stored_retval;
+}
+
static bool token_is_blocked(int token, uint32_t *block_list)
{
/* it is safe now, but have assert in case LUKS2_TOKENS_MAX grows */
@@ -640,6 +720,7 @@ static int token_open_priority(struct crypt_device *cd,
struct luks2_hdr *hdr,
json_object *jobj_tokens,
const char *type,
+ int keyslot,
int segment,
crypt_keyslot_priority priority,
const char *pin,
@@ -660,9 +741,10 @@ static int token_open_priority(struct crypt_device *cd,
token = atoi(slot);
if (token_is_blocked(token, block_list))
continue;
- r = token_open(cd, hdr, token, val, type, segment, priority, pin, pin_size, &buffer, &buffer_size, usrptr, true);
+ r = token_open(cd, hdr, keyslot, token, val, type, segment, priority, pin, pin_size,
+ &buffer, &buffer_size, usrptr, true);
if (!r) {
- r = LUKS2_keyslot_open_by_token(cd, hdr, token, segment, priority,
+ r = LUKS2_keyslot_open_by_token(cd, hdr, keyslot, token, segment, priority,
buffer, buffer_size, vk);
LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
}
@@ -679,8 +761,9 @@ static int token_open_priority(struct crypt_device *cd,
return *stored_retval;
}
-static int token_open_any(struct crypt_device *cd, struct luks2_hdr *hdr, const char *type, int segment,
- const char *pin, size_t pin_size, void *usrptr, struct volume_key **vk)
+static int token_open_any(struct crypt_device *cd, struct luks2_hdr *hdr, const char *type,
+ int keyslot, int segment, const char *pin, size_t pin_size, void *usrptr,
+ struct volume_key **vk)
{
json_object *jobj_tokens;
int r, retval = -ENOENT;
@@ -692,17 +775,22 @@ static int token_open_any(struct crypt_device *cd, struct luks2_hdr *hdr, const
if (!type)
usrptr = NULL;
- r = token_open_priority(cd, hdr, jobj_tokens, type, segment, CRYPT_SLOT_PRIORITY_PREFER,
+ if (keyslot != CRYPT_ANY_SLOT)
+ return token_open_priority(cd, hdr, jobj_tokens, type, keyslot, segment, CRYPT_SLOT_PRIORITY_IGNORE,
+ pin, pin_size, usrptr, &retval, &blocked, vk);
+
+ r = token_open_priority(cd, hdr, jobj_tokens, type, keyslot, segment, CRYPT_SLOT_PRIORITY_PREFER,
pin, pin_size, usrptr, &retval, &blocked, vk);
if (break_loop_retval(r))
return r;
- return token_open_priority(cd, hdr, jobj_tokens, type, segment, CRYPT_SLOT_PRIORITY_NORMAL,
+ return token_open_priority(cd, hdr, jobj_tokens, type, keyslot, segment, CRYPT_SLOT_PRIORITY_NORMAL,
pin, pin_size, usrptr, &retval, &blocked, vk);
}
int LUKS2_token_unlock_key(struct crypt_device *cd,
struct luks2_hdr *hdr,
+ int keyslot,
int token,
const char *type,
const char *pin,
@@ -714,6 +802,7 @@ int LUKS2_token_unlock_key(struct crypt_device *cd,
char *buffer;
size_t buffer_size;
json_object *jobj_token;
+ crypt_keyslot_priority min_priority;
int r = -ENOENT;
assert(vk);
@@ -724,13 +813,27 @@ int LUKS2_token_unlock_key(struct crypt_device *cd,
if (segment < 0 && segment != CRYPT_ANY_SEGMENT)
return -EINVAL;
+ if (keyslot != CRYPT_ANY_SLOT || token != CRYPT_ANY_TOKEN)
+ min_priority = CRYPT_SLOT_PRIORITY_IGNORE;
+ else
+ min_priority = CRYPT_SLOT_PRIORITY_NORMAL;
+
+ if (keyslot != CRYPT_ANY_SLOT) {
+ r = LUKS2_keyslot_for_segment(hdr, keyslot, segment);
+ if (r < 0) {
+ if (r == -ENOENT)
+ log_dbg(cd, "Keyslot %d unusable for segment %d.", keyslot, segment);
+ return r;
+ }
+ }
+
if (token >= 0 && token < LUKS2_TOKENS_MAX) {
if ((jobj_token = LUKS2_get_token_jobj(hdr, token))) {
- r = token_open(cd, hdr, token, jobj_token, type, segment, CRYPT_SLOT_PRIORITY_IGNORE,
+ r = token_open(cd, hdr, keyslot, token, jobj_token, type, segment, min_priority,
pin, pin_size, &buffer, &buffer_size, usrptr, true);
if (!r) {
- r = LUKS2_keyslot_open_by_token(cd, hdr, token, segment, CRYPT_SLOT_PRIORITY_IGNORE,
- buffer, buffer_size, vk);
+ r = LUKS2_keyslot_open_by_token(cd, hdr, keyslot, token, segment,
+ min_priority, buffer, buffer_size, vk);
LUKS2_token_buffer_free(cd, token, buffer, buffer_size);
}
}
@@ -745,7 +848,7 @@ int LUKS2_token_unlock_key(struct crypt_device *cd,
* success (>= 0) or any other negative errno short-circuits token activation loop
* immediately
*/
- r = token_open_any(cd, hdr, type, segment, pin, pin_size, usrptr, vk);
+ r = token_open_any(cd, hdr, type, keyslot, segment, pin, pin_size, usrptr, vk);
else
r = -EINVAL;
@@ -754,6 +857,7 @@ int LUKS2_token_unlock_key(struct crypt_device *cd,
int LUKS2_token_open_and_activate(struct crypt_device *cd,
struct luks2_hdr *hdr,
+ int keyslot,
int token,
const char *name,
const char *type,
@@ -763,15 +867,15 @@ int LUKS2_token_open_and_activate(struct crypt_device *cd,
void *usrptr)
{
bool use_keyring;
- int keyslot, r, segment;
- struct volume_key *vk = NULL;
+ int r, segment;
+ struct volume_key *p_crypt, *p_opal, *crypt_key = NULL, *opal_key = NULL, *vk = NULL;
if (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY)
segment = CRYPT_ANY_SEGMENT;
else
segment = CRYPT_DEFAULT_SEGMENT;
- r = LUKS2_token_unlock_key(cd, hdr, token, type, pin, pin_size, segment, usrptr, &vk);
+ r = LUKS2_token_unlock_key(cd, hdr, keyslot, token, type, pin, pin_size, segment, usrptr, &vk);
if (r < 0)
return r;
@@ -779,23 +883,39 @@ int LUKS2_token_open_and_activate(struct crypt_device *cd,
keyslot = r;
- if (!crypt_use_keyring_for_vk(cd))
+ if (LUKS2_segment_is_hw_opal(hdr, CRYPT_DEFAULT_SEGMENT)) {
+ r = LUKS2_split_crypt_and_opal_keys(cd, hdr, vk, &crypt_key, &opal_key);
+ if (r < 0) {
+ crypt_free_volume_key(vk);
+ return r;
+ }
+
+ p_crypt = crypt_key;
+ p_opal = opal_key ?: vk;
+ } else {
+ p_crypt = vk;
+ p_opal = NULL;
+ }
+
+ if (!crypt_use_keyring_for_vk(cd) || !p_crypt)
use_keyring = false;
else
use_keyring = ((name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
(flags & CRYPT_ACTIVATE_KEYRING_KEY));
if (use_keyring) {
- if (!(r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd, hdr, vk, keyslot)))
+ if (!(r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd, hdr, p_crypt, keyslot)))
flags |= CRYPT_ACTIVATE_KEYRING_KEY;
}
if (r >= 0 && name)
- r = LUKS2_activate(cd, name, vk, flags);
+ r = LUKS2_activate(cd, name, p_crypt, p_opal, flags);
if (r < 0)
- crypt_drop_keyring_key(cd, vk);
+ crypt_drop_keyring_key(cd, p_crypt);
crypt_free_volume_key(vk);
+ crypt_free_volume_key(crypt_key);
+ crypt_free_volume_key(opal_key);
return r < 0 ? r : keyslot;
}
@@ -995,8 +1115,9 @@ int LUKS2_token_unlock_passphrase(struct crypt_device *cd,
if (token >= 0 && token < LUKS2_TOKENS_MAX) {
if ((jobj_token = LUKS2_get_token_jobj(hdr, token)))
- r = token_open(cd, hdr, token, jobj_token, type, CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE,
- pin, pin_size, &buffer, &buffer_size, usrptr, false);
+ r = token_open(cd, hdr, CRYPT_ANY_SLOT, token, jobj_token, type,
+ CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE, pin, pin_size,
+ &buffer, &buffer_size, usrptr, false);
} else if (token == CRYPT_ANY_TOKEN) {
json_object_object_get_ex(hdr->jobj, "tokens", &jobj_tokens);
@@ -1005,7 +1126,7 @@ int LUKS2_token_unlock_passphrase(struct crypt_device *cd,
json_object_object_foreach(jobj_tokens, slot, val) {
token = atoi(slot);
- r = token_open(cd, hdr, token, val, type, CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE,
+ r = token_open(cd, hdr, CRYPT_ANY_SLOT, token, val, type, CRYPT_ANY_SEGMENT, CRYPT_SLOT_PRIORITY_IGNORE,
pin, pin_size, &buffer, &buffer_size, usrptr, false);
/*
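
Token-based unlocking above now honours an explicit keyslot (priorities are ignored when one is given) and, for CRYPT_ANY_SLOT, walks keyslot priorities from PREFER down to NORMAL instead of hard-coding two passes. Public entry points such as crypt_activate_by_token_pin() end up in LUKS2_token_open_and_activate(); a rough usage sketch follows, with "systemd-tpm2" as an example plugin name and no PIN (PIN and usrptr semantics depend on the token implementation).

#include <stdio.h>
#include <libcryptsetup.h>

int activate_by_token(const char *device, const char *name)
{
	struct crypt_device *cd;
	int r;

	if ((r = crypt_init(&cd, device)) < 0)
		return r;
	if ((r = crypt_load(cd, CRYPT_LUKS2, NULL)) < 0)
		goto out;

	/* try every token of the given type; on success r is the unlocked keyslot */
	r = crypt_activate_by_token_pin(cd, name, "systemd-tpm2",
					CRYPT_ANY_TOKEN, NULL, 0, NULL, 0);
	if (r < 0)
		fprintf(stderr, "token activation failed: %d\n", r);
out:
	crypt_free(cd);
	return r;
}
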
diff --git a/lib/luks2/luks2_token_keyring.c b/lib/luks2/luks2_token_keyring.c
index ad18798..1d141b9 100644
--- a/lib/luks2/luks2_token_keyring.c
+++ b/lib/luks2/luks2_token_keyring.c
@@ -1,8 +1,8 @@
/*
* LUKS - Linux Unified Key Setup v2, kernel keyring token
*
- * Copyright (C) 2016-2023 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2016-2023 Ondrej Kozina
+ * Copyright (C) 2016-2024 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2016-2024 Ondrej Kozina
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -40,14 +40,11 @@ int keyring_open(struct crypt_device *cd,
json_object_object_get_ex(jobj_token, "key_description", &jobj_key);
- r = keyring_get_passphrase(json_object_get_string(jobj_key), buffer, buffer_len);
- if (r == -ENOTSUP) {
- log_dbg(cd, "Kernel keyring features disabled.");
+ r = crypt_keyring_get_user_key(cd, json_object_get_string(jobj_key), buffer, buffer_len);
+ if (r == -ENOTSUP)
return -ENOENT;
- } else if (r < 0) {
- log_dbg(cd, "keyring_get_passphrase failed (error %d)", r);
+ else if (r < 0)
return -EPERM;
- }
return 0;
}
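
With the switch to crypt_keyring_get_user_key(), keyring_open() maps a disabled kernel keyring (-ENOTSUP) to -ENOENT, so the token is simply skipped, while any other read failure becomes -EPERM. For completeness, the user-side flow of the built-in luks2-keyring token is: place the keyslot passphrase in the kernel user keyring, create and assign the token, then activate. A rough sketch assuming libkeyutils and an already loaded LUKS2 device; the key description and mapping name are examples.

#include <errno.h>
#include <string.h>
#include <keyutils.h>
#include <libcryptsetup.h>

int setup_keyring_token(struct crypt_device *cd, const char *passphrase)
{
	const struct crypt_token_params_luks2_keyring params = {
		.key_description = "cryptsetup:example-key",   /* example description */
	};
	int token, r;

	/* the token stores only the key description; the passphrase itself
	 * lives in the kernel user keyring */
	if (add_key("user", params.key_description, passphrase, strlen(passphrase),
		    KEY_SPEC_USER_KEYRING) < 0)
		return -errno;

	token = crypt_token_luks2_keyring_set(cd, CRYPT_ANY_TOKEN, &params);
	if (token < 0)
		return token;

	/* let the token try any keyslot the stored passphrase can open */
	r = crypt_token_assign_keyslot(cd, token, CRYPT_ANY_SLOT);
	if (r < 0)
		return r;

	return crypt_activate_by_token(cd, "data", token, NULL, 0);
}
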