summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:13:42 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:13:42 +0000
commit119fcc3f6b480eb6791dfb79b6f9eae641705db0 (patch)
tree103ae60fcd710bfa9b07791630a4230ba4a6396d
parentInitial commit. (diff)
downloadgolang-github-containers-luksy-upstream.tar.xz
golang-github-containers-luksy-upstream.zip
Adding upstream version 0.0~git20231017.6a3592c+ds1.upstream/0.0_git20231017.6a3592c+ds1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.cirrus.yml32
-rw-r--r--.dockerignore2
-rw-r--r--.github/dependabot.yml11
-rw-r--r--.gitignore21
-rw-r--r--Dockerfile7
-rw-r--r--LICENSE201
-rw-r--r--Makefile14
-rw-r--r--OWNERS4
-rw-r--r--README.md10
-rw-r--r--cmd/luksy/decrypt.go126
-rw-r--r--cmd/luksy/encrypt.go131
-rw-r--r--cmd/luksy/inspect.go154
-rw-r--r--cmd/luksy/luksy.go47
-rw-r--r--decrypt.go255
-rw-r--r--encrypt.go421
-rw-r--r--encryption.go572
-rw-r--r--encryption_test.go309
-rw-r--r--go.mod22
-rw-r--r--go.sum35
-rw-r--r--luks.go75
-rwxr-xr-xtests/passwords.bats238
-rwxr-xr-xtests/wrapping.bats181
-rw-r--r--tune.go55
-rw-r--r--v1header.go321
-rw-r--r--v2header.go203
-rw-r--r--v2json.go157
26 files changed, 3604 insertions, 0 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
new file mode 100644
index 0000000..b639575
--- /dev/null
+++ b/.cirrus.yml
@@ -0,0 +1,32 @@
+docker_builder:
+ name: CI
+ env:
+ HOME: /root
+ DEBIAN_FRONTEND: noninteractive
+ CIRRUS_LOG_TIMESTAMP: true
+ setup_script: |
+ apt-get -q update
+ apt-get -q install -y bats cryptsetup golang
+ go version
+ make
+ unit_test_script: |
+ go test -timeout 45m -v -cover
+ case $(go env GOARCH) in
+ amd64)
+ otherarch=386;;
+ arm64)
+ otherarch=arm;;
+ mips64)
+ otherarch=mips;;
+ mips64le)
+ otherarch=mipsle;;
+ esac
+ if test -n "$otherarch" ; then
+ echo running unit tests again with GOARCH=$otherarch
+ GOARCH=$otherarch go test -timeout 45m -v -cover
+ fi
+ :
+ defaults_script: |
+ bats -f defaults ./tests
+ aes_script: |
+ bats -f aes ./tests
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..2427630
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+lukstool
+lukstool.test
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..e0871f9
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+ - package-ecosystem: "gomod" # See documentation for possible values
+ directory: "/" # Location of package manifests
+ schedule:
+ interval: "weekly"
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3b735ec
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,21 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..16ca5c7
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,7 @@
+FROM registry.fedoraproject.org/fedora
+RUN dnf -y install golang make
+WORKDIR /go/src/github.com/containers/luksy/
+COPY / /go/src/github.com/containers/luksy/
+RUN make clean all
+FROM registry.fedoraproject.org/fedora-minimal
+COPY --from=0 /go/src/github.com/containers/luksy/ /usr/local/bin/
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..b3563c2
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,14 @@
+GO = go
+BATS = bats
+
+all: luksy
+
+luksy: cmd/luksy/*.go *.go
+ $(GO) build -o luksy$(shell go env GOEXE) ./cmd/luksy
+
+clean:
+ $(RM) luksy$(shell go env GOEXE) luksy.test
+
+test:
+ $(GO) test -timeout 45m -v -cover
+ $(BATS) ./tests
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..eca8673
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,4 @@
+approvers:
+ - nalind
+reviewers:
+ - nalind
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2bf3a43
--- /dev/null
+++ b/README.md
@@ -0,0 +1,10 @@
+luksy: offline encryption/decryption using LUKS formats [![Cirrus CI Status](https://img.shields.io/cirrus/github/containers/luksy/main)](https://cirrus-ci.com/github/containers/luksy/main)
+-
+luksy implements encryption and decryption using LUKSv1 and LUKSv2 formats.
+Think of it as a clunkier cousin of gzip/bzip2/xz that doesn't actually produce
+smaller output than input, but it encrypts, and that's nice.
+
+* The main goal is to be able to encrypt/decrypt when we don't have access to
+ the Linux device mapper. Duplicating functions of cryptsetup that it can
+ perform without accessing the Linux device mapper is not a priority.
+* If you can use cryptsetup instead, use cryptsetup instead.
diff --git a/cmd/luksy/decrypt.go b/cmd/luksy/decrypt.go
new file mode 100644
index 0000000..f7ca7f5
--- /dev/null
+++ b/cmd/luksy/decrypt.go
@@ -0,0 +1,126 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/containers/luksy"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+)
+
+var (
+ decryptPasswordFd = -1
+ decryptPasswordFile = ""
+ decryptForce = false
+)
+
+func init() {
+ decryptCommand := &cobra.Command{
+ Use: "decrypt",
+ Short: "Check a password for a LUKS-formatted file or device, and decrypt it",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return decryptCmd(cmd, args)
+ },
+ Args: cobra.RangeArgs(1, 2),
+ Example: `luksy decrypt /dev/mapper/encrypted-lv [plaintext.img]`,
+ }
+
+ flags := decryptCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.IntVar(&decryptPasswordFd, "password-fd", -1, "read password from file descriptor")
+ flags.StringVar(&decryptPasswordFile, "password-file", "", "read password from file")
+ flags.BoolVarP(&decryptForce, "force-overwrite", "f", false, "forcibly overwrite existing output files")
+ rootCmd.AddCommand(decryptCommand)
+}
+
+func decryptCmd(cmd *cobra.Command, args []string) error {
+ if len(args) >= 2 {
+ _, err := os.Stat(args[1])
+ if (err == nil || !os.IsNotExist(err)) && !decryptForce {
+ if err != nil {
+ return fmt.Errorf("checking if %q exists: %w", args[1], err)
+ }
+ return fmt.Errorf("-f not specified, and %q exists", args[1])
+ }
+ }
+ input, err := os.Open(args[0])
+ if err != nil {
+ return err
+ }
+ defer input.Close()
+ v1header, v2header, v2header2, v2json, err := luksy.ReadHeaders(input, luksy.ReadHeaderOptions{})
+ if err != nil {
+ return err
+ }
+ if v2header != nil && v2header2 != nil && v2header2.SequenceID() > v2header.SequenceID() {
+ v2header = v2header2
+ }
+ var password string
+ if decryptPasswordFd != -1 {
+ f := os.NewFile(uintptr(decryptPasswordFd), fmt.Sprintf("FD %d", decryptPasswordFd))
+ passBytes, err := io.ReadAll(f)
+ if err != nil {
+ return fmt.Errorf("reading from descriptor %d: %w", decryptPasswordFd, err)
+ }
+ password = string(passBytes)
+ } else if decryptPasswordFile != "" {
+ passBytes, err := os.ReadFile(decryptPasswordFile)
+ if err != nil {
+ return err
+ }
+ password = string(passBytes)
+ } else {
+ if term.IsTerminal(int(os.Stdin.Fd())) {
+ fmt.Fprintf(os.Stdout, "Password: ")
+ os.Stdout.Sync()
+ passBytes, err := term.ReadPassword(int(os.Stdin.Fd()))
+ if err != nil {
+ return fmt.Errorf("reading from stdin: %w", err)
+ }
+ password = string(passBytes)
+ fmt.Fprintln(os.Stdout)
+ } else {
+ passBytes, err := io.ReadAll(os.Stdin)
+ if err != nil {
+ return fmt.Errorf("reading from stdin: %w", err)
+ }
+ password = string(passBytes)
+ }
+ }
+ password = strings.TrimRightFunc(password, func(r rune) bool { return r == '\r' || r == '\n' })
+ var decryptStream func([]byte) ([]byte, error)
+ var payloadOffset, payloadSize int64
+ var decryptSectorSize int
+ switch {
+ case v1header != nil:
+ decryptStream, decryptSectorSize, payloadOffset, payloadSize, err = v1header.Decrypt(password, input)
+ case v2header != nil:
+ decryptStream, decryptSectorSize, payloadOffset, payloadSize, err = v2header.Decrypt(password, input, *v2json)
+ default:
+ err = errors.New("internal error: unknown format")
+ }
+ if err == nil && len(args) >= 2 {
+ var output *os.File
+ output, err = os.Create(args[1])
+ if err != nil {
+ return err
+ }
+ defer output.Close()
+ _, err = input.Seek(payloadOffset, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ rc := luksy.DecryptReader(decryptStream, input, decryptSectorSize)
+ defer rc.Close()
+ reader := io.Reader(rc)
+ if payloadSize >= 0 {
+ reader = io.LimitReader(reader, payloadSize)
+ }
+ _, err = io.Copy(output, reader)
+ }
+ return err
+}
diff --git a/cmd/luksy/encrypt.go b/cmd/luksy/encrypt.go
new file mode 100644
index 0000000..4ebc8b1
--- /dev/null
+++ b/cmd/luksy/encrypt.go
@@ -0,0 +1,131 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/containers/luksy"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+)
+
+var (
+ encryptPasswordFds = []int{}
+ encryptPasswordFiles = []string{}
+ encryptSectorSize = 0
+ encryptCipher = ""
+ encryptv1 = false
+ encryptForce = false
+)
+
+func init() {
+ encryptCommand := &cobra.Command{
+ Use: "encrypt",
+ Short: "Create a LUKS-formatted file or device",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return encryptCmd(cmd, args)
+ },
+ Args: cobra.ExactArgs(2),
+ Example: `luksy - encrypt /tmp/plaintext.img /tmp/encrypted.img`,
+ }
+
+ flags := encryptCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.IntSliceVar(&encryptPasswordFds, "password-fd", nil, "read password from file descriptor `number`s")
+ flags.StringSliceVar(&encryptPasswordFiles, "password-file", nil, "read password from `file`s")
+ flags.BoolVarP(&encryptv1, "luks1", "1", false, "create LUKSv1 instead of LUKSv2")
+ flags.IntVar(&encryptSectorSize, "sector-size", 0, "sector size for LUKSv2")
+ flags.StringVarP(&encryptCipher, "cipher", "c", "", "encryption algorithm")
+ flags.BoolVarP(&encryptForce, "force-overwrite", "f", false, "forcibly overwrite existing output files")
+ rootCmd.AddCommand(encryptCommand)
+}
+
+func encryptCmd(cmd *cobra.Command, args []string) error {
+ _, err := os.Stat(args[1])
+ if (err == nil || !os.IsNotExist(err)) && !encryptForce {
+ if err != nil {
+ return fmt.Errorf("checking if %q exists: %w", args[1], err)
+ }
+ return fmt.Errorf("-f not specified, and %q exists", args[1])
+ }
+ input, err := os.Open(args[0])
+ if err != nil {
+ return fmt.Errorf("open %q: %w", args[0], err)
+ }
+ defer input.Close()
+ st, err := input.Stat()
+ if err != nil {
+ return err
+ }
+ if st.Size()%luksy.V1SectorSize != 0 {
+ return fmt.Errorf("%q is not of a suitable size, expected a multiple of %d bytes", input.Name(), luksy.V1SectorSize)
+ }
+ var passwords []string
+ for _, encryptPasswordFd := range encryptPasswordFds {
+ passFile := os.NewFile(uintptr(encryptPasswordFd), fmt.Sprintf("FD %d", encryptPasswordFd))
+ passBytes, err := io.ReadAll(passFile)
+ if err != nil {
+ return fmt.Errorf("reading from descriptor %d: %w", encryptPasswordFd, err)
+ }
+ passwords = append(passwords, string(passBytes))
+ }
+ for _, encryptPasswordFile := range encryptPasswordFiles {
+ passBytes, err := os.ReadFile(encryptPasswordFile)
+ if err != nil {
+ return err
+ }
+ passwords = append(passwords, string(passBytes))
+ }
+ if len(passwords) == 0 {
+ if term.IsTerminal(int(os.Stdin.Fd())) {
+ fmt.Fprintf(os.Stdout, "Password: ")
+ os.Stdout.Sync()
+ passBytes, err := term.ReadPassword(int(os.Stdin.Fd()))
+ if err != nil {
+ return fmt.Errorf("reading from stdin: %w", err)
+ }
+ passwords = append(passwords, string(passBytes))
+ fmt.Fprintln(os.Stdout)
+ } else {
+ passBytes, err := io.ReadAll(os.Stdin)
+ if err != nil {
+ return fmt.Errorf("reading from stdin: %w", err)
+ }
+ passwords = append(passwords, string(passBytes))
+ }
+ }
+ for i := range passwords {
+ passwords[i] = strings.TrimRightFunc(passwords[i], func(r rune) bool { return r == '\r' || r == '\n' })
+ }
+ var header []byte
+ var encryptStream func([]byte) ([]byte, error)
+ if encryptv1 {
+ header, encryptStream, encryptSectorSize, err = luksy.EncryptV1(passwords, encryptCipher)
+ if err != nil {
+ return fmt.Errorf("creating luksv1 data: %w", err)
+ }
+ } else {
+ header, encryptStream, encryptSectorSize, err = luksy.EncryptV2(passwords, encryptCipher, encryptSectorSize)
+ if err != nil {
+ return fmt.Errorf("creating luksv2 data: %w", err)
+ }
+ }
+ output, err := os.Create(args[1])
+ if err != nil {
+ return fmt.Errorf("create %q: %w", args[1], err)
+ }
+ defer output.Close()
+ n, err := output.Write(header)
+ if err != nil {
+ return err
+ }
+ if n != len(header) {
+ return fmt.Errorf("short write while writing header to %q", output.Name())
+ }
+ wc := luksy.EncryptWriter(encryptStream, output, encryptSectorSize)
+ defer wc.Close()
+ _, err = io.Copy(wc, input)
+ return err
+}
diff --git a/cmd/luksy/inspect.go b/cmd/luksy/inspect.go
new file mode 100644
index 0000000..eea29d6
--- /dev/null
+++ b/cmd/luksy/inspect.go
@@ -0,0 +1,154 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "text/tabwriter"
+
+ "github.com/containers/luksy"
+ "github.com/spf13/cobra"
+)
+
+var all bool
+
+func init() {
+ inspectCommand := &cobra.Command{
+ Use: "inspect",
+ Short: "Inspect a LUKS-formatted file or device",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return inspectCmd(cmd, args)
+ },
+ Args: cobra.ExactArgs(1),
+ Example: `luksy - inspect /dev/mapper/encrypted-lv`,
+ }
+
+ flags := inspectCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.BoolVarP(&all, "all", "a", false, "include information about inactive key slots")
+ rootCmd.AddCommand(inspectCommand)
+}
+
+func inspectCmd(cmd *cobra.Command, args []string) error {
+ f, err := os.Open(args[0])
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ v1header, v2header, _, v2json, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{})
+ if err != nil {
+ return err
+ }
+ tw := tabwriter.NewWriter(os.Stdout, 0, 8, 1, ' ', 0)
+ defer tw.Flush()
+ if v1header != nil {
+ if v1header.Version() != 1 {
+ return fmt.Errorf("internal error: magic/version mismatch (%d)", v1header.Version())
+ }
+ fmt.Fprintf(tw, "Magic\t%q\n", v1header.Magic())
+ fmt.Fprintf(tw, "Version\t%d\n", v1header.Version())
+ fmt.Fprintf(tw, "Cipher\t%s, %s\n", v1header.CipherName(), v1header.CipherMode())
+ fmt.Fprintf(tw, "Hash\t%s\n", v1header.HashSpec())
+ fmt.Fprintf(tw, "Payload offset sectors\t%d\n", v1header.PayloadOffset())
+ fmt.Fprintf(tw, "Main key\tlength %d\n", v1header.KeyBytes())
+ fmt.Fprintf(tw, "\tdigest %q\n", v1header.MKDigest())
+ fmt.Fprintf(tw, "\tsalt %q\n", v1header.MKDigestSalt())
+ fmt.Fprintf(tw, "\t%d rounds\n", v1header.MKDigestIter())
+ fmt.Fprintf(tw, "UUID\t%s\n", v1header.UUID())
+ for i := 0; i < 8; i++ {
+ ks, err := v1header.KeySlot(i)
+ if err != nil {
+ return fmt.Errorf("reading key slot %d: %w", i, err)
+ }
+ active, err := ks.Active()
+ if err != nil {
+ return fmt.Errorf("reading key slot %d status: %w", i, err)
+ }
+ if active || all {
+ active, err := ks.Active()
+ activeStr := fmt.Sprintf("%t", active)
+ if err != nil {
+ activeStr = fmt.Sprintf("unknown (corrupted?): %v", err)
+ }
+ fmt.Fprintf(tw, "Slot %d\tactive\t%s\n", i, activeStr)
+ fmt.Fprintf(tw, "\titerations\t%d\n", ks.Iterations())
+ fmt.Fprintf(tw, "\tsalt\t%q\n", ks.KeySlotSalt())
+ fmt.Fprintf(tw, "\tkey material offset sectors\t%d\n", ks.KeyMaterialOffset())
+ fmt.Fprintf(tw, "\tstripes\t%d\n", ks.Stripes())
+ }
+ }
+ }
+ if v2header != nil {
+ if v2header.Version() != 2 {
+ return fmt.Errorf("internal error: magic/version mismatch (%d)", v2header.Version())
+ }
+ fmt.Fprintf(tw, "Magic\t%q\n", v2header.Magic())
+ fmt.Fprintf(tw, "Version\t%d\n", v2header.Version())
+ fmt.Fprintf(tw, "Header size\t%d\n", v2header.HeaderSize())
+ fmt.Fprintf(tw, "Header offset\t%d\n", v2header.HeaderOffset())
+ fmt.Fprintf(tw, "Checksum\t%q, algorithm %q\n", v2header.Checksum(), v2header.ChecksumAlgorithm())
+ fmt.Fprintf(tw, "UUID\t%s\n", v2header.UUID())
+ fmt.Fprintf(tw, "Requirements\t%v\n", v2json.Config.Requirements)
+ for key, segment := range v2json.Segments {
+ fmt.Fprintf(tw, "Segment %s\ttype %q, offset %s, size %s, flags %v\n", key, segment.Type, segment.Offset, segment.Size, segment.Flags)
+ switch segment.Type {
+ case "crypt":
+ fmt.Fprintf(tw, "\tcrypt encryption %s, sector size %d, IV tweak %d\n", segment.Encryption, segment.SectorSize, segment.IVTweak)
+ if segment.Integrity != nil {
+ fmt.Fprintf(tw, "\tcrypt integrity type %s, journal encryption %s, journal integrity %s\n", segment.Integrity.Type, segment.Integrity.JournalEncryption, segment.Integrity.JournalIntegrity)
+ }
+ }
+ }
+ for key, slot := range v2json.Keyslots {
+ fmt.Fprintf(tw, "Slot %s \ttype %s\n", key, slot.Type)
+ switch slot.Type {
+ case "luks2":
+ fmt.Fprintf(tw, "\tluks2 AF type %s\n", slot.AF.Type)
+ switch slot.AF.Type {
+ case "luks1":
+ fmt.Fprintf(tw, "\tluks1 AF stripes %d, hash %s\n", slot.AF.Stripes, slot.AF.Hash)
+ }
+ fmt.Fprintf(tw, "\tluks2 KDF type %s, salt %q\n", slot.Kdf.Type, slot.Kdf.Salt)
+ switch slot.Kdf.Type {
+ case "argon2i":
+ fmt.Fprintf(tw, "\targon2i time %d, memory %d, cpus %d\n", slot.Kdf.Time, slot.Kdf.Memory, slot.Kdf.CPUs)
+ case "pbkdf2":
+ fmt.Fprintf(tw, "\tpbkdf2 hash %s, iterations %d\n", slot.Kdf.Hash, slot.Kdf.Iterations)
+ }
+ case "reencrypt":
+ fmt.Fprintf(tw, "\treencrypt mode %s, direction %s\n", slot.Mode, slot.Direction)
+ }
+ fmt.Fprintf(tw, "\tarea type %q, offset %d, size %d\n", slot.Area.Type, slot.Area.Offset, slot.Area.Size)
+ switch slot.Area.Type {
+ case "raw":
+ fmt.Fprintf(tw, "\traw encryption %q, key size %d\n", slot.Area.Encryption, slot.Area.KeySize)
+ case "checksum":
+ fmt.Fprintf(tw, "\tchecksum hash %q, sector size %d\n", slot.Area.Hash, slot.Area.SectorSize)
+ case "datashift":
+ fmt.Fprintf(tw, "\tdatashift shift size %d\n", slot.Area.ShiftSize)
+ case "datashift-checksum":
+ fmt.Fprintf(tw, "\tdatashift-checksum hash %q, sector size %d, shift size %d\n", slot.Area.Hash, slot.Area.SectorSize, slot.Area.ShiftSize)
+ }
+ if slot.Priority != nil {
+ fmt.Fprintf(tw, "\tpriority %s\n", slot.Priority.String())
+ }
+ }
+ for key, digest := range v2json.Digests {
+ fmt.Fprintf(tw, "Digest %s\tdigest %q\n", key, digest.Digest)
+ fmt.Fprintf(tw, "\tsalt\t%q\n", digest.Salt)
+ fmt.Fprintf(tw, "\ttype\t%q\n", digest.Type)
+ fmt.Fprintf(tw, "\tsegments\t%v\n", digest.Segments)
+ switch digest.Type {
+ case "pbkdf2":
+ fmt.Fprintf(tw, "\thash %s, iterations %d\n", digest.Hash, digest.Iterations)
+ }
+ }
+ for key, token := range v2json.Tokens {
+ fmt.Fprintf(tw, "Token %s\ttype %s, keyslots %v\n", key, token.Type, token.Keyslots)
+ switch token.Type {
+ case "luks2-keyring":
+ fmt.Fprintf(tw, "\tdescription %q\n", token.KeyDescription)
+ }
+ }
+ }
+ return nil
+}
diff --git a/cmd/luksy/luksy.go b/cmd/luksy/luksy.go
new file mode 100644
index 0000000..54a3a4b
--- /dev/null
+++ b/cmd/luksy/luksy.go
@@ -0,0 +1,47 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var rootCmd = &cobra.Command{
+ Use: "luksy",
+ Long: "A tool for creating and decrypting LUKS-encrypted disk images",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return cmd.Help()
+ },
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ return nil
+ },
+ PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
+ return nil
+ },
+ SilenceUsage: true,
+ SilenceErrors: true,
+}
+
+func main() {
+ var exitCode int
+ if err := rootCmd.Execute(); err != nil {
+ if logrus.IsLevelEnabled(logrus.TraceLevel) {
+ fmt.Fprintf(os.Stderr, "Error: %+v\n", err)
+ } else {
+ fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+ }
+ exitCode = 1
+ var ee *exec.ExitError
+ if errors.As(err, &ee) {
+ if w, ok := ee.Sys().(syscall.WaitStatus); ok {
+ exitCode = w.ExitStatus()
+ }
+ }
+ }
+ os.Exit(exitCode)
+}
diff --git a/decrypt.go b/decrypt.go
new file mode 100644
index 0000000..b36c4f5
--- /dev/null
+++ b/decrypt.go
@@ -0,0 +1,255 @@
+package luksy
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+
+ "golang.org/x/crypto/argon2"
+ "golang.org/x/crypto/pbkdf2"
+)
+
+// ReaderAtSeekCloser is a combination of io.ReaderAt, io.Seeker, and io.Closer,
+// which is all we really need from an encrypted file: random access for
+// reading key material and payload sectors, seeking to discover the file
+// size, and closing when done.
+type ReaderAtSeekCloser interface {
+ io.ReaderAt
+ io.Seeker
+ io.Closer
+}
+
+// Decrypt attempts to verify the specified password using information from the
+// header and read from the specified file.
+//
+// Returns a function which will decrypt payload blocks in succession, the size
+// of chunks of data that the function expects, the offset in the file where
+// the payload begins, and the size of the payload, assuming the payload runs
+// to the end of the file.
+func (h V1Header) Decrypt(password string, f ReaderAtSeekCloser) (func([]byte) ([]byte, error), int, int64, int64, error) {
+ // The payload is assumed to run to EOF, so the file size bounds it.
+ size, err := f.Seek(0, io.SeekEnd)
+ if err != nil {
+ return nil, -1, -1, -1, err
+ }
+ hasher, err := hasherByName(h.HashSpec())
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", h.HashSpec(), err)
+ }
+
+ // Try the password against every active key slot in turn.
+ activeKeys := 0
+ for k := 0; k < v1NumKeys; k++ {
+ keyslot, err := h.KeySlot(k)
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("reading key slot %d: %w", k, err)
+ }
+ active, err := keyslot.Active()
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("checking if key slot %d is active: %w", k, err)
+ }
+ if !active {
+ continue
+ }
+ activeKeys++
+
+ // Derive the key-encryption key from the password, then read the
+ // anti-forensically split (striped) copy of the master key from disk.
+ passwordDerived := pbkdf2.Key([]byte(password), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher)
+ striped := make([]byte, h.KeyBytes()*keyslot.Stripes())
+ n, err := f.ReadAt(striped, int64(keyslot.KeyMaterialOffset())*V1SectorSize)
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %d: %w", k, err)
+ }
+ if n != len(striped) {
+ return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %d: expected %d, got %d", k, len(striped), n)
+ }
+ // A decryption failure here just means this slot doesn't match this
+ // password; move on to the next slot.
+ splitKey, err := v1decrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, striped, V1SectorSize, false)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err)
+ continue
+ }
+ // AF-merge the stripes back into a master-key candidate.
+ mkCandidate, err := afMerge(splitKey, hasher(), int(h.KeyBytes()), int(keyslot.Stripes()))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err)
+ continue
+ }
+ // Verify the candidate against the master-key digest stored in the header.
+ mkcandidateDerived := pbkdf2.Key(mkCandidate, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher)
+ ivTweak := 0
+ // decryptStream decrypts consecutive payload chunks; ivTweak tracks the
+ // running sector number so successive calls pick up where the last ended.
+ decryptStream := func(ciphertext []byte) ([]byte, error) {
+ plaintext, err := v1decrypt(h.CipherName(), h.CipherMode(), ivTweak, mkCandidate, ciphertext, V1SectorSize, false)
+ ivTweak += len(ciphertext) / V1SectorSize
+ return plaintext, err
+ }
+ if bytes.Equal(mkcandidateDerived, h.MKDigest()) {
+ payloadOffset := int64(h.PayloadOffset() * V1SectorSize)
+ return decryptStream, V1SectorSize, payloadOffset, size - payloadOffset, nil
+ }
+ }
+ if activeKeys == 0 {
+ return nil, -1, -1, -1, errors.New("no passwords set on LUKS1 volume")
+ }
+ return nil, -1, -1, -1, errors.New("decryption error: incorrect password")
+}
+
+// Decrypt attempts to verify the specified password using information from the
+// header, JSON block, and read from the specified file.
+//
+// Returns a function which will decrypt payload blocks in succession, the size
+// of chunks of data that the function expects, the offset in the file where
+// the payload begins, and the size of the payload, assuming the payload runs
+// to the end of the file.
+func (h V2Header) Decrypt(password string, f ReaderAtSeekCloser, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) {
+ foundDigests := 0
+ // Each digest describes how to verify a master-key candidate against a
+ // set of segments and key slots; only pbkdf2 digests are supported.
+ for d, digest := range j.Digests {
+ if digest.Type != "pbkdf2" {
+ continue
+ }
+ if digest.V2JSONDigestPbkdf2 == nil {
+ return nil, -1, -1, -1, fmt.Errorf("digest %q is corrupt: no pbkdf2 parameters", d)
+ }
+ foundDigests++
+ if len(digest.Segments) == 0 || len(digest.Digest) == 0 {
+ continue
+ }
+ payloadOffset := int64(-1)
+ payloadSectorSize := V1SectorSize
+ payloadEncryption := ""
+ payloadSize := int64(0)
+ ivTweak := 0
+ // Find the first usable "crypt" segment this digest covers and record
+ // its location, size, sector size, cipher, and starting IV tweak.
+ for _, segmentID := range digest.Segments {
+ segment, ok := j.Segments[segmentID]
+ if !ok {
+ continue // well, that was misleading
+ }
+ if segment.Type != "crypt" {
+ continue
+ }
+ tmp, err := strconv.ParseInt(segment.Offset, 10, 64)
+ if err != nil {
+ continue
+ }
+ payloadOffset = tmp
+ if segment.Size == "dynamic" {
+ // "dynamic" means the payload runs to the end of the file.
+ size, err := f.Seek(0, io.SeekEnd)
+ if err != nil {
+ continue
+ }
+ payloadSize = size - payloadOffset
+ } else {
+ payloadSize, err = strconv.ParseInt(segment.Size, 10, 64)
+ if err != nil {
+ continue
+ }
+ }
+ payloadSectorSize = segment.SectorSize
+ payloadEncryption = segment.Encryption
+ ivTweak = segment.IVTweak
+ break
+ }
+ if payloadEncryption == "" {
+ continue
+ }
+ activeKeys := 0
+ // Try the password against every key slot that this digest applies to.
+ for k, keyslot := range j.Keyslots {
+ if keyslot.Priority != nil && *keyslot.Priority == V2JSONKeyslotPriorityIgnore {
+ continue
+ }
+ // An empty digest.Keyslots list means "all slots apply".
+ applicable := true
+ if len(digest.Keyslots) > 0 {
+ applicable = false
+ for i := 0; i < len(digest.Keyslots); i++ {
+ if k == digest.Keyslots[i] {
+ applicable = true
+ break
+ }
+ }
+ }
+ if !applicable {
+ continue
+ }
+ if keyslot.Type != "luks2" {
+ continue
+ }
+ if keyslot.V2JSONKeyslotLUKS2 == nil {
+ return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt", k)
+ }
+ if keyslot.V2JSONKeyslotLUKS2.AF.Type != "luks1" {
+ continue
+ }
+ if keyslot.V2JSONKeyslotLUKS2.AF.V2JSONAFLUKS1 == nil {
+ return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no AF parameters", k)
+ }
+ if keyslot.Area.Type != "raw" {
+ return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is not raw", k)
+ }
+ if keyslot.Area.KeySize*V2SectorSize < keyslot.KeySize*keyslot.AF.Stripes {
+ return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is too small (%d < %d)", k, keyslot.Area.KeySize*V2SectorSize, keyslot.KeySize*keyslot.AF.Stripes)
+ }
+ // Derive the key-encryption key from the password with the slot's KDF.
+ var passwordDerived []byte
+ switch keyslot.V2JSONKeyslotLUKS2.Kdf.Type {
+ default:
+ continue
+ case "pbkdf2":
+ if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfPbkdf2 == nil {
+ return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no pbkdf2 parameters", k)
+ }
+ hasher, err := hasherByName(keyslot.Kdf.Hash)
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.Kdf.Hash, err)
+ }
+ passwordDerived = pbkdf2.Key([]byte(password), keyslot.Kdf.Salt, keyslot.Kdf.Iterations, keyslot.KeySize, hasher)
+ case "argon2i":
+ if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil {
+ return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2i parameters", k)
+ }
+ passwordDerived = argon2.Key([]byte(password), keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize))
+ case "argon2id":
+ // NOTE(review): argon2id parameters share the argon2i JSON struct here —
+ // confirm that is intended by the JSON schema definitions.
+ if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil {
+ return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2id parameters", k)
+ }
+ passwordDerived = argon2.IDKey([]byte(password), keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize))
+ }
+ // Read and decrypt the anti-forensically split master key copy.
+ striped := make([]byte, keyslot.KeySize*keyslot.AF.Stripes)
+ n, err := f.ReadAt(striped, int64(keyslot.Area.Offset))
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %q: %w", k, err)
+ }
+ if n != len(striped) {
+ return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %q: expected %d, got %d", k, len(striped), n)
+ }
+ splitKey, err := v2decrypt(keyslot.Area.Encryption, 0, passwordDerived, striped, V1SectorSize, false)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err)
+ continue
+ }
+ afhasher, err := hasherByName(keyslot.AF.Hash)
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.AF.Hash, err)
+ }
+ mkCandidate, err := afMerge(splitKey, afhasher(), int(keyslot.KeySize), int(keyslot.AF.Stripes))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err)
+ continue
+ }
+ // Verify the candidate against this digest's stored value.
+ digester, err := hasherByName(digest.Hash)
+ if err != nil {
+ return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", digest.Hash, err)
+ }
+ mkcandidateDerived := pbkdf2.Key(mkCandidate, digest.Salt, digest.Iterations, len(digest.Digest), digester)
+ // decryptStream decrypts consecutive payload chunks; ivTweak tracks the
+ // running sector number across calls.
+ decryptStream := func(ciphertext []byte) ([]byte, error) {
+ plaintext, err := v2decrypt(payloadEncryption, ivTweak, mkCandidate, ciphertext, payloadSectorSize, true)
+ ivTweak += len(ciphertext) / payloadSectorSize
+ return plaintext, err
+ }
+ if bytes.Equal(mkcandidateDerived, digest.Digest) {
+ return decryptStream, payloadSectorSize, payloadOffset, payloadSize, nil
+ }
+ // NOTE(review): activeKeys only counts slots where a full decryption
+ // attempt completed without matching; slots skipped by the continues
+ // above are not counted — confirm the "no passwords set" message below
+ // is the intended report for that case.
+ activeKeys++
+ }
+ if activeKeys == 0 {
+ return nil, -1, -1, -1, fmt.Errorf("no passwords set on LUKS2 volume for digest %q", d)
+ }
+ }
+ if foundDigests == 0 {
+ return nil, -1, -1, -1, errors.New("no usable password-verification digests set on LUKS2 volume")
+ }
+ return nil, -1, -1, -1, errors.New("decryption error: incorrect password")
+}
diff --git a/encrypt.go b/encrypt.go
new file mode 100644
index 0000000..63b345e
--- /dev/null
+++ b/encrypt.go
@@ -0,0 +1,421 @@
+package luksy
+
+import (
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/google/uuid"
+ "golang.org/x/crypto/argon2"
+ "golang.org/x/crypto/pbkdf2"
+)
+
+// EncryptV1 prepares to encrypt data using one or more passwords and the
+// specified cipher (or a default, if the specified cipher is "").
+//
+// Returns a fixed LUKSv1 header which contains keying information, a function
+// which will encrypt blocks of data in succession, and the size of chunks of
+// data that it expects.
+func EncryptV1(password []string, cipher string) ([]byte, func([]byte) ([]byte, error), int, error) {
+ if len(password) == 0 {
+ return nil, nil, -1, errors.New("at least one password is required")
+ }
+ if len(password) > v1NumKeys {
+ return nil, nil, -1, fmt.Errorf("attempted to use %d passwords, only %d possible", len(password), v1NumKeys)
+ }
+ if cipher == "" {
+ cipher = "aes-xts-plain64"
+ }
+
+ salt := make([]byte, v1SaltSize)
+ n, err := rand.Read(salt)
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
+ }
+ if n != len(salt) {
+ return nil, nil, -1, errors.New("short read")
+ }
+
+ // The cipher spec has the form name-mode-ivgen, e.g. "aes-xts-plain64".
+ cipherSpec := strings.SplitN(cipher, "-", 3)
+ if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 {
+ return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher)
+ }
+
+ var h V1Header
+ if err := h.SetMagic(V1Magic); err != nil {
+ return nil, nil, -1, fmt.Errorf("setting magic to v1: %w", err)
+ }
+ if err := h.SetVersion(1); err != nil {
+ return nil, nil, -1, fmt.Errorf("setting version to 1: %w", err)
+ }
+ h.SetCipherName(cipherSpec[0])
+ h.SetCipherMode(cipherSpec[1] + "-" + cipherSpec[2])
+ h.SetHashSpec("sha256")
+ // XTS mode splits the key material in half, so it needs twice the bytes.
+ h.SetKeyBytes(32)
+ if cipherSpec[1] == "xts" {
+ h.SetKeyBytes(64)
+ }
+ h.SetMKDigestSalt(salt)
+ // NOTE(review): the master-key digest iteration count is set to V1Stripes —
+ // confirm this reuse of the stripe constant as an iteration count is
+ // intentional.
+ h.SetMKDigestIter(V1Stripes)
+ h.SetUUID(uuid.NewString())
+
+ // Generate the volume master key.
+ mkey := make([]byte, h.KeyBytes())
+ n, err = rand.Read(mkey)
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
+ }
+ if n != len(mkey) {
+ return nil, nil, -1, errors.New("short read")
+ }
+
+ hasher, err := hasherByName(h.HashSpec())
+ if err != nil {
+ return nil, nil, -1, errors.New("internal error")
+ }
+
+ // Store a digest of the master key so Decrypt() can verify candidates.
+ mkdigest := pbkdf2.Key(mkey, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher)
+ h.SetMKDigest(mkdigest)
+
+ // Lay out all v1NumKeys key slots, filling one per password; the rest are
+ // initialized but inactive.  headerLength accumulates the on-disk offset.
+ headerLength := roundUpToMultiple(v1HeaderStructSize, V1AlignKeyslots)
+ iterations := IterationsPBKDF2(salt, int(h.KeyBytes()), hasher)
+ var stripes [][]byte
+ ksSalt := make([]byte, v1KeySlotSaltLength)
+ for i := 0; i < v1NumKeys; i++ {
+ n, err = rand.Read(ksSalt)
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
+ }
+ if n != len(ksSalt) {
+ return nil, nil, -1, errors.New("short read")
+ }
+ var keyslot V1KeySlot
+ keyslot.SetActive(i < len(password))
+ keyslot.SetIterations(uint32(iterations))
+ keyslot.SetStripes(V1Stripes)
+ keyslot.SetKeySlotSalt(ksSalt)
+ if i < len(password) {
+ // Anti-forensically split the master key, then encrypt the split
+ // copy with a key derived from this slot's password.
+ // NOTE(review): the split count is taken from h.MKDigestIter(),
+ // which equals V1Stripes per the Set above — confirm.
+ splitKey, err := afSplit(mkey, hasher(), int(h.MKDigestIter()))
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("splitting key: %w", err)
+ }
+ passwordDerived := pbkdf2.Key([]byte(password[i]), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher)
+ striped, err := v1encrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, splitKey, V1SectorSize, false)
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("encrypting split key with password: %w", err)
+ }
+ if len(striped) != len(mkey)*int(keyslot.Stripes()) {
+ return nil, nil, -1, fmt.Errorf("internal error: got %d stripe bytes, expected %d", len(striped), len(mkey)*int(keyslot.Stripes()))
+ }
+ stripes = append(stripes, striped)
+ }
+ keyslot.SetKeyMaterialOffset(uint32(headerLength / V1SectorSize))
+ if err := h.SetKeySlot(i, keyslot); err != nil {
+ return nil, nil, -1, fmt.Errorf("internal error: setting value for key slot %d: %w", i, err)
+ }
+ headerLength += len(mkey) * int(keyslot.Stripes())
+ headerLength = roundUpToMultiple(headerLength, V1AlignKeyslots)
+ }
+ headerLength = roundUpToMultiple(headerLength, V1SectorSize)
+
+ // Assemble the binary header followed by each slot's striped key material.
+ h.SetPayloadOffset(uint32(headerLength / V1SectorSize))
+ head := make([]byte, headerLength)
+ offset := copy(head, h[:])
+ offset = roundUpToMultiple(offset, V1AlignKeyslots)
+ for _, stripe := range stripes {
+ copy(head[offset:], stripe)
+ offset = roundUpToMultiple(offset+len(stripe), V1AlignKeyslots)
+ }
+ // encryptStream encrypts consecutive payload chunks; ivTweak tracks the
+ // running sector number across calls.
+ ivTweak := 0
+ encryptStream := func(plaintext []byte) ([]byte, error) {
+ ciphertext, err := v1encrypt(h.CipherName(), h.CipherMode(), ivTweak, mkey, plaintext, V1SectorSize, true)
+ ivTweak += len(plaintext) / V1SectorSize
+ return ciphertext, err
+ }
+ return head, encryptStream, V1SectorSize, nil
+}
+
+// EncryptV2 prepares to encrypt data using one or more passwords and the
+// specified cipher (or a default, if the specified cipher is "").
+//
+// Returns a fixed LUKSv2 header which contains keying information, a
+// function which will encrypt blocks of data in succession, and the size of
+// chunks of data that it expects.
+func EncryptV2(password []string, cipher string, payloadSectorSize int) ([]byte, func([]byte) ([]byte, error), int, error) {
+ if len(password) == 0 {
+ return nil, nil, -1, errors.New("at least one password is required")
+ }
+ if cipher == "" {
+ cipher = "aes-xts-plain64"
+ }
+ // The cipher spec has the form name-mode-ivgen, e.g. "aes-xts-plain64".
+ cipherSpec := strings.SplitN(cipher, "-", 3)
+ if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 {
+ return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher)
+ }
+ if payloadSectorSize == 0 {
+ payloadSectorSize = V2SectorSize
+ }
+ switch payloadSectorSize {
+ default:
+ return nil, nil, -1, fmt.Errorf("invalid sector size %d", payloadSectorSize)
+ case 512, 1024, 2048, 4096:
+ }
+
+ // One random block provides the two header salts and the master-key
+ // digest salt.
+ headerSalts := make([]byte, v1SaltSize*3)
+ n, err := rand.Read(headerSalts)
+ if err != nil {
+ return nil, nil, -1, err
+ }
+ if n != len(headerSalts) {
+ return nil, nil, -1, errors.New("short read")
+ }
+ hSalt1 := headerSalts[:v1SaltSize]
+ hSalt2 := headerSalts[v1SaltSize : v1SaltSize*2]
+ mkeySalt := headerSalts[v1SaltSize*2:]
+
+ // LUKSv2 headers come in a fixed set of power-of-two sizes; pick the
+ // smallest one that fits the binary header plus the JSON area.
+ roundHeaderSize := func(size int) (int, error) {
+ switch {
+ case size < 0x4000:
+ return 0x4000, nil
+ case size < 0x8000:
+ return 0x8000, nil
+ case size < 0x10000:
+ return 0x10000, nil
+ case size < 0x20000:
+ return 0x20000, nil
+ case size < 0x40000:
+ return 0x40000, nil
+ case size < 0x80000:
+ return 0x80000, nil
+ case size < 0x100000:
+ return 0x100000, nil
+ case size < 0x200000:
+ return 0x200000, nil
+ case size < 0x400000:
+ return 0x400000, nil
+ }
+ return 0, fmt.Errorf("internal error: unsupported header size %d", size)
+ }
+
+ // Build the primary and secondary (backup) binary headers in parallel.
+ var h1, h2 V2Header
+ if err := h1.SetMagic(V2Magic1); err != nil {
+ return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err)
+ }
+ if err := h2.SetMagic(V2Magic2); err != nil {
+ return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err)
+ }
+ if err := h1.SetVersion(2); err != nil {
+ return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err)
+ }
+ if err := h2.SetVersion(2); err != nil {
+ return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err)
+ }
+ h1.SetSequenceID(1)
+ h2.SetSequenceID(1)
+ h1.SetLabel("")
+ h2.SetLabel("")
+ h1.SetChecksumAlgorithm("sha256")
+ h2.SetChecksumAlgorithm("sha256")
+ h1.SetSalt(hSalt1)
+ h2.SetSalt(hSalt2)
+ uuidString := uuid.NewString()
+ h1.SetUUID(uuidString)
+ h2.SetUUID(uuidString)
+ h1.SetHeaderOffset(0)
+ h2.SetHeaderOffset(0)
+ h1.SetChecksum(nil)
+ h2.SetChecksum(nil)
+
+ // Generate the volume master key; XTS splits its key in half, so it
+ // needs twice the bytes.
+ mkey := make([]byte, 32)
+ if cipherSpec[1] == "xts" {
+ mkey = make([]byte, 64)
+ }
+ n, err = rand.Read(mkey)
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
+ }
+ if n != len(mkey) {
+ return nil, nil, -1, errors.New("short read")
+ }
+
+ // Benchmark the KDFs to pick iteration/memory costs for this machine.
+ tuningSalt := make([]byte, v1SaltSize)
+ hasher, err := hasherByName(h1.ChecksumAlgorithm())
+ if err != nil {
+ return nil, nil, -1, errors.New("internal error")
+ }
+ iterations := IterationsPBKDF2(tuningSalt, len(mkey), hasher)
+ timeCost := 16
+ threadsCost := 16
+ memoryCost := MemoryCostArgon2(tuningSalt, len(mkey), timeCost, threadsCost)
+ priority := V2JSONKeyslotPriorityNormal
+ var stripes [][]byte
+ var keyslots []V2JSONKeyslot
+
+ // Digest "0" lets Decrypt() verify master-key candidates for segment "0".
+ mdigest := pbkdf2.Key(mkey, mkeySalt, iterations, len(hasher().Sum([]byte{})), hasher)
+ digest0 := V2JSONDigest{
+ Type: "pbkdf2",
+ Salt: mkeySalt,
+ Digest: mdigest,
+ Segments: []string{"0"},
+ V2JSONDigestPbkdf2: &V2JSONDigestPbkdf2{
+ Hash: h1.ChecksumAlgorithm(),
+ Iterations: iterations,
+ },
+ }
+
+ // Create one key slot per password: AF-split the master key and encrypt
+ // the split copy with an argon2i key derived from the password.
+ for i := range password {
+ keyslotSalt := make([]byte, v1SaltSize)
+ n, err := rand.Read(keyslotSalt)
+ if err != nil {
+ return nil, nil, -1, err
+ }
+ if n != len(keyslotSalt) {
+ return nil, nil, -1, errors.New("short read")
+ }
+ key := argon2.Key([]byte(password[i]), keyslotSalt, uint32(timeCost), uint32(memoryCost), uint8(threadsCost), uint32(len(mkey)))
+ split, err := afSplit(mkey, hasher(), V2Stripes)
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("splitting: %w", err)
+ }
+ striped, err := v2encrypt(cipher, 0, key, split, V1SectorSize, false)
+ if err != nil {
+ return nil, nil, -1, fmt.Errorf("encrypting: %w", err)
+ }
+ stripes = append(stripes, striped)
+ keyslot := V2JSONKeyslot{
+ Type: "luks2",
+ KeySize: len(mkey),
+ Area: V2JSONArea{
+ Type: "raw",
+ Offset: 10000000, // gets updated later
+ Size: int64(roundUpToMultiple(len(striped), V2AlignKeyslots)),
+ V2JSONAreaRaw: &V2JSONAreaRaw{
+ Encryption: cipher,
+ KeySize: len(key),
+ },
+ },
+ Priority: &priority,
+ V2JSONKeyslotLUKS2: &V2JSONKeyslotLUKS2{
+ AF: V2JSONAF{
+ Type: "luks1",
+ V2JSONAFLUKS1: &V2JSONAFLUKS1{
+ Stripes: V2Stripes,
+ Hash: h1.ChecksumAlgorithm(),
+ },
+ },
+ Kdf: V2JSONKdf{
+ Type: "argon2i",
+ Salt: keyslotSalt,
+ V2JSONKdfArgon2i: &V2JSONKdfArgon2i{
+ Time: timeCost,
+ Memory: memoryCost,
+ CPUs: threadsCost,
+ },
+ },
+ },
+ }
+ keyslots = append(keyslots, keyslot)
+ digest0.Keyslots = append(digest0.Keyslots, strconv.Itoa(i))
+ }
+
+ segment0 := V2JSONSegment{
+ Type: "crypt",
+ Offset: "10000000", // gets updated later
+ Size: "dynamic",
+ V2JSONSegmentCrypt: &V2JSONSegmentCrypt{
+ IVTweak: 0,
+ Encryption: cipher,
+ SectorSize: payloadSectorSize,
+ },
+ }
+
+ j := V2JSON{
+ Config: V2JSONConfig{},
+ Keyslots: map[string]V2JSONKeyslot{},
+ Digests: map[string]V2JSONDigest{},
+ Segments: map[string]V2JSONSegment{},
+ Tokens: map[string]V2JSONToken{},
+ }
+ // The JSON area records offsets that depend on the size of the encoded
+ // JSON itself, so re-encode until the values stop changing.
+rebuild:
+ j.Digests["0"] = digest0
+ j.Segments["0"] = segment0
+ encodedJSON, err := json.Marshal(j)
+ if err != nil {
+ return nil, nil, -1, err
+ }
+ headerPlusPaddedJsonSize, err := roundHeaderSize(int(V2SectorSize) /* binary header */ + len(encodedJSON) + 1)
+ if err != nil {
+ return nil, nil, -1, err
+ }
+ if j.Config.JsonSize != headerPlusPaddedJsonSize-V2SectorSize {
+ j.Config.JsonSize = headerPlusPaddedJsonSize - V2SectorSize
+ goto rebuild
+ }
+
+ if h1.HeaderSize() != uint64(headerPlusPaddedJsonSize) {
+ h1.SetHeaderSize(uint64(headerPlusPaddedJsonSize))
+ h2.SetHeaderSize(uint64(headerPlusPaddedJsonSize))
+ h1.SetHeaderOffset(0)
+ h2.SetHeaderOffset(uint64(headerPlusPaddedJsonSize))
+ goto rebuild
+ }
+
+ // Key slot data starts after both header copies; reserve room for at
+ // least 64 slots so the layout matches what cryptsetup expects.
+ keyslotsOffset := h2.HeaderOffset() * 2
+ maxKeys := len(password)
+ if maxKeys < 64 {
+ maxKeys = 64
+ }
+ for i := 0; i < len(password); i++ {
+ oldOffset := keyslots[i].Area.Offset
+ keyslots[i].Area.Offset = int64(keyslotsOffset) + int64(roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots))*int64(i)
+ j.Keyslots[strconv.Itoa(i)] = keyslots[i]
+ if keyslots[i].Area.Offset != oldOffset {
+ goto rebuild
+ }
+ }
+ keyslotsSize := roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots) * maxKeys
+ if j.Config.KeyslotsSize != keyslotsSize {
+ j.Config.KeyslotsSize = keyslotsSize
+ goto rebuild
+ }
+
+ segmentOffsetInt := roundUpToMultiple(int(keyslotsOffset)+j.Config.KeyslotsSize, V2SectorSize)
+ segmentOffset := strconv.Itoa(segmentOffsetInt)
+ if segment0.Offset != segmentOffset {
+ segment0.Offset = segmentOffset
+ goto rebuild
+ }
+
+ // Each header's checksum covers its binary header (with a zeroed
+ // checksum field), the encoded JSON, and the zero padding out to the
+ // full header size.
+ d1 := hasher()
+ h1.SetChecksum(nil)
+ d1.Write(h1[:])
+ d1.Write(encodedJSON)
+ zeropad := make([]byte, headerPlusPaddedJsonSize-len(h1)-len(encodedJSON))
+ d1.Write(zeropad)
+ h1.SetChecksum(d1.Sum(nil))
+ d2 := hasher()
+ h2.SetChecksum(nil)
+ d2.Write(h2[:])
+ d2.Write(encodedJSON)
+ // Fix: the padding must be folded into the secondary header's digest
+ // (d2); it was previously written to d1, which had already been summed,
+ // leaving h2's checksum computed over an unpadded header.
+ d2.Write(zeropad)
+ h2.SetChecksum(d2.Sum(nil))
+
+ // Assemble both header copies, their JSON areas, and the key material.
+ head := make([]byte, segmentOffsetInt)
+ copy(head, h1[:])
+ copy(head[V2SectorSize:], encodedJSON)
+ copy(head[h2.HeaderOffset():], h2[:])
+ copy(head[h2.HeaderOffset()+V2SectorSize:], encodedJSON)
+ for i := 0; i < len(password); i++ {
+ iAsString := strconv.Itoa(i)
+ copy(head[j.Keyslots[iAsString].Area.Offset:], stripes[i])
+ }
+ // encryptStream encrypts consecutive payload chunks; ivTweak tracks the
+ // running sector number across calls.
+ ivTweak := 0
+ encryptStream := func(plaintext []byte) ([]byte, error) {
+ ciphertext, err := v2encrypt(cipher, ivTweak, mkey, plaintext, payloadSectorSize, true)
+ ivTweak += len(plaintext) / payloadSectorSize
+ return ciphertext, err
+ }
+ return head, encryptStream, segment0.SectorSize, nil
+}
diff --git a/encryption.go b/encryption.go
new file mode 100644
index 0000000..242bceb
--- /dev/null
+++ b/encryption.go
@@ -0,0 +1,572 @@
+package luksy
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "strings"
+
+ "github.com/aead/serpent"
+ "golang.org/x/crypto/cast5"
+ "golang.org/x/crypto/ripemd160"
+ "golang.org/x/crypto/twofish"
+ "golang.org/x/crypto/xts"
+)
+
+// v1encrypt encrypts plaintext with the named block cipher and cipher mode,
+// treating the input as a sequence of sectorSize-byte sectors whose per-sector
+// IV/tweak starts at ivTweak.  When bulk is true the sector number is scaled
+// for payload ("large sector") encryption; key-material encryption passes
+// false.  Returns the ciphertext, which is the same length as the plaintext.
+func v1encrypt(cipherName, cipherMode string, ivTweak int, key []byte, plaintext []byte, sectorSize int, bulk bool) ([]byte, error) {
+ var err error
+ var newBlockCipher func([]byte) (cipher.Block, error)
+ ciphertext := make([]byte, len(plaintext))
+
+ switch cipherName {
+ case "aes":
+ newBlockCipher = aes.NewCipher
+ case "twofish":
+ newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) }
+ case "cast5":
+ newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) }
+ case "serpent":
+ newBlockCipher = serpent.NewCipher
+ default:
+ return nil, fmt.Errorf("unsupported cipher %s", cipherName)
+ }
+ if sectorSize == 0 {
+ sectorSize = V1SectorSize
+ }
+ switch sectorSize {
+ default:
+ return nil, fmt.Errorf("invalid sector size %d", sectorSize)
+ case 512, 1024, 2048, 4096:
+ }
+
+ switch cipherMode {
+ case "ecb":
+ cipher, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption: %w", err)
+ }
+ // NOTE(review): this loop advances by cipher.BlockSize() but slices up
+ // to sectorSize bytes per step; cipher.Block.Encrypt only processes
+ // BlockSize bytes per call, so this relies on block-aligned input —
+ // confirm callers only pass block-aligned buffers.
+ for processed := 0; processed < len(plaintext); processed += cipher.BlockSize() {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
+ }
+ case "cbc-plain":
+ block, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption: %w", err)
+ }
+ // CBC with a 32-bit little-endian sector-number IV.
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ ivValue := processed/sectorSize + ivTweak
+ if bulk { // iv_large_sectors is not being used
+ ivValue *= sectorSize / V1SectorSize
+ }
+ iv0 := make([]byte, block.BlockSize())
+ binary.LittleEndian.PutUint32(iv0, uint32(ivValue))
+ cipher := cipher.NewCBCEncrypter(block, iv0)
+ cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
+ }
+ case "cbc-plain64":
+ block, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption: %w", err)
+ }
+ // Same as cbc-plain but with a 64-bit sector-number IV.
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ ivValue := processed/sectorSize + ivTweak
+ if bulk { // iv_large_sectors is not being used
+ ivValue *= sectorSize / V1SectorSize
+ }
+ iv0 := make([]byte, block.BlockSize())
+ binary.LittleEndian.PutUint64(iv0, uint64(ivValue))
+ cipher := cipher.NewCBCEncrypter(block, iv0)
+ cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
+ }
+ case "cbc-essiv:sha256":
+ // ESSIV: the per-sector IV is the sector number encrypted with a key
+ // that is the hash of the data key.
+ hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:")
+ hasher, err := hasherByName(hasherName)
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption using hash %s: %w", hasherName, err)
+ }
+ h := hasher()
+ h.Write(key)
+ makeiv, err := newBlockCipher(h.Sum(nil))
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption: %w", err)
+ }
+ block, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption: %w", err)
+ }
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ ivValue := (processed/sectorSize + ivTweak)
+ if bulk { // iv_large_sectors is not being used
+ ivValue *= sectorSize / V1SectorSize
+ }
+ plain0 := make([]byte, makeiv.BlockSize())
+ binary.LittleEndian.PutUint64(plain0, uint64(ivValue))
+ iv0 := make([]byte, makeiv.BlockSize())
+ makeiv.Encrypt(iv0, plain0)
+ cipher := cipher.NewCBCEncrypter(block, iv0)
+ cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
+ }
+ case "xts-plain":
+ cipher, err := xts.NewCipher(newBlockCipher, key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption: %w", err)
+ }
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ sector := uint64(processed/sectorSize + ivTweak)
+ if bulk { // iv_large_sectors is not being used
+ sector *= uint64(sectorSize / V1SectorSize)
+ }
+ // "plain" (as opposed to plain64) wraps the sector number at 32 bits.
+ sector = sector % 0x100000000
+ cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector)
+ }
+ case "xts-plain64":
+ cipher, err := xts.NewCipher(newBlockCipher, key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing encryption: %w", err)
+ }
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ sector := uint64(processed/sectorSize + ivTweak)
+ if bulk { // iv_large_sectors is not being used
+ sector *= uint64(sectorSize / V1SectorSize)
+ }
+ cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector)
+ }
+ default:
+ return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("cipher error: %w", err)
+ }
+ return ciphertext, nil
+}
+
+// v1decrypt is the inverse of v1encrypt: it decrypts ciphertext with the named
+// block cipher and cipher mode, treating the input as sectorSize-byte sectors
+// whose per-sector IV/tweak starts at ivTweak.  When bulk is true the sector
+// number is scaled for payload decryption; key-material decryption passes
+// false.  Returns the plaintext, which is the same length as the ciphertext.
+func v1decrypt(cipherName, cipherMode string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) {
+ var err error
+ var newBlockCipher func([]byte) (cipher.Block, error)
+ plaintext := make([]byte, len(ciphertext))
+
+ switch cipherName {
+ case "aes":
+ newBlockCipher = aes.NewCipher
+ case "twofish":
+ newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) }
+ case "cast5":
+ newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) }
+ case "serpent":
+ newBlockCipher = serpent.NewCipher
+ default:
+ return nil, fmt.Errorf("unsupported cipher %s", cipherName)
+ }
+ if sectorSize == 0 {
+ sectorSize = V1SectorSize
+ }
+ switch sectorSize {
+ default:
+ return nil, fmt.Errorf("invalid sector size %d", sectorSize)
+ case 512, 1024, 2048, 4096:
+ }
+
+ switch cipherMode {
+ case "ecb":
+ cipher, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption: %w", err)
+ }
+ // NOTE(review): this loop advances by cipher.BlockSize() but slices up
+ // to sectorSize bytes per step; cipher.Block.Decrypt only processes
+ // BlockSize bytes per call, so this relies on block-aligned input —
+ // confirm callers only pass block-aligned buffers.
+ for processed := 0; processed < len(ciphertext); processed += cipher.BlockSize() {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(ciphertext) {
+ blockLeft = len(ciphertext) - processed
+ }
+ cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
+ }
+ case "cbc-plain":
+ block, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption: %w", err)
+ }
+ // CBC with a 32-bit little-endian sector-number IV.
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ ivValue := processed/sectorSize + ivTweak
+ if bulk { // iv_large_sectors is not being used
+ ivValue *= sectorSize / V1SectorSize
+ }
+ iv0 := make([]byte, block.BlockSize())
+ binary.LittleEndian.PutUint32(iv0, uint32(ivValue))
+ cipher := cipher.NewCBCDecrypter(block, iv0)
+ cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
+ }
+ case "cbc-plain64":
+ block, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption: %w", err)
+ }
+ // Same as cbc-plain but with a 64-bit sector-number IV.
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ ivValue := processed/sectorSize + ivTweak
+ if bulk { // iv_large_sectors is not being used
+ ivValue *= sectorSize / V1SectorSize
+ }
+ iv0 := make([]byte, block.BlockSize())
+ binary.LittleEndian.PutUint64(iv0, uint64(ivValue))
+ cipher := cipher.NewCBCDecrypter(block, iv0)
+ cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
+ }
+ case "cbc-essiv:sha256":
+ // ESSIV: the per-sector IV is the sector number encrypted with a key
+ // that is the hash of the data key.
+ hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:")
+ hasher, err := hasherByName(hasherName)
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption using hash %s: %w", hasherName, err)
+ }
+ h := hasher()
+ h.Write(key)
+ makeiv, err := newBlockCipher(h.Sum(nil))
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption: %w", err)
+ }
+ block, err := newBlockCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption: %w", err)
+ }
+ for processed := 0; processed < len(plaintext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(plaintext) {
+ blockLeft = len(plaintext) - processed
+ }
+ ivValue := (processed/sectorSize + ivTweak)
+ if bulk { // iv_large_sectors is not being used
+ ivValue *= sectorSize / V1SectorSize
+ }
+ plain0 := make([]byte, makeiv.BlockSize())
+ binary.LittleEndian.PutUint64(plain0, uint64(ivValue))
+ iv0 := make([]byte, makeiv.BlockSize())
+ makeiv.Encrypt(iv0, plain0)
+ cipher := cipher.NewCBCDecrypter(block, iv0)
+ cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
+ }
+ case "xts-plain":
+ cipher, err := xts.NewCipher(newBlockCipher, key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption: %w", err)
+ }
+ for processed := 0; processed < len(ciphertext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(ciphertext) {
+ blockLeft = len(ciphertext) - processed
+ }
+ sector := uint64(processed/sectorSize + ivTweak)
+ if bulk { // iv_large_sectors is not being used
+ sector *= uint64(sectorSize / V1SectorSize)
+ }
+ // "plain" (as opposed to plain64) wraps the sector number at 32 bits.
+ sector = sector % 0x100000000
+ cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector)
+ }
+ case "xts-plain64":
+ cipher, err := xts.NewCipher(newBlockCipher, key)
+ if err != nil {
+ return nil, fmt.Errorf("initializing decryption: %w", err)
+ }
+ for processed := 0; processed < len(ciphertext); processed += sectorSize {
+ blockLeft := sectorSize
+ if processed+blockLeft > len(ciphertext) {
+ blockLeft = len(ciphertext) - processed
+ }
+ sector := uint64(processed/sectorSize + ivTweak)
+ if bulk { // iv_large_sectors is not being used
+ sector *= uint64(sectorSize / V1SectorSize)
+ }
+ cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector)
+ }
+ default:
+ return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("cipher error: %w", err)
+ }
+ return plaintext, nil
+}
+
+func v2encrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) {
+ var cipherName, cipherMode string
+ switch {
+ default:
+ cipherSpec := strings.SplitN(cipherSuite, "-", 2)
+ if len(cipherSpec) < 2 {
+ return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite)
+ }
+ cipherName = cipherSpec[0]
+ cipherMode = cipherSpec[1]
+ }
+ return v1encrypt(cipherName, cipherMode, ivTweak, key, ciphertext, sectorSize, bulk)
+}
+
+func v2decrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) {
+ var cipherName, cipherMode string
+ switch {
+ default:
+ cipherSpec := strings.SplitN(cipherSuite, "-", 2)
+ if len(cipherSpec) < 2 {
+ return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite)
+ }
+ cipherName = cipherSpec[0]
+ cipherMode = cipherSpec[1]
+ }
+ return v1decrypt(cipherName, cipherMode, ivTweak, key, ciphertext, sectorSize, bulk)
+}
+
+func diffuse(key []byte, h hash.Hash) []byte {
+ sum := make([]byte, len(key))
+ counter := uint32(0)
+ for summed := 0; summed < len(key); summed += h.Size() {
+ h.Reset()
+ var buf [4]byte
+ binary.BigEndian.PutUint32(buf[:], counter)
+ h.Write(buf[:])
+ needed := len(key) - summed
+ if needed > h.Size() {
+ needed = h.Size()
+ }
+ h.Write(key[summed : summed+needed])
+ partial := h.Sum(nil)
+ copy(sum[summed:summed+needed], partial)
+ counter++
+ }
+ return sum
+}
+
+func afMerge(splitKey []byte, h hash.Hash, keysize int, stripes int) ([]byte, error) {
+ if len(splitKey) != keysize*stripes {
+ return nil, fmt.Errorf("expected %d af bytes, got %d", keysize*stripes, len(splitKey))
+ }
+ d := make([]byte, keysize)
+ for i := 0; i < stripes-1; i++ {
+ for j := 0; j < keysize; j++ {
+ d[j] = d[j] ^ splitKey[i*keysize+j]
+ }
+ d = diffuse(d, h)
+ }
+ for j := 0; j < keysize; j++ {
+ d[j] = d[j] ^ splitKey[(stripes-1)*keysize+j]
+ }
+ return d, nil
+}
+
+func afSplit(key []byte, h hash.Hash, stripes int) ([]byte, error) {
+ keysize := len(key)
+ s := make([]byte, keysize*stripes)
+ d := make([]byte, keysize)
+ n, err := rand.Read(s[0 : (keysize-1)*stripes])
+ if err != nil {
+ return nil, err
+ }
+ if n != (keysize-1)*stripes {
+ return nil, fmt.Errorf("short read when attempting to read random data: %d < %d", n, (keysize-1)*stripes)
+ }
+ for i := 0; i < stripes-1; i++ {
+ for j := 0; j < keysize; j++ {
+ d[j] = d[j] ^ s[i*keysize+j]
+ }
+ d = diffuse(d, h)
+ }
+ for j := 0; j < keysize; j++ {
+ s[(stripes-1)*keysize+j] = d[j] ^ key[j]
+ }
+ return s, nil
+}
+
// roundUpToMultiple returns the smallest multiple of factor that is >= i.
// Negative i rounds to 0, and a factor below 1 leaves i unchanged.
func roundUpToMultiple(i, factor int) int {
	switch {
	case i < 0:
		return 0
	case factor < 1:
		return i
	}
	remainder := i % factor
	if remainder == 0 {
		return i
	}
	return i + factor - remainder
}
+
// roundDownToMultiple returns the largest multiple of factor that is <= i.
// Negative i rounds to 0, and a factor below 1 leaves i unchanged.
func roundDownToMultiple(i, factor int) int {
	if i < 0 {
		return 0
	}
	if factor < 1 {
		return i
	}
	return (i / factor) * factor
}
+
+func hasherByName(name string) (func() hash.Hash, error) {
+ switch name {
+ case "sha1":
+ return sha1.New, nil
+ case "sha256":
+ return sha256.New, nil
+ case "sha512":
+ return sha512.New, nil
+ case "ripemd160":
+ return ripemd160.New, nil
+ default:
+ return nil, fmt.Errorf("unsupported digest algorithm %q", name)
+ }
+}
+
// wrapper buffers reads or writes through a block transform function
// (encryption or decryption) that is only ever handed whole multiples of
// blockSize.  Exactly one of reader or writer is set, depending on whether
// the wrapper was built by DecryptReader or EncryptWriter.
type wrapper struct {
	fn        func(plaintext []byte) ([]byte, error) // block transform applied in place-sized chunks
	blockSize int                                    // fn is only called with multiples of this size
	buf       []byte                                 // staging buffer
	buffered  int                                    // bytes of buf currently occupied
	processed int                                    // leading bytes of buf already run through fn
	reader    io.Reader                              // data source (DecryptReader case)
	eof       bool                                   // reader has returned io.EOF
	writer    io.Writer                              // data sink (EncryptWriter case)
}
+
// partialWrite transforms as many buffered-but-untransformed whole blocks
// as possible, then flushes the transformed prefix of the buffer to the
// underlying writer and slides any remaining bytes to the front.
func (w *wrapper) partialWrite() error {
	if w.buffered-w.processed >= w.blockSize {
		// Transform every complete block that hasn't been processed yet.
		toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize)
		processed, err := w.fn(w.buf[w.processed : w.processed+toProcess])
		if err != nil {
			return err
		}
		// fn may return a fresh slice; copy its output back in place.
		nProcessed := copy(w.buf[w.processed:], processed)
		w.processed += nProcessed
	}
	if w.processed >= w.blockSize {
		// Flush the processed prefix, then move the unflushed tail down.
		nWritten, err := w.writer.Write(w.buf[:w.processed])
		if err != nil {
			return err
		}
		copy(w.buf, w.buf[nWritten:w.buffered])
		w.buffered -= nWritten
		w.processed -= nWritten
		// A short write leaves processed > 0 here; treat it as an error.
		if w.processed != 0 {
			return fmt.Errorf("short write: %d != %d", nWritten, nWritten+w.processed)
		}
	}
	return nil
}
+
+func (w *wrapper) Write(buf []byte) (int, error) {
+ n := 0
+ for n < len(buf) {
+ nBuffered := copy(w.buf[w.buffered:], buf[n:])
+ w.buffered += nBuffered
+ n += nBuffered
+ if err := w.partialWrite(); err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
// Read implements io.Reader.  It refills the staging buffer from the
// underlying reader, transforms as many complete blocks as possible, and
// copies transformed bytes out to buf.  Data that doesn't yet fill a whole
// block stays buffered until more arrives.
//
// NOTE(review): when only a partial block remains buffered at EOF, this
// returns (0, nil) rather than io.EOF, since buffered != 0 — a caller that
// loops strictly until EOF could spin; confirm this is intended.
func (w *wrapper) Read(buf []byte) (int, error) {
	n := 0
	for n < len(buf) {
		if !w.eof {
			nRead, err := w.reader.Read(w.buf[w.buffered:])
			if err != nil {
				if !errors.Is(err, io.EOF) {
					w.buffered += nRead
					return n, err
				}
				// Remember EOF instead of returning immediately, so data
				// that was read along with it can still be drained.
				w.eof = true
			}
			w.buffered += nRead
		}
		if w.buffered == 0 && w.eof {
			return n, io.EOF
		}
		if w.buffered-w.processed >= w.blockSize {
			// Transform every complete block that hasn't been processed yet.
			toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize)
			processed, err := w.fn(w.buf[w.processed : w.processed+toProcess])
			if err != nil {
				return n, err
			}
			nProcessed := copy(w.buf[w.processed:], processed)
			w.processed += nProcessed
		}
		// Hand out processed bytes, then slide the remainder to the front.
		nRead := copy(buf[n:], w.buf[:w.processed])
		n += nRead
		copy(w.buf, w.buf[nRead:w.buffered])
		w.processed -= nRead
		w.buffered -= nRead
		if w.buffered-w.processed < w.blockSize {
			break
		}
	}
	return n, nil
}
+
+func (w *wrapper) Close() error {
+ if w.writer != nil {
+ if w.buffered%w.blockSize != 0 {
+ nPadding := w.blockSize - w.buffered%w.blockSize
+ nWritten, err := w.Write(make([]byte, nPadding))
+ if err != nil {
+ return fmt.Errorf("flushing write: %v", err)
+ }
+ if nWritten < nPadding {
+ return fmt.Errorf("flushing write: %d != %d", nPadding, nWritten)
+ }
+ }
+ }
+ return nil
+}
+
+// EncryptWriter creates an io.WriteCloser which buffers writes through an
+// encryption function, transforming and writing multiples of the blockSize.
+// After writing a final block, the returned writer should be closed.
+// If only a partial block has been written when Close() is called, a final
+// block with its length padded with zero bytes will be transformed and
+// written.
+func EncryptWriter(fn func(plaintext []byte) ([]byte, error), writer io.Writer, blockSize int) io.WriteCloser {
+ bufferSize := roundUpToMultiple(1024*1024, blockSize)
+ return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), writer: writer}
+}
+
+// DecryptReader creates an io.ReadCloser which buffers reads through a
+// decryption function, decrypting and returning multiples of the blockSize
+// until it reaches the end of the file. When data will no longer be read, the
+// returned reader should be closed.
+func DecryptReader(fn func(ciphertext []byte) ([]byte, error), reader io.Reader, blockSize int) io.ReadCloser {
+ bufferSize := roundUpToMultiple(1024*1024, blockSize)
+ return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), reader: reader}
+}
diff --git a/encryption_test.go b/encryption_test.go
new file mode 100644
index 0000000..b5fc7c7
--- /dev/null
+++ b/encryption_test.go
@@ -0,0 +1,309 @@
+package luksy
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/ripemd160"
+)
+
// Compile-time checks that wrapper satisfies the stream interfaces it is
// used through (EncryptWriter/DecryptReader).
var (
	_ io.Writer = &wrapper{}
	_ io.Reader = &wrapper{}
)
+
// Test_HeaderSizes pins the serialized on-disk sizes of the LUKSv1 and
// LUKSv2 header structs, so accidental field changes are caught.
func Test_HeaderSizes(t *testing.T) {
	assert.Equal(t, 592, v1HeaderStructSize, "BUG: v1 header size is off")
	assert.Equal(t, 4096, v2HeaderStructSize, "BUG: v2 header size is off")
}
+
+func Test_AFroundtrip(t *testing.T) {
+ type hashmaker func() hash.Hash
+ hashes := map[string]hashmaker{
+ "sha1": sha1.New,
+ "sha256": sha256.New,
+ "sha512": sha512.New,
+ "ripemd160": ripemd160.New,
+ }
+ for _, stripes := range []int{2, 4, 1000, 4000} {
+ for hashName, hashMaker := range hashes {
+ t.Run(fmt.Sprintf("%s:%d", hashName, stripes), func(t *testing.T) {
+ h := hashMaker
+ key := make([]byte, 32)
+ n, err := rand.Read(key)
+ require.Nil(t, err, "unexpected error reading random data")
+ require.Equal(t, len(key), n, "short read while reading random data")
+
+ split, err := afSplit(key, h(), 4000)
+ require.Nil(t, err, "unexpected error splitting key")
+
+ recovered, err := afMerge(split, h(), len(key), 4000)
+ require.Nil(t, err, "unexpected error merging key")
+
+ assert.Equal(t, key, recovered, "data was changed")
+ })
+ }
+ }
+}
+
// Test_enc_roundtrip checks that v1encrypt followed by v1decrypt is the
// identity for every supported cipher/mode/key-size combination across a
// range of payload sizes.  sectorSize 0 presumably selects the
// implementation's default sector size — confirm against v1encrypt.
func Test_enc_roundtrip(t *testing.T) {
	type testCases struct {
		cipher, mode string
		keysize      int
		datasize     int
	}
	// Note: xts modes use double-length keys (two 256-bit halves for "64").
	for _, testCase := range []testCases{
		{"aes", "ecb", 16, 16},
		{"aes", "ecb", 16, 256},
		{"aes", "ecb", 16, 2048},
		{"aes", "ecb", 16, 65536},
		{"aes", "ecb", 24, 16},
		{"aes", "ecb", 24, 256},
		{"aes", "ecb", 24, 2048},
		{"aes", "ecb", 24, 65536},
		{"aes", "ecb", 32, 16},
		{"aes", "ecb", 32, 256},
		{"aes", "ecb", 32, 2048},
		{"aes", "ecb", 32, 65536},
		{"aes", "cbc-plain", 16, 256},
		{"aes", "cbc-plain", 16, 2048},
		{"aes", "cbc-plain", 16, 65536},
		{"aes", "cbc-plain64", 16, 256},
		{"aes", "cbc-plain64", 16, 2048},
		{"aes", "cbc-plain64", 16, 65536},
		{"aes", "cbc-plain", 32, 256},
		{"aes", "cbc-plain", 32, 2048},
		{"aes", "cbc-plain", 32, 65536},
		{"aes", "cbc-plain64", 32, 256},
		{"aes", "cbc-plain64", 32, 2048},
		{"aes", "cbc-plain64", 32, 65536},
		{"aes", "cbc-essiv:sha256", 32, 256},
		{"aes", "cbc-essiv:sha256", 32, 2048},
		{"aes", "cbc-essiv:sha256", 32, 65536},
		{"aes", "xts-plain", 64, 256},
		{"aes", "xts-plain", 64, 2048},
		{"aes", "xts-plain", 64, 65536},
		{"aes", "xts-plain64", 64, 256},
		{"aes", "xts-plain64", 64, 2048},
		{"aes", "xts-plain64", 64, 65536},
		{"serpent", "xts-plain", 64, 256},
		{"serpent", "xts-plain", 64, 2048},
		{"serpent", "xts-plain", 64, 65536},
		{"serpent", "xts-plain64", 64, 256},
		{"serpent", "xts-plain64", 64, 2048},
		{"serpent", "xts-plain64", 64, 65536},
		{"twofish", "xts-plain", 64, 256},
		{"twofish", "xts-plain", 64, 2048},
		{"twofish", "xts-plain", 64, 65536},
		{"twofish", "xts-plain64", 64, 256},
		{"twofish", "xts-plain64", 64, 2048},
		{"twofish", "xts-plain64", 64, 65536},
	} {
		t.Run(fmt.Sprintf("%s-%s-%d:%d", testCase.cipher, testCase.mode, testCase.keysize, testCase.datasize), func(t *testing.T) {
			key := make([]byte, testCase.keysize)
			n, err := rand.Read(key)
			require.Nil(t, err, "unexpected error reading random data")
			require.Equalf(t, len(key), n, "short read while reading random data: %d < %d", n, len(key))
			// A deterministic patterned payload makes corruption easy to spot.
			data := make([]byte, testCase.datasize)
			for i := 0; i < len(data); i++ {
				data[i] = uint8(i & 0xff)
			}
			encrypted, err := v1encrypt(testCase.cipher, testCase.mode, 0, key, data, 0, false)
			require.Nil(t, err, "unexpected error encrypting data")
			decrypted, err := v1decrypt(testCase.cipher, testCase.mode, 0, key, encrypted, 0, false)
			require.Nil(t, err, "unexpected error decrypting data")
			assert.Equal(t, data, decrypted, "data was altered somewhere")
		})
	}
}
+
+func Test_roundUpToMultiple(t *testing.T) {
+ type testCases struct {
+ input, factor, result int
+ }
+ for _, testCase := range []testCases{
+ {1, 2048, 2048},
+ {2048, 2048, 2048},
+ {4095, 2048, 4096},
+ {4096, 2048, 4096},
+ {4097, 2048, 6144},
+ } {
+ t.Run(fmt.Sprintf("%d~^~%d", testCase.input, testCase.factor), func(t *testing.T) {
+ assert.Equal(t, testCase.result, roundUpToMultiple(testCase.input, testCase.factor))
+ })
+ }
+}
+
+func Test_roundDownToMultiple(t *testing.T) {
+ type testCases struct {
+ input, factor, result int
+ }
+ for _, testCase := range []testCases{
+ {1, 2048, 0},
+ {2048, 2048, 2048},
+ {4095, 2048, 2048},
+ {4096, 2048, 4096},
+ {4097, 2048, 4096},
+ } {
+ t.Run(fmt.Sprintf("%d~v~%d", testCase.input, testCase.factor), func(t *testing.T) {
+ assert.Equal(t, testCase.result, roundDownToMultiple(testCase.input, testCase.factor))
+ })
+ }
+}
+
// TestWrappers is an end-to-end test of EncryptWriter/DecryptReader against
// EncryptV1/EncryptV2 and header parsing: it encrypts a random payload to a
// temporary file (with a non-encrypted trailer appended), then re-opens the
// file, decrypts it, and checks both payload and trailer round-trip.
func TestWrappers(t *testing.T) {
	// sectorSize 0 selects the LUKSv1 path; nonzero selects LUKSv2.
	for _, sectorSize := range []int{0, 512, 4096} {
		var version string
		switch sectorSize {
		case 0:
			version = "v1"
		default:
			version = fmt.Sprintf("v2,sector=%d", sectorSize)
		}
		for payloadIndex, payloadLength := range []int{0x80, 0x100, 0x1000, 0x4000, 0x10000, 0x100000} {
			for _, trailerLength := range []int{0, 1, 0x1ff, 0x201, 0x1001} {
				for _, chunkSize := range []int{0x123, 0x1234, 0x12345} {
					// Skip chunk sizes larger than the whole stream, except
					// for the very first (smallest) payload size.
					if payloadIndex > 0 && chunkSize > payloadLength+trailerLength+sectorSize+512 {
						continue
					}
					t.Run(fmt.Sprintf("%s,payload=%d,trailer=%d,chunk=%d", version, payloadLength, trailerLength, chunkSize), func(t *testing.T) {
						password := t.Name()
						buf := make([]byte, payloadLength)
						n, err := rand.Read(buf)
						require.NoError(t, err)
						require.Equal(t, len(buf), n)

						// Build a header plus per-block encryption function.
						var header []byte
						var encrypt func([]byte) ([]byte, error)
						var blockSize int
						switch sectorSize {
						case 0:
							header, encrypt, blockSize, err = EncryptV1([]string{password}, "")
						default:
							header, encrypt, blockSize, err = EncryptV2([]string{password}, "", sectorSize)
						}
						require.NoError(t, err)
						require.NotNil(t, header)
						require.NotZero(t, blockSize)

						tempdir := t.TempDir()
						encryptedFile := filepath.Join(tempdir, "encrypted")

						// Write header, then the payload in chunkSize pieces
						// through the encrypting writer.
						f, err := os.Create(encryptedFile)
						require.NoError(t, err)
						writeCloser := io.WriteCloser(f)
						n, err = writeCloser.Write(header)
						require.NoError(t, err)
						require.Equal(t, len(header), n)
						encrypter := EncryptWriter(encrypt, writeCloser, blockSize)
						var nWritten int
						for offset := 0; offset < len(buf); offset += chunkSize {
							chunkLength := chunkSize
							if offset+chunkLength > len(buf) {
								chunkLength = len(buf) - offset
							}
							written, err := encrypter.Write(buf[offset : offset+chunkLength])
							require.NoError(t, err)
							nWritten += written
						}
						require.Equal(t, len(buf), nWritten)
						err = encrypter.Close()
						require.NoError(t, err)
						// Append an unencrypted trailer after the payload.
						trailer := make([]byte, trailerLength)
						copy(trailer, "TEST")
						nWritten, err = writeCloser.Write(trailer)
						require.NoError(t, err)
						require.Equal(t, len(trailer), nWritten)

						f, err = os.Open(encryptedFile)
						require.NoError(t, err)
						v1header, v2headerA, v2headerB, v2json, err := ReadHeaders(f, ReadHeaderOptions{})
						require.NoError(t, err)

						var decrypt func([]byte) ([]byte, error)
						var payloadOffset int64
						// NOTE(review): this shadows the loop variable
						// payloadLength (int) with an int64 from the header.
						var payloadLength int64
						switch sectorSize {
						case 0:
							require.NotNil(t, v1header)
							// Wrong password must fail.
							_, _, _, _, err = v1header.Decrypt("", f)
							assert.Error(t, err)

							decrypt, blockSize, payloadOffset, payloadLength, err = v1header.Decrypt(password, f)
							require.NoError(t, err)
							require.NotZero(t, blockSize)
							require.NotZero(t, payloadOffset)
							require.NotZero(t, payloadLength)
							assert.GreaterOrEqual(t, payloadLength, int64(len(buf)))
						default:
							require.NotNil(t, v2headerA)
							require.NotNil(t, v2headerB)
							require.NotNil(t, v2json)
							// Wrong password must fail against both headers.
							_, _, _, _, err = v2headerA.Decrypt("", f, *v2json)
							assert.Error(t, err)
							_, _, _, _, err = v2headerB.Decrypt("", f, *v2json)
							assert.Error(t, err)

							decrypt, blockSize, payloadOffset, payloadLength, err = v2headerA.Decrypt(password, f, *v2json)
							require.NoError(t, err)
							require.NotZero(t, blockSize)
							require.NotZero(t, payloadOffset)
							require.NotZero(t, payloadLength)
							assert.GreaterOrEqual(t, payloadLength, int64(len(buf)))
						}

						_, err = f.Seek(payloadOffset, io.SeekStart)
						require.NoError(t, err)

						// Read the payload back in chunkSize pieces.
						decrypter := DecryptReader(decrypt, f, blockSize)
						otherBuf := make([]byte, payloadLength)

						var nRead int
						var sawEOF bool
						for offset := 0; offset < len(otherBuf); offset += chunkSize {
							chunkLength := chunkSize
							if offset+chunkLength > len(otherBuf) {
								chunkLength = len(otherBuf) - offset
							}
							read, err := decrypter.Read(otherBuf[offset : offset+chunkLength])
							if err != nil {
								if !errors.Is(err, io.EOF) {
									require.NoError(t, err)
								}
								sawEOF = true
							}
							nRead += read
							if nRead == 0 && sawEOF {
								break
							}
						}
						err = decrypter.Close()
						require.NoError(t, err)
						// The reader only yields whole blocks.
						require.Equal(t, roundDownToMultiple(int(payloadLength), blockSize), nRead)
						require.Equal(t, buf, otherBuf[:len(buf)])

						// The trailer must have survived untouched.
						_, err = f.Seek(-int64(len(trailer)), io.SeekEnd)
						require.NoError(t, err)

						otherTrailer := make([]byte, len(trailer))
						nRead, err = f.Read(otherTrailer)
						require.NoError(t, err)
						require.Equal(t, len(trailer), nRead)
						require.Equal(t, trailer, otherTrailer)
					})
				}
			}
		}
	}
}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..b2d826e
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,22 @@
+module github.com/containers/luksy
+
+go 1.20
+
+require (
+ github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6
+ github.com/google/uuid v1.3.1
+ github.com/sirupsen/logrus v1.9.3
+ github.com/spf13/cobra v1.7.0
+ github.com/stretchr/testify v1.8.4
+ golang.org/x/crypto v0.14.0
+ golang.org/x/term v0.13.0
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ golang.org/x/sys v0.13.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..285ab7f
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,35 @@
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/luks.go b/luks.go
new file mode 100644
index 0000000..c584c5f
--- /dev/null
+++ b/luks.go
@@ -0,0 +1,75 @@
+package luksy
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
// ReadHeaderOptions can control some of what ReadHeaders() does.
// It is currently empty; fields can be added later without breaking callers.
type ReadHeaderOptions struct{}
+
+// ReadHeaders reads LUKS headers from the specified file, returning either a
+// LUKSv1 header, or two LUKSv2 headers and a LUKSv2 JSON block, depending on
+// which format is detected.
// ReadHeaders reads LUKS headers from the specified file, returning either a
// LUKSv1 header, or two LUKSv2 headers and a LUKSv2 JSON block, depending on
// which format is detected.
func ReadHeaders(f io.ReaderAt, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) {
	var v1 V1Header
	var v2a, v2b V2Header
	// Read the start of the file as both a v2 header and a v1 header; the
	// version field decides which interpretation is used.
	n, err := f.ReadAt(v2a[:], 0)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	if n != len(v2a) {
		return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n)
	}
	if n, err = f.ReadAt(v1[:], 0); err != nil {
		return nil, nil, nil, nil, err
	}
	if n != len(v1) {
		return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n)
	}
	// V1 and V2 headers share the same leading magic, so V2Magic1 is checked
	// before the version is examined — presumably valid for both; confirm
	// against the on-disk format spec.
	if v2a.Magic() != V2Magic1 {
		return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in LUKS header (%q)", v2a.Magic())
	}
	switch v2a.Version() { // is it a v1 header, or the first v2 header?
	case 1:
		return &v1, nil, nil, nil, nil
	case 2:
		// The secondary v2 header sits at the offset given by the primary
		// header's size field; guard against values that overflow int64.
		size := v2a.HeaderSize()
		if size > 0x7fffffffffffffff {
			return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for second header")
		}
		if size < 4096 {
			return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for JSON data")
		}
		if n, err = f.ReadAt(v2b[:], int64(size)); err != nil || n != len(v2b) {
			if err == nil && n != len(v2b) {
				err = fmt.Errorf("short read: read only %d bytes, should have read %d", n, len(v2b))
			}
			return nil, nil, nil, nil, err
		}
		// The secondary header carries a different magic value.
		if v2b.Magic() != V2Magic2 {
			return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in second LUKS header (%q)", v2b.Magic())
		}
		// The JSON area occupies the space between the 4096-byte binary
		// header and the start of the secondary header.
		jsonSize := size - 4096
		buf := make([]byte, jsonSize)
		n, err = f.ReadAt(buf[:], 4096)
		if err != nil {
			return nil, nil, nil, nil, fmt.Errorf("internal error: while reading JSON data: %w", err)
		}
		if n < 0 || uint64(n) != jsonSize {
			return nil, nil, nil, nil, fmt.Errorf("internal error: short read while reading JSON data (wanted %d, got %d)", jsonSize, n)
		}
		var jsonData V2JSON
		// The JSON area is NUL-padded to its full size; strip the padding
		// before unmarshalling.
		buf = bytes.TrimRightFunc(buf, func(r rune) bool { return r == 0 })
		if err = json.Unmarshal(buf, &jsonData); err != nil {
			return nil, nil, nil, nil, fmt.Errorf("internal error: decoding JSON data: %w", err)
		}
		// Cross-check the size recorded inside the JSON against the layout.
		if uint64(jsonData.Config.JsonSize) != jsonSize {
			return nil, nil, nil, nil, fmt.Errorf("internal error: JSON data size mismatch: (expected %d, used %d)", jsonData.Config.JsonSize, jsonSize)
		}
		return nil, &v2a, &v2b, &jsonData, nil
	}
	return nil, nil, nil, nil, fmt.Errorf("error reading LUKS header - magic identifier not found")
}
diff --git a/tests/passwords.bats b/tests/passwords.bats
new file mode 100755
index 0000000..e32a077
--- /dev/null
+++ b/tests/passwords.bats
@@ -0,0 +1,238 @@
+#!/usr/bin/env bats
+
+luksy=${LUKSY:-${BATS_TEST_DIRNAME}/../luksy}
+
# Encrypt a 64 MB random file with three passwords of different lengths in a
# single luksy invocation, then verify with cryptsetup that each password can
# unlock the result.  Extra arguments are passed through to "luksy encrypt".
function multiple_passwords() {
  dd if=/dev/urandom bs=1M count=64 of=${BATS_TEST_TMPDIR}/plaintext status=none
  # NOTE(review): "passwords" is declared but never used.
  local passwords
  echo -n short > ${BATS_TEST_TMPDIR}/short
  echo -n morethaneight > ${BATS_TEST_TMPDIR}/morethaneight
  echo -n morethansixteenchars > ${BATS_TEST_TMPDIR}/morethansixteenchars
  ${luksy} encrypt --password-file ${BATS_TEST_TMPDIR}/short --password-file ${BATS_TEST_TMPDIR}/morethaneight --password-file ${BATS_TEST_TMPDIR}/morethansixteenchars "$@" ${BATS_TEST_TMPDIR}/plaintext ${BATS_TEST_TMPDIR}/encrypted
  for password in short morethaneight morethansixteenchars ; do
    echo testing password: "${password}"
    echo -n "${password}" | cryptsetup -q --test-passphrase --key-file - luksOpen ${BATS_TEST_TMPDIR}/encrypted
    echo password: "${password}" ok
  done
  rm -f ${BATS_TEST_TMPDIR}/encrypted
  rm -f ${BATS_TEST_TMPDIR}/plaintext
}
+
# Multi-password encryption, verified by cryptsetup, for both LUKS versions.
@test multiple-passwords-defaults-luks1 {
  multiple_passwords --luks1
}

@test multiple-passwords-defaults-luks2 {
  multiple_passwords
}
+
# For each of three password lengths, encrypt a 64 MB random file with luksy
# (passing extra arguments through to "luksy encrypt") and verify with
# cryptsetup that the password unlocks the result.
function passwords() {
  dd if=/dev/urandom bs=1M count=64 of=${BATS_TEST_TMPDIR}/plaintext status=none
  for password in short morethaneight morethansixteenchars ; do
    echo testing password: "${password}"
    echo -n "${password}" | ${luksy} encrypt --password-fd 0 "$@" ${BATS_TEST_TMPDIR}/plaintext ${BATS_TEST_TMPDIR}/encrypted
    echo -n "${password}" | cryptsetup -q --test-passphrase --key-file - luksOpen ${BATS_TEST_TMPDIR}/encrypted
    echo password: "${password}" ok
    rm -f ${BATS_TEST_TMPDIR}/encrypted
  done
  rm -f ${BATS_TEST_TMPDIR}/plaintext
}
+
# luksy-encrypt / cryptsetup-verify, across every supported cipher and mode,
# for LUKS1, LUKS2, and LUKS2 with 512-byte sectors.
@test passwords-defaults-luks1 {
  passwords --luks1
}

@test passwords-defaults-luks2 {
  passwords
}

@test passwords-defaults-luks2-512 {
  passwords --sector-size 512
}

@test passwords-aes-xts-plain32-luks1 {
  passwords --cipher aes-xts-plain --luks1
}

@test passwords-aes-xts-plain32-luks2 {
  passwords --cipher aes-xts-plain
}

@test passwords-aes-xts-plain32-luks2-512 {
  passwords --cipher aes-xts-plain --sector-size 512
}

@test passwords-aes-xts-plain64-luks1 {
  passwords --cipher aes-xts-plain64 --luks1
}

@test passwords-aes-xts-plain64-luks2 {
  passwords --cipher aes-xts-plain64
}

@test passwords-aes-xts-plain64-luks2-512 {
  passwords --cipher aes-xts-plain64 --sector-size 512
}

@test passwords-serpent-xts-plain64-luks1 {
  passwords --cipher serpent-xts-plain64 --luks1
}

@test passwords-serpent-xts-plain64-luks2 {
  passwords --cipher serpent-xts-plain64
}

@test passwords-serpent-xts-plain64-luks2-512 {
  passwords --cipher serpent-xts-plain64 --sector-size 512
}

@test passwords-twofish-xts-plain64-luks1 {
  passwords --cipher twofish-xts-plain64 --luks1
}

@test passwords-twofish-xts-plain64-luks2 {
  passwords --cipher twofish-xts-plain64
}

@test passwords-twofish-xts-plain64-luks2-512 {
  passwords --cipher twofish-xts-plain64 --sector-size 512
}

@test passwords-aes-cbc-plain32-luks1 {
  passwords --cipher aes-cbc-plain --luks1
}

@test passwords-aes-cbc-plain32-luks2 {
  passwords --cipher aes-cbc-plain
}

@test passwords-aes-cbc-plain32-luks2-512 {
  passwords --cipher aes-cbc-plain --sector-size 512
}

@test passwords-aes-cbc-plain64-luks1 {
  passwords --cipher aes-cbc-plain64 --luks1
}

@test passwords-aes-cbc-plain64-luks2 {
  passwords --cipher aes-cbc-plain64
}

@test passwords-aes-cbc-plain64-luks2-512 {
  passwords --cipher aes-cbc-plain64 --sector-size 512
}

@test passwords-aes-cbc-essiv:sha256-luks1 {
  passwords --cipher aes-cbc-essiv:sha256 --luks1
}

@test passwords-aes-cbc-essiv:sha256-luks2 {
  passwords --cipher aes-cbc-essiv:sha256
}

@test passwords-aes-cbc-essiv:sha256-luks2-512 {
  passwords --cipher aes-cbc-essiv:sha256 --sector-size 512
}
+
# Format a volume with cryptsetup (extra arguments pass through to
# luksFormat), add two more keys of different lengths, then verify luksy can
# decrypt using every one of the three passwords.
function multiple_passwords_cryptsetup() {
  dd if=/dev/urandom bs=1M count=64 of=${BATS_TEST_TMPDIR}/plaintext status=none
  # NOTE(review): "passwords" is declared but never used.
  local passwords
  fallocate -l 1G ${BATS_TEST_TMPDIR}/encrypted
  echo -n short | cryptsetup luksFormat -q "$@" ${BATS_TEST_TMPDIR}/encrypted -
  echo -n morethaneight > ${BATS_TEST_TMPDIR}/new-key
  echo -n short | cryptsetup luksAddKey ${BATS_TEST_TMPDIR}/encrypted ${BATS_TEST_TMPDIR}/new-key
  echo -n morethansixteenchars > ${BATS_TEST_TMPDIR}/new-key
  echo -n short | cryptsetup luksAddKey ${BATS_TEST_TMPDIR}/encrypted ${BATS_TEST_TMPDIR}/new-key
  for password in short morethaneight morethansixteenchars; do
    echo testing password: "${password}"
    echo -n "${password}" | ${luksy} decrypt --password-fd 0 ${BATS_TEST_TMPDIR}/encrypted
    echo password: "${password}" ok
  done
  rm -f ${BATS_TEST_TMPDIR}/encrypted
  rm -f ${BATS_TEST_TMPDIR}/plaintext
}
+
# cryptsetup-encrypt / luksy-decrypt with multiple keys, both LUKS versions.
@test multiple-passwords-cryptsetup-defaults-luks1 {
  multiple_passwords_cryptsetup --type luks1
}

@test multiple-passwords-cryptsetup-defaults-luks2 {
  multiple_passwords_cryptsetup --type luks2
}
+
# For each of three password lengths, format a volume with cryptsetup (extra
# arguments pass through to luksFormat) and verify that luksy can decrypt it
# with the same password — the inverse direction of passwords().
function passwords_cryptsetup() {
  dd if=/dev/urandom bs=1M count=64 of=${BATS_TEST_TMPDIR}/plaintext status=none
  for password in short morethaneight morethansixteenchars ; do
    echo testing password: "${password}"
    fallocate -l 1G ${BATS_TEST_TMPDIR}/encrypted
    echo -n "${password}" | cryptsetup luksFormat -q "$@" ${BATS_TEST_TMPDIR}/encrypted -
    echo -n "${password}" | ${luksy} decrypt --password-fd 0 ${BATS_TEST_TMPDIR}/encrypted
    rm -f ${BATS_TEST_TMPDIR}/encrypted
    echo password: "${password}" ok
  done
  rm -f ${BATS_TEST_TMPDIR}/plaintext
}
+
# cryptsetup-encrypt / luksy-decrypt, across every supported cipher and mode,
# for both LUKS versions.
@test passwords-cryptsetup-defaults-luks1 {
  passwords_cryptsetup --type luks1
}

@test passwords-cryptsetup-defaults-luks2 {
  passwords_cryptsetup --type luks2
}

@test passwords-cryptsetup-aes-xts-plain32-luks1 {
  passwords_cryptsetup --cipher aes-xts-plain --type luks1
}

@test passwords-cryptsetup-aes-xts-plain32-luks2 {
  passwords_cryptsetup --cipher aes-xts-plain --type luks2
}

@test passwords-cryptsetup-aes-xts-plain64-luks1 {
  passwords_cryptsetup --cipher aes-xts-plain64 --type luks1
}

@test passwords-cryptsetup-aes-xts-plain64-luks2 {
  passwords_cryptsetup --cipher aes-xts-plain64 --type luks2
}

@test passwords-cryptsetup-serpent-xts-plain64-luks1 {
  passwords_cryptsetup --cipher serpent-xts-plain64 --type luks1
}

@test passwords-cryptsetup-serpent-xts-plain64-luks2 {
  passwords_cryptsetup --cipher serpent-xts-plain64 --type luks2
}

@test passwords-cryptsetup-twofish-xts-plain64-luks1 {
  passwords_cryptsetup --cipher twofish-xts-plain64 --type luks1
}

@test passwords-cryptsetup-twofish-xts-plain64-luks2 {
  passwords_cryptsetup --cipher twofish-xts-plain64 --type luks2
}

@test passwords-cryptsetup-aes-cbc-plain32-luks1 {
  passwords_cryptsetup --cipher aes-cbc-plain --type luks1
}

@test passwords-cryptsetup-aes-cbc-plain32-luks2 {
  passwords_cryptsetup --cipher aes-cbc-plain --type luks2
}

@test passwords-cryptsetup-aes-cbc-plain64-luks1 {
  passwords_cryptsetup --cipher aes-cbc-plain64 --type luks1
}

@test passwords-cryptsetup-aes-cbc-plain64-luks2 {
  passwords_cryptsetup --cipher aes-cbc-plain64 --type luks2
}

@test passwords-cryptsetup-aes-cbc-essiv:sha256-luks1 {
  passwords_cryptsetup --cipher aes-cbc-essiv:sha256 --type luks1
}

@test passwords-cryptsetup-aes-cbc-essiv:sha256-luks2 {
  passwords_cryptsetup --cipher aes-cbc-essiv:sha256 --type luks2
}
diff --git a/tests/wrapping.bats b/tests/wrapping.bats
new file mode 100755
index 0000000..d63f5c7
--- /dev/null
+++ b/tests/wrapping.bats
@@ -0,0 +1,181 @@
#!/usr/bin/env bats

# Round-trip tests: encrypt an image with luksy, then verify that
# cryptsetup can unlock the result and that the decrypted contents match
# the original plaintext.

# Path to the luksy binary under test; override via $LUKSY.
luksy=${LUKSY:-${BATS_TEST_DIRNAME}/../luksy}

uuid=

# Close the mapped volume if a failed test left it open.  uuid is only
# non-empty between luksOpen and the matching close in the helpers below.
teardown() {
	if test -n "$uuid" ; then
		cryptsetup close decrypted
		uuid=
	fi
}

# wrapping encrypts 64 MB of random data with "luksy encrypt" for each
# sample password (short/medium/long, to cover different key-wrapping
# paths), opens the result with cryptsetup, and compares the mapped
# device against the original plaintext.  Extra arguments are passed
# through to "luksy encrypt".
function wrapping() {
	dd if=/dev/urandom bs=1M count=64 of=${BATS_TEST_TMPDIR}/plaintext status=none
	for password in short morethaneight morethansixteenchars ; do
		echo testing password: "${password}"
		echo -n "${password}" | ${luksy} encrypt --password-fd 0 "$@" ${BATS_TEST_TMPDIR}/plaintext ${BATS_TEST_TMPDIR}/encrypted
		# A readable UUID is a quick sanity check that the header parses.
		uuid=$(cryptsetup luksUUID ${BATS_TEST_TMPDIR}/encrypted)
		if test -z "$uuid"; then
			echo error reading UUID
			false
		fi
		echo -n "${password}" | cryptsetup -q --key-file - luksOpen ${BATS_TEST_TMPDIR}/encrypted decrypted
		cmp /dev/mapper/decrypted ${BATS_TEST_TMPDIR}/plaintext
		cryptsetup close decrypted
		uuid=
		rm -f ${BATS_TEST_TMPDIR}/encrypted
		echo password: "${password}" ok
	done
	rm -f ${BATS_TEST_TMPDIR}/plaintext
}
+
# Cipher/format matrix for the wrapping() helper above.  LUKS2 is the
# luksy default, so only the LUKS1 cases pass --luks1.

@test wrapping-defaults-luks1 {
	wrapping --luks1
}

@test wrapping-defaults-luks2 {
	wrapping
}

@test wrapping-aes-xts-plain32-luks1 {
	wrapping --cipher aes-xts-plain --luks1
}

@test wrapping-aes-xts-plain32-luks2 {
	wrapping --cipher aes-xts-plain
}

@test wrapping-aes-xts-plain64-luks1 {
	wrapping --cipher aes-xts-plain64 --luks1
}

@test wrapping-aes-xts-plain64-luks2 {
	wrapping --cipher aes-xts-plain64
}

@test wrapping-serpent-xts-plain64-luks1 {
	wrapping --cipher serpent-xts-plain64 --luks1
}

@test wrapping-serpent-xts-plain64-luks2 {
	wrapping --cipher serpent-xts-plain64
}

@test wrapping-twofish-xts-plain64-luks1 {
	wrapping --cipher twofish-xts-plain64 --luks1
}

@test wrapping-twofish-xts-plain64-luks2 {
	wrapping --cipher twofish-xts-plain64
}

@test wrapping-aes-cbc-plain32-luks1 {
	wrapping --cipher aes-cbc-plain --luks1
}

@test wrapping-aes-cbc-plain32-luks2 {
	wrapping --cipher aes-cbc-plain
}

@test wrapping-aes-cbc-plain64-luks1 {
	wrapping --cipher aes-cbc-plain64 --luks1
}

@test wrapping-aes-cbc-plain64-luks2 {
	wrapping --cipher aes-cbc-plain64
}

@test wrapping-aes-cbc-essiv:sha256-luks1 {
	wrapping --cipher aes-cbc-essiv:sha256 --luks1
}

@test wrapping-aes-cbc-essiv:sha256-luks2 {
	wrapping --cipher aes-cbc-essiv:sha256
}
+
# wrapping_cryptsetup is the inverse direction: format an image with
# cryptsetup, extract the payload with "luksy decrypt", and compare it
# against the device the kernel maps with the same password.  Extra
# arguments are passed through to "cryptsetup luksFormat".
# NOTE(review): unlike wrapping() above, this dd writes 1024 MB and lacks
# status=none — presumably intentional (larger data area for luksFormat),
# but confirm.
function wrapping_cryptsetup() {
	for password in short morethaneight morethansixteenchars ; do
		echo testing password: "${password}"
		dd if=/dev/urandom bs=1M count=1024 of=${BATS_TEST_TMPDIR}/encrypted
		echo -n "${password}" | cryptsetup luksFormat -q "$@" ${BATS_TEST_TMPDIR}/encrypted -
		echo -n "${password}" | ${luksy} decrypt --password-fd 0 ${BATS_TEST_TMPDIR}/encrypted ${BATS_TEST_TMPDIR}/plaintext
		# A readable UUID is a quick sanity check that the header parses.
		uuid=$(cryptsetup luksUUID ${BATS_TEST_TMPDIR}/encrypted)
		if test -z "$uuid"; then
			echo error reading UUID
			false
		fi
		echo -n "${password}" | cryptsetup luksOpen -q --key-file - ${BATS_TEST_TMPDIR}/encrypted decrypted
		cmp /dev/mapper/decrypted ${BATS_TEST_TMPDIR}/plaintext
		cryptsetup close decrypted
		uuid=
		rm -f ${BATS_TEST_TMPDIR}/encrypted
		rm -f ${BATS_TEST_TMPDIR}/plaintext
		echo password: "${password}" ok
	done
}

@test wrapping-cryptsetup-defaults-luks1 {
	wrapping_cryptsetup --type luks1
}

@test wrapping-cryptsetup-defaults-luks2 {
	wrapping_cryptsetup --type luks2
}

@test wrapping-cryptsetup-aes-xts-plain32-luks1 {
	wrapping_cryptsetup --cipher aes-xts-plain --type luks1
}

@test wrapping-cryptsetup-aes-xts-plain32-luks2 {
	wrapping_cryptsetup --cipher aes-xts-plain --type luks2
}

@test wrapping-cryptsetup-aes-xts-plain64-luks1 {
	wrapping_cryptsetup --cipher aes-xts-plain64 --type luks1
}

@test wrapping-cryptsetup-aes-xts-plain64-luks2 {
	wrapping_cryptsetup --cipher aes-xts-plain64 --type luks2
}

@test wrapping-cryptsetup-serpent-xts-plain64-luks1 {
	wrapping_cryptsetup --cipher serpent-xts-plain64 --type luks1
}

@test wrapping-cryptsetup-serpent-xts-plain64-luks2 {
	wrapping_cryptsetup --cipher serpent-xts-plain64 --type luks2
}

@test wrapping-cryptsetup-twofish-xts-plain64-luks1 {
	wrapping_cryptsetup --cipher twofish-xts-plain64 --type luks1
}

@test wrapping-cryptsetup-twofish-xts-plain64-luks2 {
	wrapping_cryptsetup --cipher twofish-xts-plain64 --type luks2
}

@test wrapping-cryptsetup-aes-cbc-plain32-luks1 {
	wrapping_cryptsetup --cipher aes-cbc-plain --type luks1
}

@test wrapping-cryptsetup-aes-cbc-plain32-luks2 {
	wrapping_cryptsetup --cipher aes-cbc-plain --type luks2
}

@test wrapping-cryptsetup-aes-cbc-plain64-luks1 {
	wrapping_cryptsetup --cipher aes-cbc-plain64 --type luks1
}

@test wrapping-cryptsetup-aes-cbc-plain64-luks2 {
	wrapping_cryptsetup --cipher aes-cbc-plain64 --type luks2
}

@test wrapping-cryptsetup-aes-cbc-essiv:sha256-luks1 {
	wrapping_cryptsetup --cipher aes-cbc-essiv:sha256 --type luks1
}

@test wrapping-cryptsetup-aes-cbc-essiv:sha256-luks2 {
	wrapping_cryptsetup --cipher aes-cbc-essiv:sha256 --type luks2
}
diff --git a/tune.go b/tune.go
new file mode 100644
index 0000000..6624f88
--- /dev/null
+++ b/tune.go
@@ -0,0 +1,55 @@
+package luksy
+
+import (
+ "hash"
+ "time"
+
+ "golang.org/x/crypto/argon2"
+ "golang.org/x/crypto/pbkdf2"
+)
+
// durationOf reports the wall-clock time taken by a single call to f.
func durationOf(f func()) time.Duration {
	begin := time.Now()
	f()
	end := time.Now()
	return end.Sub(begin)
}
+
// IterationsPBKDF2 estimates a PBKDF2 iteration count, for the given
// salt, key length, and hash constructor, that makes one derivation take
// roughly one second on this machine.  It benchmarks with an empty
// password, doubling the iteration count until a single run takes at
// least 1/10 second, then scales the count proportionally.
func IterationsPBKDF2(salt []byte, keyLen int, h func() hash.Hash) int {
	iterations := 2
	var d time.Duration
	for d < time.Second {
		d = durationOf(func() {
			_ = pbkdf2.Key([]byte{}, salt, iterations, keyLen, h)
		})
		if d < time.Second/10 {
			iterations *= 2
		} else {
			// Extrapolate: a run of `iterations` took d, so scale
			// toward a ~1s derivation.  d >= time.Second/10 here, so
			// the division is safe.
			return iterations * int(time.Second) / int(d)
		}
	}
	return iterations
}
+
// memoryCostArgon2 estimates an Argon2 memory cost for the given salt,
// key length, time cost, and thread count that makes one derivation with
// kdf take roughly one second on this machine.  It probes with an empty
// password, doubling the memory cost until a single run takes at least
// 1/10 second, then scales the cost proportionally.
func memoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int, kdf func([]byte, []byte, uint32, uint32, uint8, uint32) []byte) int {
	memoryCost := 2
	var d time.Duration
	for d < time.Second {
		d = durationOf(func() {
			_ = kdf([]byte{}, salt, uint32(timeCost), uint32(memoryCost), uint8(threadsCost), uint32(keyLen))
		})
		if d < time.Second/10 {
			memoryCost *= 2
		} else {
			// Extrapolate toward a ~1s derivation; d > 0 here.
			return memoryCost * int(float64(time.Second)/float64(d))
		}
	}
	return memoryCost
}

// MemoryCostArgon2 estimates a memory cost using argon2.Key, which the
// x/crypto documentation describes as the Argon2i variant.
func MemoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int) int {
	return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.Key)
}

// MemoryCostArgon2i estimates a memory cost using argon2.IDKey, the
// Argon2id variant.
// NOTE(review): the exported names look swapped relative to the variants
// they tune (argon2.Key is Argon2i, argon2.IDKey is Argon2id) — confirm
// intent before renaming, since callers depend on these names.
func MemoryCostArgon2i(salt []byte, keyLen, timeCost, threadsCost int) int {
	return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.IDKey)
}
diff --git a/v1header.go b/v1header.go
new file mode 100644
index 0000000..ded4a61
--- /dev/null
+++ b/v1header.go
@@ -0,0 +1,321 @@
+package luksy
+
+import (
+ "encoding/binary"
+ "fmt"
+ "syscall"
+)
+
type (
	// V1Header is the on-disk byte layout of a LUKS1 header.
	V1Header  [592]uint8
	// V1KeySlot is the on-disk byte layout of a single LUKS1 key slot.
	V1KeySlot [48]uint8
)

const (
	// Mostly verbatim from LUKS1 On-Disk Format Specification version 1.2.3
	V1Magic = "LUKS\xba\xbe"
	// Header field offsets and lengths; each offset is computed from the
	// previous field so the layout stays self-consistent.
	v1MagicStart          = 0
	v1MagicLength         = 6
	v1VersionStart        = v1MagicStart + v1MagicLength
	v1VersionLength       = 2
	v1CipherNameStart     = v1VersionStart + v1VersionLength
	v1CipherNameLength    = 32
	v1CipherModeStart     = v1CipherNameStart + v1CipherNameLength
	v1CipherModeLength    = 32
	v1HashSpecStart       = v1CipherModeStart + v1CipherModeLength
	v1HashSpecLength      = 32
	v1PayloadOffsetStart  = v1HashSpecStart + v1HashSpecLength
	v1PayloadOffsetLength = 4
	v1KeyBytesStart       = v1PayloadOffsetStart + v1PayloadOffsetLength
	v1KeyBytesLength      = 4
	v1MKDigestStart       = v1KeyBytesStart + v1KeyBytesLength
	v1MKDigestLength      = v1DigestSize
	v1MKDigestSaltStart   = v1MKDigestStart + v1MKDigestLength
	v1MKDigestSaltLength  = v1SaltSize
	v1MKDigestIterStart   = v1MKDigestSaltStart + v1MKDigestSaltLength
	v1MKDigestIterLength  = 4
	v1UUIDStart           = v1MKDigestIterStart + v1MKDigestIterLength
	v1UUIDLength          = 40
	// The eight key slots are stored contiguously after the UUID.
	v1KeySlot1Start    = v1UUIDStart + v1UUIDLength
	v1KeySlot1Length   = 48
	v1KeySlot2Start    = v1KeySlot1Start + v1KeySlot1Length
	v1KeySlot2Length   = 48
	v1KeySlot3Start    = v1KeySlot2Start + v1KeySlot2Length
	v1KeySlot3Length   = 48
	v1KeySlot4Start    = v1KeySlot3Start + v1KeySlot3Length
	v1KeySlot4Length   = 48
	v1KeySlot5Start    = v1KeySlot4Start + v1KeySlot4Length
	v1KeySlot5Length   = 48
	v1KeySlot6Start    = v1KeySlot5Start + v1KeySlot5Length
	v1KeySlot6Length   = 48
	v1KeySlot7Start    = v1KeySlot6Start + v1KeySlot6Length
	v1KeySlot7Length   = 48
	v1KeySlot8Start    = v1KeySlot7Start + v1KeySlot7Length
	v1KeySlot8Length   = 48
	v1HeaderStructSize = v1KeySlot8Start + v1KeySlot8Length

	// Field offsets and lengths within a single key slot.
	v1KeySlotActiveStart             = 0
	v1KeySlotActiveLength            = 4
	v1KeySlotIterationsStart         = v1KeySlotActiveStart + v1KeySlotActiveLength
	v1KeySlotIterationsLength        = 4
	v1KeySlotSaltStart               = v1KeySlotIterationsStart + v1KeySlotIterationsLength
	v1KeySlotSaltLength              = v1SaltSize
	v1KeySlotKeyMaterialOffsetStart  = v1KeySlotSaltStart + v1KeySlotSaltLength
	v1KeySlotKeyMaterialOffsetLength = 4
	v1KeySlotStripesStart            = v1KeySlotKeyMaterialOffsetStart + v1KeySlotKeyMaterialOffsetLength
	v1KeySlotStripesLength           = 4
	v1KeySlotStructSize              = v1KeySlotStripesStart + v1KeySlotStripesLength

	// Fixed sizes and marker values from the specification.
	v1DigestSize               = 20
	v1SaltSize                 = 32
	v1NumKeys                  = 8
	v1KeySlotActiveKeyDisabled = 0x0000dead
	v1KeySlotActiveKeyEnabled  = 0x00ac71f3
	V1Stripes                  = 4000
	V1AlignKeyslots            = 4096
	V1SectorSize               = 512
)
+
// readu2 reads a big-endian uint16 from the given byte offset.
func (h V1Header) readu2(offset int) uint16 {
	return binary.BigEndian.Uint16(h[offset:])
}

// readu4 reads a big-endian uint32 from the given byte offset.
func (h V1Header) readu4(offset int) uint32 {
	return binary.BigEndian.Uint32(h[offset:])
}

// writeu2 stores a big-endian uint16 at the given byte offset.
func (h *V1Header) writeu2(offset int, value uint16) {
	binary.BigEndian.PutUint16(h[offset:], value)
}

// writeu4 stores a big-endian uint32 at the given byte offset.
func (h *V1Header) writeu4(offset int, value uint32) {
	binary.BigEndian.PutUint32(h[offset:], value)
}

// Magic returns the header magic with trailing NUL padding removed.
func (h V1Header) Magic() string {
	return trimZeroPad(string(h[v1MagicStart : v1MagicStart+v1MagicLength]))
}

// SetMagic sets the magic field; only V1Magic is accepted.
func (h *V1Header) SetMagic(magic string) error {
	switch magic {
	case V1Magic:
		copy(h[v1MagicStart:v1MagicStart+v1MagicLength], []uint8(magic))
		return nil
	}
	return fmt.Errorf("magic %q not acceptable, only %q is an acceptable magic value: %w", magic, V1Magic, syscall.EINVAL)
}

// Version returns the on-disk format version.
func (h V1Header) Version() uint16 {
	return h.readu2(v1VersionStart)
}

// SetVersion sets the format version; only 1 is accepted.
func (h *V1Header) SetVersion(version uint16) error {
	switch version {
	case 1:
		h.writeu2(v1VersionStart, version)
		return nil
	}
	return fmt.Errorf("version %d not acceptable, only 1 is an acceptable version: %w", version, syscall.EINVAL)
}
+
+func (h *V1Header) setZeroString(offset int, value string, length int) {
+ for len(value) < length {
+ value = value + "\000"
+ }
+ copy(h[offset:offset+length], []uint8(value))
+}
+
+func (h *V1Header) setInt8(offset int, s []uint8, length int) {
+ t := make([]byte, length)
+ copy(t, s)
+ copy(h[offset:offset+length], s)
+}
+
// CipherName returns the cipher name field (e.g. "aes" in the tests
// elsewhere in this repository) with trailing NUL padding removed.
func (h V1Header) CipherName() string {
	return trimZeroPad(string(h[v1CipherNameStart : v1CipherNameStart+v1CipherNameLength]))
}

// SetCipherName stores the cipher name, NUL-padded to the field size.
func (h *V1Header) SetCipherName(name string) {
	h.setZeroString(v1CipherNameStart, name, v1CipherNameLength)
}

// CipherMode returns the cipher mode field with padding removed.
func (h V1Header) CipherMode() string {
	return trimZeroPad(string(h[v1CipherModeStart : v1CipherModeStart+v1CipherModeLength]))
}

// SetCipherMode stores the cipher mode, NUL-padded to the field size.
func (h *V1Header) SetCipherMode(mode string) {
	h.setZeroString(v1CipherModeStart, mode, v1CipherModeLength)
}

// HashSpec returns the hash specification field with padding removed.
func (h V1Header) HashSpec() string {
	return trimZeroPad(string(h[v1HashSpecStart : v1HashSpecStart+v1HashSpecLength]))
}

// SetHashSpec stores the hash specification, NUL-padded to the field size.
func (h *V1Header) SetHashSpec(spec string) {
	h.setZeroString(v1HashSpecStart, spec, v1HashSpecLength)
}

// PayloadOffset returns the payload offset field.
func (h V1Header) PayloadOffset() uint32 {
	return h.readu4(v1PayloadOffsetStart)
}

// SetPayloadOffset sets the payload offset field.
func (h *V1Header) SetPayloadOffset(offset uint32) {
	h.writeu4(v1PayloadOffsetStart, offset)
}

// KeyBytes returns the master key length field, in bytes.
func (h V1Header) KeyBytes() uint32 {
	return h.readu4(v1KeyBytesStart)
}

// SetKeyBytes sets the master key length field, in bytes.
func (h *V1Header) SetKeyBytes(bytes uint32) {
	h.writeu4(v1KeyBytesStart, bytes)
}
+
+func (h *V1Header) KeySlot(slot int) (V1KeySlot, error) {
+ var ks V1KeySlot
+ if slot < 0 || slot >= v1NumKeys {
+ return ks, fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1)
+ }
+ switch slot {
+ case 0:
+ copy(ks[:], h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length])
+ case 1:
+ copy(ks[:], h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length])
+ case 2:
+ copy(ks[:], h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length])
+ case 3:
+ copy(ks[:], h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length])
+ case 4:
+ copy(ks[:], h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length])
+ case 5:
+ copy(ks[:], h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length])
+ case 6:
+ copy(ks[:], h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length])
+ case 7:
+ copy(ks[:], h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length])
+ }
+ return ks, nil
+}
+
+func (h *V1Header) SetKeySlot(slot int, ks V1KeySlot) error {
+ if slot < 0 || slot >= v1NumKeys {
+ return fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1)
+ }
+ switch slot {
+ case 0:
+ copy(h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length], ks[:])
+ case 1:
+ copy(h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length], ks[:])
+ case 2:
+ copy(h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length], ks[:])
+ case 3:
+ copy(h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length], ks[:])
+ case 4:
+ copy(h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length], ks[:])
+ case 5:
+ copy(h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length], ks[:])
+ case 6:
+ copy(h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length], ks[:])
+ case 7:
+ copy(h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length], ks[:])
+ }
+ return nil
+}
+
// MKDigest returns a copy of the master-key digest field.
func (h V1Header) MKDigest() []uint8 {
	return dupInt8(h[v1MKDigestStart : v1MKDigestStart+v1MKDigestLength])
}

// SetMKDigest stores the master-key digest, zero-padded to the field size.
func (h *V1Header) SetMKDigest(digest []uint8) {
	h.setInt8(v1MKDigestStart, digest, v1MKDigestLength)
}

// MKDigestSalt returns a copy of the master-key digest salt field.
func (h V1Header) MKDigestSalt() []uint8 {
	return dupInt8(h[v1MKDigestSaltStart : v1MKDigestSaltStart+v1MKDigestSaltLength])
}

// SetMKDigestSalt stores the master-key digest salt, zero-padded.
func (h *V1Header) SetMKDigestSalt(salt []uint8) {
	h.setInt8(v1MKDigestSaltStart, salt, v1MKDigestSaltLength)
}

// MKDigestIter returns the master-key digest iteration count.
func (h V1Header) MKDigestIter() uint32 {
	return h.readu4(v1MKDigestIterStart)
}

// SetMKDigestIter sets the master-key digest iteration count.
func (h *V1Header) SetMKDigestIter(bytes uint32) {
	h.writeu4(v1MKDigestIterStart, bytes)
}

// UUID returns the volume UUID with trailing NUL padding removed.
func (h V1Header) UUID() string {
	return trimZeroPad(string(h[v1UUIDStart : v1UUIDStart+v1UUIDLength]))
}

// SetUUID stores the volume UUID, NUL-padded to the field size.
func (h *V1Header) SetUUID(uuid string) {
	h.setZeroString(v1UUIDStart, uuid, v1UUIDLength)
}

// readu4 reads a big-endian uint32 from the key slot at the given offset.
func (s V1KeySlot) readu4(offset int) uint32 {
	return binary.BigEndian.Uint32(s[offset:])
}

// writeu4 stores a big-endian uint32 in the key slot at the given offset.
func (s *V1KeySlot) writeu4(offset int, value uint32) {
	binary.BigEndian.PutUint32(s[offset:], value)
}
+
+func (s *V1KeySlot) setInt8(offset int, i []uint8, length int) {
+ for len(s) < length {
+ i = append(i, 0)
+ }
+ copy(s[offset:offset+length], i)
+}
+
// Active reports whether the key slot is marked in use.  Any value other
// than the two defined markers is rejected as invalid.
func (s V1KeySlot) Active() (bool, error) {
	active := s.readu4(v1KeySlotActiveStart)
	switch active {
	case v1KeySlotActiveKeyDisabled:
		return false, nil
	case v1KeySlotActiveKeyEnabled:
		return true, nil
	}
	return false, fmt.Errorf("got invalid active value %#0x: %w", active, syscall.EINVAL)
}

// SetActive marks the key slot enabled or disabled using the magic
// marker values from the LUKS1 specification.
func (s *V1KeySlot) SetActive(active bool) {
	if active {
		s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyEnabled)
		return
	}
	s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyDisabled)
}

// Iterations returns the key slot's iteration count.
func (s V1KeySlot) Iterations() uint32 {
	return s.readu4(v1KeySlotIterationsStart)
}

// SetIterations sets the key slot's iteration count.
func (s *V1KeySlot) SetIterations(iterations uint32) {
	s.writeu4(v1KeySlotIterationsStart, iterations)
}

// KeySlotSalt returns a copy of the key slot's salt.
func (s V1KeySlot) KeySlotSalt() []uint8 {
	return dupInt8(s[v1KeySlotSaltStart : v1KeySlotSaltStart+v1KeySlotSaltLength])
}

// SetKeySlotSalt stores the key slot's salt, zero-padded to the field size.
func (s *V1KeySlot) SetKeySlotSalt(salt []uint8) {
	s.setInt8(v1KeySlotSaltStart, salt, v1KeySlotSaltLength)
}

// KeyMaterialOffset returns the key-material offset field (in sectors,
// per the LUKS1 specification).
func (s V1KeySlot) KeyMaterialOffset() uint32 {
	return s.readu4(v1KeySlotKeyMaterialOffsetStart)
}

// SetKeyMaterialOffset sets the key-material offset field.
func (s *V1KeySlot) SetKeyMaterialOffset(material uint32) {
	s.writeu4(v1KeySlotKeyMaterialOffsetStart, material)
}

// Stripes returns the key slot's anti-forensic stripe count.
func (s V1KeySlot) Stripes() uint32 {
	return s.readu4(v1KeySlotStripesStart)
}

// SetStripes sets the key slot's anti-forensic stripe count.
func (s *V1KeySlot) SetStripes(stripes uint32) {
	s.writeu4(v1KeySlotStripesStart, stripes)
}
diff --git a/v2header.go b/v2header.go
new file mode 100644
index 0000000..4f94a05
--- /dev/null
+++ b/v2header.go
@@ -0,0 +1,203 @@
+package luksy
+
import (
	"encoding/binary"
	"fmt"
	"strings"
	"syscall"
)
+
// V2Header is the on-disk byte layout of one binary LUKS2 header.
type V2Header [4096]uint8

const (
	// Mostly verbatim from LUKS2 On-Disk Format Specification version 1.1.1
	V2Magic1 = V1Magic
	V2Magic2 = "SKUL\xba\xbe"
	// Field offsets and lengths; each offset is computed from the
	// previous field so the layout stays self-consistent.
	v2MagicStart              = 0
	v2MagicLength             = 6
	v2VersionStart            = v2MagicStart + v2MagicLength
	v2VersionLength           = 2
	v2HeaderSizeStart         = v2VersionStart + v2VersionLength
	v2HeaderSizeLength        = 8
	v2SequenceIDStart         = v2HeaderSizeStart + v2HeaderSizeLength
	v2SequenceIDLength        = 8
	v2LabelStart              = v2SequenceIDStart + v2SequenceIDLength
	v2LabelLength             = 48
	v2ChecksumAlgorithmStart  = v2LabelStart + v2LabelLength
	v2ChecksumAlgorithmLength = 32
	v2SaltStart               = v2ChecksumAlgorithmStart + v2ChecksumAlgorithmLength
	v2SaltLength              = 64
	v2UUIDStart               = v2SaltStart + v2SaltLength
	v2UUIDLength              = 40
	v2SubsystemStart          = v2UUIDStart + v2UUIDLength
	v2SubsystemLength         = v2LabelLength
	v2HeaderOffsetStart       = v2SubsystemStart + v2SubsystemLength
	v2HeaderOffsetLength      = 8
	v2Padding1Start           = v2HeaderOffsetStart + v2HeaderOffsetLength
	v2Padding1Length          = 184
	v2ChecksumStart           = v2Padding1Start + v2Padding1Length
	v2ChecksumLength          = 64
	v2Padding4096Start        = v2ChecksumStart + v2ChecksumLength
	v2Padding4096Length       = 7 * 512
	v2HeaderStructSize        = v2Padding4096Start + v2Padding4096Length

	V2Stripes       = 4000
	V2AlignKeyslots = 4096
	V2SectorSize    = 4096
)

// Magic returns the raw 6-byte magic value, untrimmed.
func (h V2Header) Magic() string {
	return string(h[v2MagicStart : v2MagicStart+v2MagicLength])
}

// SetMagic sets the magic field; only the primary (V2Magic1) and
// secondary (V2Magic2) header magics are accepted.
func (h *V2Header) SetMagic(magic string) error {
	switch magic {
	case V2Magic1, V2Magic2:
		copy(h[v2MagicStart:v2MagicStart+v2MagicLength], []uint8(magic))
		return nil
	}
	return fmt.Errorf("magic %q not acceptable, only %q and %q are acceptable magic values: %w", magic, V2Magic1, V2Magic2, syscall.EINVAL)
}
+
+func (h V2Header) readu2(offset int) uint16 {
+ t := uint16(0)
+ for i := 0; i < 2; i++ {
+ t = (t << 8) + uint16(h[offset+i])
+ }
+ return t
+}
+
+func (h V2Header) readu8(offset int) uint64 {
+ t := uint64(0)
+ for i := 0; i < 8; i++ {
+ t = (t << 8) + uint64(h[offset+i])
+ }
+ return t
+}
+
+func (h *V2Header) writeu2(offset int, value uint16) {
+ t := value
+ for i := 0; i < 2; i++ {
+ h[offset+1-i] = uint8(uint64(t) & 0xff)
+ t >>= 8
+ }
+}
+
+func (h *V2Header) writeu8(offset int, value uint64) {
+ t := value
+ for i := 0; i < 8; i++ {
+ h[offset+7-i] = uint8(uint64(t) & 0xff)
+ t >>= 8
+ }
+}
+
// Version returns the on-disk format version.
func (h V2Header) Version() uint16 {
	return h.readu2(v2VersionStart)
}

// SetVersion sets the format version; only 2 is accepted.
func (h *V2Header) SetVersion(version uint16) error {
	switch version {
	case 2:
		h.writeu2(v2VersionStart, version)
		return nil
	}
	return fmt.Errorf("version %d not acceptable, only 2 is an acceptable version: %w", version, syscall.EINVAL)
}

// HeaderSize returns the header-size field (hdr_size in the LUKS2 spec).
func (h V2Header) HeaderSize() uint64 {
	return h.readu8(v2HeaderSizeStart)
}

// SetHeaderSize sets the header-size field.
func (h *V2Header) SetHeaderSize(size uint64) {
	h.writeu8(v2HeaderSizeStart, size)
}

// SequenceID returns the header's sequence ID.
func (h V2Header) SequenceID() uint64 {
	return h.readu8(v2SequenceIDStart)
}

// SetSequenceID sets the header's sequence ID.
func (h *V2Header) SetSequenceID(id uint64) {
	h.writeu8(v2SequenceIDStart, id)
}
+
// trimZeroPad strips the trailing NUL padding from a string that was
// stored in a fixed-width, zero-padded header field.
func trimZeroPad(s string) string {
	return strings.TrimRight(s, "\x00")
}
+
// Label returns the header label with trailing NUL padding removed.
func (h V2Header) Label() string {
	return trimZeroPad(string(h[v2LabelStart : v2LabelStart+v2LabelLength]))
}

// setZeroString stores value at offset, NUL-padded to exactly length
// bytes; a value longer than length is silently truncated by the copy.
func (h *V2Header) setZeroString(offset int, value string, length int) {
	for len(value) < length {
		value = value + "\000"
	}
	copy(h[offset:offset+length], []uint8(value))
}

// SetLabel stores the label, NUL-padded to the field size.
func (h *V2Header) SetLabel(label string) {
	h.setZeroString(v2LabelStart, label, v2LabelLength)
}

// ChecksumAlgorithm returns the header checksum algorithm name.
func (h V2Header) ChecksumAlgorithm() string {
	return trimZeroPad(string(h[v2ChecksumAlgorithmStart : v2ChecksumAlgorithmStart+v2ChecksumAlgorithmLength]))
}

// SetChecksumAlgorithm stores the checksum algorithm name, NUL-padded.
func (h *V2Header) SetChecksumAlgorithm(alg string) {
	h.setZeroString(v2ChecksumAlgorithmStart, alg, v2ChecksumAlgorithmLength)
}
+
// dupInt8 returns an independent copy of s, so callers cannot mutate
// the underlying header storage through the returned slice.
func dupInt8(s []uint8) []uint8 {
	return append(make([]uint8, 0, len(s)), s...)
}
+
// setInt8 stores s at offset, zero-padded to exactly length bytes, so
// that an input shorter than the field fully overwrites stale bytes.
func (h *V2Header) setInt8(offset int, s []uint8, length int) {
	t := make([]byte, length)
	copy(t, s)
	copy(h[offset:offset+length], t)
}

// Salt returns a copy of the header salt.
func (h V2Header) Salt() []uint8 {
	return dupInt8(h[v2SaltStart : v2SaltStart+v2SaltLength])
}

// SetSalt stores the header salt, zero-padded to the field size.
func (h *V2Header) SetSalt(salt []uint8) {
	h.setInt8(v2SaltStart, salt, v2SaltLength)
}

// UUID returns the volume UUID with trailing NUL padding removed.
func (h V2Header) UUID() string {
	return trimZeroPad(string(h[v2UUIDStart : v2UUIDStart+v2UUIDLength]))
}

// SetUUID stores the volume UUID, NUL-padded to the field size.
func (h *V2Header) SetUUID(uuid string) {
	h.setZeroString(v2UUIDStart, uuid, v2UUIDLength)
}

// Subsystem returns the subsystem field with padding removed.
func (h V2Header) Subsystem() string {
	return trimZeroPad(string(h[v2SubsystemStart : v2SubsystemStart+v2SubsystemLength]))
}

// SetSubsystem stores the subsystem field, NUL-padded to the field size.
func (h *V2Header) SetSubsystem(ss string) {
	h.setZeroString(v2SubsystemStart, ss, v2SubsystemLength)
}

// HeaderOffset returns the header's offset field.
func (h V2Header) HeaderOffset() uint64 {
	return h.readu8(v2HeaderOffsetStart)
}

// SetHeaderOffset sets the header's offset field.
func (h *V2Header) SetHeaderOffset(o uint64) {
	h.writeu8(v2HeaderOffsetStart, o)
}

// Checksum returns a copy of the stored header checksum.  When the
// checksum algorithm name resolves via hasherByName (defined elsewhere
// in this package), the result is truncated to that hash's digest size;
// otherwise the entire field is returned.
func (h V2Header) Checksum() []uint8 {
	hasher, err := hasherByName(h.ChecksumAlgorithm())
	if err == nil {
		return dupInt8(h[v2ChecksumStart : v2ChecksumStart+hasher().Size()])
	}
	return dupInt8(h[v2ChecksumStart : v2ChecksumStart+v2ChecksumLength])
}

// SetChecksum stores the checksum, zero-padded to the field size.
func (h *V2Header) SetChecksum(sum []uint8) {
	h.setInt8(v2ChecksumStart, sum, v2ChecksumLength)
}
diff --git a/v2json.go b/v2json.go
new file mode 100644
index 0000000..5d7650d
--- /dev/null
+++ b/v2json.go
@@ -0,0 +1,157 @@
+package luksy
+
// V2JSON is the top-level structure of the LUKS2 JSON metadata area.
type V2JSON struct {
	Config   V2JSONConfig             `json:"config"`
	Keyslots map[string]V2JSONKeyslot `json:"keyslots"`
	Digests  map[string]V2JSONDigest  `json:"digests"`
	Segments map[string]V2JSONSegment `json:"segments"`
	Tokens   map[string]V2JSONToken   `json:"tokens"`
}
+
// V2JSONKeyslotPriority is the numeric priority assigned to a keyslot.
type V2JSONKeyslotPriority int

// String returns the JSON spelling of the priority ("ignore", "normal",
// or "high"), or "unknown" for any unrecognized value.
func (p V2JSONKeyslotPriority) String() string {
	if p < V2JSONKeyslotPriorityIgnore || p > V2JSONKeyslotPriorityHigh {
		return "unknown"
	}
	return [...]string{"ignore", "normal", "high"}[p]
}

const (
	V2JSONKeyslotPriorityIgnore = V2JSONKeyslotPriority(0)
	V2JSONKeyslotPriorityNormal = V2JSONKeyslotPriority(1)
	V2JSONKeyslotPriorityHigh   = V2JSONKeyslotPriority(2)
)
+
// V2JSONKeyslot describes one keyslot; exactly one of the embedded
// variant structs is populated, selected by Type.
type V2JSONKeyslot struct {
	Type                    string                 `json:"type"`
	KeySize                 int                    `json:"key_size"`
	Area                    V2JSONArea             `json:"area"`
	Priority                *V2JSONKeyslotPriority `json:"priority,omitempty"`
	*V2JSONKeyslotLUKS2     // type = "luks2"
	*V2JSONKeyslotReencrypt // type = "reencrypt"
}

// V2JSONKeyslotLUKS2 holds the fields specific to "luks2" keyslots.
type V2JSONKeyslotLUKS2 struct {
	AF  V2JSONAF  `json:"af"`
	Kdf V2JSONKdf `json:"kdf"`
}

// V2JSONKeyslotReencrypt holds the fields specific to "reencrypt" keyslots.
type V2JSONKeyslotReencrypt struct {
	Mode      string `json:"mode"`      // only "reencrypt", "encrypt", "decrypt"
	Direction string `json:"direction"` // only "forward", "backward"
}

// V2JSONArea describes where a keyslot's binary data lives; the embedded
// variant structs are selected by Type.
type V2JSONArea struct {
	Type                         string `json:"type"` // only "raw", "none", "journal", "checksum", "datashift", "datashift-journal", "datashift-checksum"
	Offset                       int64  `json:"offset,string"`
	Size                         int64  `json:"size,string"`
	*V2JSONAreaRaw               // type = "raw"
	*V2JSONAreaChecksum          // type = "checksum"
	*V2JSONAreaDatashift         // type = "datashift"
	*V2JSONAreaDatashiftChecksum // type = "datashift-checksum"
}

// V2JSONAreaRaw holds the fields specific to "raw" areas.
type V2JSONAreaRaw struct {
	Encryption string `json:"encryption"`
	KeySize    int    `json:"key_size"`
}

// V2JSONAreaChecksum holds the fields specific to "checksum" areas.
type V2JSONAreaChecksum struct {
	Hash       string `json:"hash"`
	SectorSize int    `json:"sector_size"`
}

// V2JSONAreaDatashift holds the fields specific to "datashift" areas.
type V2JSONAreaDatashift struct {
	ShiftSize int `json:"shift_size,string"`
}

// V2JSONAreaDatashiftChecksum combines the checksum and datashift
// fields for "datashift-checksum" areas.
type V2JSONAreaDatashiftChecksum struct {
	V2JSONAreaChecksum
	V2JSONAreaDatashift
}

// V2JSONAF describes the anti-forensic splitter configuration.
type V2JSONAF struct {
	Type           string `json:"type"` // "luks1"
	*V2JSONAFLUKS1 // type == "luks1"
}

// V2JSONAFLUKS1 holds the fields specific to the "luks1" splitter.
type V2JSONAFLUKS1 struct {
	Stripes int    `json:"stripes"` // 4000
	Hash    string `json:"hash"`    // "sha256"
}

// V2JSONKdf describes the key derivation function for a keyslot; the
// embedded variant structs are selected by Type.
type V2JSONKdf struct {
	Type              string `json:"type"`
	Salt              []byte `json:"salt"`
	*V2JSONKdfPbkdf2  // type = "pbkdf2"
	*V2JSONKdfArgon2i // type = "argon2i" or type = "argon2id"
}

// V2JSONKdfPbkdf2 holds the PBKDF2 parameters.
type V2JSONKdfPbkdf2 struct {
	Hash       string `json:"hash"`
	Iterations int    `json:"iterations"`
}

// V2JSONKdfArgon2i holds the Argon2 parameters (shared by the argon2i
// and argon2id KDF types).
type V2JSONKdfArgon2i struct {
	Time   int `json:"time"`
	Memory int `json:"memory"`
	CPUs   int `json:"cpus"`
}

// V2JSONSegment describes one region of the encrypted payload.
type V2JSONSegment struct {
	Type                string   `json:"type"` // only "linear", "crypt"
	Offset              string   `json:"offset"`
	Size                string   `json:"size"` // numeric value or "dynamic"
	Flags               []string `json:"flags,omitempty"`
	*V2JSONSegmentCrypt `json:",omitempty"` // type = "crypt"
}

// V2JSONSegmentCrypt holds the fields specific to "crypt" segments.
type V2JSONSegmentCrypt struct {
	IVTweak    int                     `json:"iv_tweak,string"`
	Encryption string                  `json:"encryption"`
	SectorSize int                     `json:"sector_size"` // 512 or 1024 or 2048 or 4096
	Integrity  *V2JSONSegmentIntegrity `json:"integrity,omitempty"`
}

// V2JSONSegmentIntegrity describes the integrity protection of a segment.
type V2JSONSegmentIntegrity struct {
	Type              string `json:"type"`
	JournalEncryption string `json:"journal_encryption"`
	JournalIntegrity  string `json:"journal_integrity"`
}

// V2JSONDigest binds keyslots and segments to a master-key digest.
type V2JSONDigest struct {
	Type                string   `json:"type"`
	Keyslots            []string `json:"keyslots"`
	Segments            []string `json:"segments"`
	Salt                []byte   `json:"salt"`
	Digest              []byte   `json:"digest"`
	*V2JSONDigestPbkdf2 // type == "pbkdf2"
}

// V2JSONDigestPbkdf2 holds the PBKDF2 parameters for a digest.
type V2JSONDigestPbkdf2 struct {
	Hash       string `json:"hash"`
	Iterations int    `json:"iterations"`
}

// V2JSONConfig is the "config" object of the metadata area.
type V2JSONConfig struct {
	JsonSize     int      `json:"json_size,string"`
	KeyslotsSize int      `json:"keyslots_size,string,omitempty"`
	Flags        []string `json:"flags,omitempty"` // one or more of "allow-discards", "same-cpu-crypt", "submit-from-crypt-cpus", "no-journal", "no-read-workqueue", "no-write-workqueue"
	Requirements []string `json:"requirements,omitempty"`
}

// V2JSONToken describes an auxiliary token; the embedded variant struct
// is selected by Type.
type V2JSONToken struct {
	Type                     string   `json:"type"` // "luks2-keyring"
	Keyslots                 []string `json:"keyslots,omitempty"`
	*V2JSONTokenLUKS2Keyring // type == "luks2-keyring"
}

// V2JSONTokenLUKS2Keyring holds the fields of a "luks2-keyring" token.
type V2JSONTokenLUKS2Keyring struct {
	KeyDescription string `json:"key_description"`
}